mirror of
https://github.com/ARM-software/devlib.git
synced 2025-09-22 20:01:53 +01:00
Compare commits
349 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
e6c52c49ff | ||
|
6825130e48 | ||
|
80c0e37d11 | ||
|
f523afda95 | ||
|
b64ec714a0 | ||
|
6249c06b44 | ||
|
3af3463c3c | ||
|
7065847f77 | ||
|
79783fa09a | ||
|
796536d67d | ||
|
b9374d530e | ||
|
34e51e7230 | ||
|
fa595e1a3d | ||
|
78938cf243 | ||
|
4941a7183a | ||
|
3ab9d23a4a | ||
|
5cf18a7b3c | ||
|
5bfeae08f4 | ||
|
a87a1df0fb | ||
|
3bf763688e | ||
|
a5cced85ce | ||
|
9f55ae7603 | ||
|
e7bafd6e5b | ||
|
ca84124fae | ||
|
1f41853341 | ||
|
82a2f7d8b6 | ||
|
2a633b783a | ||
|
b6d1863e77 | ||
|
bbc891341c | ||
|
d14df074ee | ||
|
81f9ee2c50 | ||
|
09d0a0f500 | ||
|
fe2fe3ae04 | ||
|
4859e818fb | ||
|
5d342044a2 | ||
|
d953377ff3 | ||
|
4f2d9fa66d | ||
|
4e44863777 | ||
|
6cabad14d0 | ||
|
31f7c1e8f9 | ||
|
3bc98f855b | ||
|
d2b80ccaf9 | ||
|
552040f390 | ||
|
0d259be01b | ||
|
792101819a | ||
|
3b8317d42e | ||
|
e3da419e5b | ||
|
e251b158b2 | ||
|
c0a5765da5 | ||
|
b32f15bbdb | ||
|
5116d46141 | ||
|
beb3b011bd | ||
|
bf4e242129 | ||
|
b1538fd184 | ||
|
5b37dfc50b | ||
|
a948982700 | ||
|
d300b9e57f | ||
|
81db8200e2 | ||
|
9e9af8c6de | ||
|
5473031ab7 | ||
|
a82db5ed37 | ||
|
1381944e5b | ||
|
822c50273f | ||
|
8f3200679c | ||
|
2cfb076e4c | ||
|
98bc0a31e1 | ||
|
345a9ed199 | ||
|
1fc9f6cc94 | ||
|
4194b1dd5e | ||
|
ef2d1a6fa4 | ||
|
33397649b6 | ||
|
ebf1c1a2e1 | ||
|
1d1ba7811d | ||
|
dc7faf46e4 | ||
|
0498017bf0 | ||
|
b2950686a7 | ||
|
f2b5f85dab | ||
|
c0f26e536a | ||
|
1a02f77fdd | ||
|
117686996b | ||
|
8695344969 | ||
|
f23fbd22b6 | ||
|
24e6de67ae | ||
|
07bbf902ba | ||
|
590069f01f | ||
|
bef1ec3afc | ||
|
0c72763d2a | ||
|
2129d85422 | ||
|
80bddf38a2 | ||
|
00f3f5f690 | ||
|
bc9478c324 | ||
|
9a2c413372 | ||
|
3cb2793e51 | ||
|
1ad2e895b3 | ||
|
3d5a164338 | ||
|
af8c47151e | ||
|
20d1eabaf0 | ||
|
45ee68fdd4 | ||
|
b52462440c | ||
|
bae741dc81 | ||
|
b717deb8e4 | ||
|
ccde9de257 | ||
|
c25852b210 | ||
|
f7b7aaf527 | ||
|
569e4bd057 | ||
|
07cad78046 | ||
|
21cb10f550 | ||
|
d2aea077b4 | ||
|
d464053546 | ||
|
cfb28c47c0 | ||
|
b941c6c5a6 | ||
|
ea9f9c878b | ||
|
4f10387688 | ||
|
a4f9231707 | ||
|
3c85738f0d | ||
|
45881b9f0d | ||
|
a8ff622f33 | ||
|
fcd2439b50 | ||
|
3709e06b5c | ||
|
7c8573a416 | ||
|
6f1ffee2b7 | ||
|
7ade1b8bcc | ||
|
3c28c280de | ||
|
b9d50ec164 | ||
|
7780cfdd5c | ||
|
7c79a040b7 | ||
|
779b0cbc77 | ||
|
b6cab6467d | ||
|
ec0a5884c0 | ||
|
7f5e0f5b4d | ||
|
7e682ed97d | ||
|
62e24c5764 | ||
|
eb6fa93845 | ||
|
9d5d70564f | ||
|
922686a348 | ||
|
98e2e51d09 | ||
|
92e16ee873 | ||
|
72ded188fa | ||
|
dcab0b3718 | ||
|
37a6b4f96d | ||
|
1ddbb75e74 | ||
|
696dec9b91 | ||
|
17374cf2b4 | ||
|
9661c6bff3 | ||
|
0aeb5bc409 | ||
|
a5640502ac | ||
|
6fe78b4d47 | ||
|
5bda1c0eee | ||
|
0465a75c56 | ||
|
795c0f233f | ||
|
5ff278b133 | ||
|
b72fb470e7 | ||
|
a4fd57f023 | ||
|
cf8ebf6668 | ||
|
15a77a841d | ||
|
9bf9f2dd1b | ||
|
19887de71e | ||
|
baa7ad1650 | ||
|
75621022be | ||
|
01dd80df34 | ||
|
eb0661a6b4 | ||
|
f303d1326b | ||
|
abd88548d2 | ||
|
2a934288eb | ||
|
2bf4d8a433 | ||
|
cf26dee308 | ||
|
e7bd2a5b22 | ||
|
72be3d01f8 | ||
|
745dc9499a | ||
|
6c9f80ff76 | ||
|
182f4e7b3f | ||
|
4df2b9a4c4 | ||
|
aa64951398 | ||
|
0fa91d6c4c | ||
|
0e6280ae31 | ||
|
2650a534f3 | ||
|
c212ef2146 | ||
|
5b5da7c392 | ||
|
3801fe1d67 | ||
|
43673e3fc5 | ||
|
bbe3bb6adb | ||
|
656da00d2a | ||
|
6b0b12d833 | ||
|
56cdc2e6c3 | ||
|
def235064b | ||
|
4d1299d678 | ||
|
d4f3316120 | ||
|
76ef9e0364 | ||
|
249b8336b5 | ||
|
c5d06ee3d6 | ||
|
207291e940 | ||
|
6b72b50c40 | ||
|
c73266c3a9 | ||
|
0d6c6883dd | ||
|
bb1552151a | ||
|
5e69f06d77 | ||
|
9e6cfde832 | ||
|
4fe0b2cb64 | ||
|
b9654c694c | ||
|
ed135febde | ||
|
5d4315c5d2 | ||
|
9982f810e1 | ||
|
5601fdb108 | ||
|
4e36bad2ab | ||
|
72e4443b7d | ||
|
9ddf763650 | ||
|
18830b74da | ||
|
66de30799b | ||
|
156915f26f | ||
|
74edfcbe43 | ||
|
aa62a52ee3 | ||
|
9c86174ff5 | ||
|
ea19235aed | ||
|
e1fb6cf911 | ||
|
d9d187471f | ||
|
c944d34593 | ||
|
964fde2fef | ||
|
988de69b61 | ||
|
ded30eef00 | ||
|
71bd8b10ed | ||
|
986261bc7e | ||
|
dc5f4c6b49 | ||
|
88f8c9e9ac | ||
|
0c434e8a1b | ||
|
5848369846 | ||
|
002ade33a8 | ||
|
2e8d42db79 | ||
|
6b414cc291 | ||
|
0d798f1c4f | ||
|
1325e59b1a | ||
|
f141899dae | ||
|
984556bc8e | ||
|
03a469fc38 | ||
|
2d86474682 | ||
|
ada318f27b | ||
|
b8f7b24790 | ||
|
a9b9938b0f | ||
|
f619f1dd07 | ||
|
ad350c9267 | ||
|
8343794d34 | ||
|
f2bc5dbc14 | ||
|
6f42f67e95 | ||
|
ae7f01fd19 | ||
|
b5f36610ad | ||
|
4c8f2430e2 | ||
|
a8b6e56874 | ||
|
c92756d65a | ||
|
8512f116fc | ||
|
be8b87d559 | ||
|
d76c2d63fe | ||
|
8bfa050226 | ||
|
8871fe3c25 | ||
|
aa50b2d42d | ||
|
ebcb1664e7 | ||
|
0ff8628c9c | ||
|
c0d8a98d90 | ||
|
441eea9897 | ||
|
b0db2067a2 | ||
|
1417e81605 | ||
|
2e81a72b39 | ||
|
22f2c8b663 | ||
|
c2db6c17ab | ||
|
e01a76ef1b | ||
|
9fcca25031 | ||
|
a6b9542f0f | ||
|
413e83f5d6 | ||
|
ac19873423 | ||
|
17d4b22b9f | ||
|
f65130b7c7 | ||
|
5b51c2644e | ||
|
a752f55956 | ||
|
781f9b068d | ||
|
7e79eeb9cb | ||
|
911a9f2ef4 | ||
|
cc0679e40f | ||
|
5dea9f8bcf | ||
|
a9ee41855d | ||
|
c13e3c260b | ||
|
aabb74c8cb | ||
|
a4c22cef71 | ||
|
3da7fbc9dd | ||
|
f2a87ce61c | ||
|
2b6cb264cf | ||
|
7e0e6e8706 | ||
|
4fabcae0b4 | ||
|
3c4a282c29 | ||
|
683da92067 | ||
|
1569be9ba7 | ||
|
f1b7fd184a | ||
|
22a5945460 | ||
|
fbf0875357 | ||
|
b7ab340d33 | ||
|
beb824256d | ||
|
efbf630422 | ||
|
389ec76c1e | ||
|
1f50b0ffc2 | ||
|
ed7f0e56a2 | ||
|
d376bc10ee | ||
|
60c2e7721e | ||
|
5e13a045a3 | ||
|
c4c76ebcf8 | ||
|
bdaea26f6f | ||
|
a3c04fc140 | ||
|
94c1339efd | ||
|
85e0fb08fe | ||
|
74444210e7 | ||
|
da3afeba2e | ||
|
4a4739cefb | ||
|
01c39cfe4c | ||
|
b9b38a20f6 | ||
|
809d987f84 | ||
|
bf1310c278 | ||
|
78de479a43 | ||
|
75332cf14a | ||
|
6089eaf40a | ||
|
fa41bb01d2 | ||
|
8654a6dc2b | ||
|
150fe2b32b | ||
|
f2a88fd1dc | ||
|
b7a04c9ebc | ||
|
5d97c3186b | ||
|
d86d67f49c | ||
|
996ee82f09 | ||
|
61208ce2e0 | ||
|
8cd1470bb8 | ||
|
66be73be3e | ||
|
63d2fb53fc | ||
|
30dc161f12 | ||
|
d6df5c81fd | ||
|
b0463e58d8 | ||
|
512c5f3737 | ||
|
cc0582ef59 | ||
|
ec717e3399 | ||
|
511d478164 | ||
|
d6d322c8ac | ||
|
ae99db3e24 | ||
|
241c7e01bd | ||
|
68b418dac2 | ||
|
df61b2a269 | ||
|
e8a03e00f3 | ||
|
4b5f65699f | ||
|
454b94501c | ||
|
5cb551b315 | ||
|
3b0df282a9 | ||
|
27fc75f74c | ||
|
473f37f1bc | ||
|
ae8db119a9 | ||
|
472c5a3294 | ||
|
8ac89fe9ed |
10
README.rst
10
README.rst
@@ -14,6 +14,16 @@ Installation
|
||||
sudo -H pip install devlib
|
||||
|
||||
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
``devlib`` should install all dependencies automatically, however if you run
|
||||
into issues please ensure you are using that latest version of pip.
|
||||
|
||||
On some systems there may additional steps required to install the dependency
|
||||
``paramiko`` please consult the `module documentation <http://www.paramiko.org/installing.html>`_
|
||||
for more information.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
|
@@ -15,7 +15,7 @@
|
||||
|
||||
from devlib.target import Target, LinuxTarget, AndroidTarget, LocalLinuxTarget, ChromeOsTarget
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.exception import DevlibError, TargetError, HostError, TargetNotRespondingError
|
||||
from devlib.exception import DevlibError, DevlibTransientError, DevlibStableError, TargetError, TargetTransientError, TargetStableError, TargetNotRespondingError, HostError
|
||||
|
||||
from devlib.module import Module, HardRestModule, BootModule, FlashModule
|
||||
from devlib.module import get_module, register_module
|
||||
@@ -34,25 +34,35 @@ from devlib.instrument.hwmon import HwmonInstrument
|
||||
from devlib.instrument.monsoon import MonsoonInstrument
|
||||
from devlib.instrument.netstats import NetstatsInstrument
|
||||
from devlib.instrument.gem5power import Gem5PowerInstrument
|
||||
from devlib.instrument.baylibre_acme import (
|
||||
BaylibreAcmeNetworkInstrument,
|
||||
BaylibreAcmeXMLInstrument,
|
||||
BaylibreAcmeLocalInstrument,
|
||||
BaylibreAcmeInstrument,
|
||||
)
|
||||
|
||||
from devlib.derived import DerivedMeasurements, DerivedMetric
|
||||
from devlib.derived.energy import DerivedEnergyMeasurements
|
||||
from devlib.derived.fps import DerivedGfxInfoStats, DerivedSurfaceFlingerStats
|
||||
|
||||
from devlib.trace.ftrace import FtraceCollector
|
||||
from devlib.trace.serial_trace import SerialTraceCollector
|
||||
from devlib.collector.ftrace import FtraceCollector
|
||||
from devlib.collector.perf import PerfCollector
|
||||
from devlib.collector.serial_trace import SerialTraceCollector
|
||||
from devlib.collector.dmesg import DmesgCollector
|
||||
from devlib.collector.logcat import LogcatCollector
|
||||
|
||||
from devlib.host import LocalConnection
|
||||
from devlib.utils.android import AdbConnection
|
||||
from devlib.utils.ssh import SshConnection, TelnetConnection, Gem5Connection
|
||||
|
||||
from devlib.utils.version import get_commit as __get_commit
|
||||
from devlib.utils.version import (get_devlib_version as __get_devlib_version,
|
||||
get_commit as __get_commit)
|
||||
|
||||
|
||||
__version__ = '1.0.0'
|
||||
__version__ = __get_devlib_version()
|
||||
|
||||
__commit = __get_commit()
|
||||
if __commit:
|
||||
__full_version__ = '{}-{}'.format(__version__, __commit)
|
||||
__full_version__ = '{}+{}'.format(__version__, __commit)
|
||||
else:
|
||||
__full_version__ = __version__
|
||||
|
BIN
devlib/bin/arm/simpleperf
Executable file
BIN
devlib/bin/arm/simpleperf
Executable file
Binary file not shown.
Binary file not shown.
BIN
devlib/bin/arm64/get_clock_boottime
Executable file
BIN
devlib/bin/arm64/get_clock_boottime
Executable file
Binary file not shown.
BIN
devlib/bin/arm64/perf
Normal file
BIN
devlib/bin/arm64/perf
Normal file
Binary file not shown.
BIN
devlib/bin/arm64/simpleperf
Executable file
BIN
devlib/bin/arm64/simpleperf
Executable file
Binary file not shown.
Binary file not shown.
BIN
devlib/bin/armeabi/get_clock_boottime
Executable file
BIN
devlib/bin/armeabi/get_clock_boottime
Executable file
Binary file not shown.
BIN
devlib/bin/armeabi/perf
Normal file
BIN
devlib/bin/armeabi/perf
Normal file
Binary file not shown.
BIN
devlib/bin/ppc64le/busybox
Normal file
BIN
devlib/bin/ppc64le/busybox
Normal file
Binary file not shown.
BIN
devlib/bin/ppc64le/trace-cmd
Executable file
BIN
devlib/bin/ppc64le/trace-cmd
Executable file
Binary file not shown.
@@ -214,7 +214,7 @@ cgroups_freezer_set_state() {
|
||||
|
||||
# Set the state of the freezer
|
||||
echo $STATE > $SYSFS_ENTRY
|
||||
|
||||
|
||||
# And check it applied cleanly
|
||||
for i in `seq 1 10`; do
|
||||
[ $($CAT $SYSFS_ENTRY) = $STATE ] && exit 0
|
||||
@@ -238,6 +238,19 @@ hotplug_online_all() {
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
################################################################################
|
||||
# Scheduler
|
||||
################################################################################
|
||||
|
||||
sched_get_kernel_attributes() {
|
||||
MATCH=${1:-'.*'}
|
||||
[ -d /proc/sys/kernel/ ] || exit 1
|
||||
$GREP '' /proc/sys/kernel/sched_* | \
|
||||
$SED -e 's|/proc/sys/kernel/sched_||' | \
|
||||
$GREP -e "$MATCH"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Misc
|
||||
################################################################################
|
||||
@@ -264,6 +277,48 @@ read_tree_values() {
|
||||
fi
|
||||
}
|
||||
|
||||
read_tree_tgz_b64() {
|
||||
BASEPATH=$1
|
||||
MAXDEPTH=$2
|
||||
TMPBASE=$3
|
||||
|
||||
if [ ! -e $BASEPATH ]; then
|
||||
echo "ERROR: $BASEPATH does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd $TMPBASE
|
||||
TMP_FOLDER=$($BUSYBOX realpath $($BUSYBOX mktemp -d XXXXXX))
|
||||
|
||||
# 'tar' doesn't work as expected on debugfs, so copy the tree first to
|
||||
# workaround the issue
|
||||
cd $BASEPATH
|
||||
for CUR_FILE in $($BUSYBOX find . -follow -type f -maxdepth $MAXDEPTH); do
|
||||
$BUSYBOX cp --parents $CUR_FILE $TMP_FOLDER/ 2> /dev/null
|
||||
done
|
||||
|
||||
cd $TMP_FOLDER
|
||||
$BUSYBOX tar cz * 2>/dev/null | $BUSYBOX base64
|
||||
|
||||
# Clean-up the tmp folder since we won't need it any more
|
||||
cd $TMPBASE
|
||||
rm -rf $TMP_FOLDER
|
||||
}
|
||||
|
||||
get_linux_system_id() {
|
||||
kernel=$($BUSYBOX uname -r)
|
||||
hardware=$($BUSYBOX ip a | $BUSYBOX grep 'link/ether' | $BUSYBOX sed 's/://g' | $BUSYBOX awk '{print $2}' | $BUSYBOX tr -d '\n')
|
||||
filesystem=$(ls /dev/disk/by-uuid | $BUSYBOX tr '\n' '-' | $BUSYBOX sed 's/-$//')
|
||||
echo "$hardware/$kernel/$filesystem"
|
||||
}
|
||||
|
||||
get_android_system_id() {
|
||||
kernel=$($BUSYBOX uname -r)
|
||||
hardware=$($BUSYBOX ip a | $BUSYBOX grep 'link/ether' | $BUSYBOX sed 's/://g' | $BUSYBOX awk '{print $2}' | $BUSYBOX tr -d '\n')
|
||||
filesystem=$(content query --uri content://settings/secure --projection value --where "name='android_id'" | $BUSYBOX cut -f2 -d=)
|
||||
echo "$hardware/$kernel/$filesystem"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Main Function Dispatcher
|
||||
################################################################################
|
||||
@@ -323,6 +378,18 @@ hotplug_online_all)
|
||||
read_tree_values)
|
||||
read_tree_values $*
|
||||
;;
|
||||
read_tree_tgz_b64)
|
||||
read_tree_tgz_b64 $*
|
||||
;;
|
||||
get_linux_system_id)
|
||||
get_linux_system_id $*
|
||||
;;
|
||||
get_android_system_id)
|
||||
get_android_system_id $*
|
||||
;;
|
||||
sched_get_kernel_attributes)
|
||||
sched_get_kernel_attributes $*
|
||||
;;
|
||||
*)
|
||||
echo "Command [$CMD] not supported"
|
||||
exit -1
|
||||
|
BIN
devlib/bin/x86/busybox
Executable file
BIN
devlib/bin/x86/busybox
Executable file
Binary file not shown.
BIN
devlib/bin/x86/simpleperf
Executable file
BIN
devlib/bin/x86/simpleperf
Executable file
Binary file not shown.
Binary file not shown.
BIN
devlib/bin/x86_64/simpleperf
Executable file
BIN
devlib/bin/x86_64/simpleperf
Executable file
Binary file not shown.
75
devlib/collector/__init__.py
Normal file
75
devlib/collector/__init__.py
Normal file
@@ -0,0 +1,75 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
from devlib.utils.types import caseless_string
|
||||
|
||||
class CollectorBase(object):
|
||||
|
||||
def __init__(self, target):
|
||||
self.target = target
|
||||
self.logger = logging.getLogger(self.__class__.__name__)
|
||||
self.output_path = None
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
return CollectorOutput()
|
||||
|
||||
def __enter__(self):
|
||||
self.reset()
|
||||
self.start()
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.stop()
|
||||
|
||||
class CollectorOutputEntry(object):
|
||||
|
||||
path_kinds = ['file', 'directory']
|
||||
|
||||
def __init__(self, path, path_kind):
|
||||
self.path = path
|
||||
|
||||
path_kind = caseless_string(path_kind)
|
||||
if path_kind not in self.path_kinds:
|
||||
msg = '{} is not a valid path_kind [{}]'
|
||||
raise ValueError(msg.format(path_kind, ' '.join(self.path_kinds)))
|
||||
self.path_kind = path_kind
|
||||
|
||||
def __str__(self):
|
||||
return self.path
|
||||
|
||||
def __repr__(self):
|
||||
return '<{} ({})>'.format(self.path, self.path_kind)
|
||||
|
||||
def __fspath__(self):
|
||||
"""Allow using with os.path operations"""
|
||||
return self.path
|
||||
|
||||
|
||||
class CollectorOutput(list):
|
||||
pass
|
216
devlib/collector/dmesg.py
Normal file
216
devlib/collector/dmesg.py
Normal file
@@ -0,0 +1,216 @@
|
||||
# Copyright 2019 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from __future__ import division
|
||||
import re
|
||||
from itertools import takewhile
|
||||
from datetime import timedelta
|
||||
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.target import KernelConfigTristate
|
||||
from devlib.exception import TargetStableError
|
||||
|
||||
|
||||
class KernelLogEntry(object):
|
||||
"""
|
||||
Entry of the kernel ring buffer.
|
||||
|
||||
:param facility: facility the entry comes from
|
||||
:type facility: str
|
||||
|
||||
:param level: log level
|
||||
:type level: str
|
||||
|
||||
:param timestamp: Timestamp of the entry
|
||||
:type timestamp: datetime.timedelta
|
||||
|
||||
:param msg: Content of the entry
|
||||
:type msg: str
|
||||
"""
|
||||
|
||||
_TIMESTAMP_MSG_REGEX = re.compile(r'\[(.*?)\] (.*)')
|
||||
_RAW_LEVEL_REGEX = re.compile(r'<([0-9]+)>(.*)')
|
||||
_PRETTY_LEVEL_REGEX = re.compile(r'\s*([a-z]+)\s*:([a-z]+)\s*:\s*(.*)')
|
||||
|
||||
def __init__(self, facility, level, timestamp, msg):
|
||||
self.facility = facility
|
||||
self.level = level
|
||||
self.timestamp = timestamp
|
||||
self.msg = msg
|
||||
|
||||
@classmethod
|
||||
def from_str(cls, line):
|
||||
"""
|
||||
Parses a "dmesg --decode" output line, formatted as following:
|
||||
kern :err : [3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
|
||||
|
||||
Or the more basic output given by "dmesg -r":
|
||||
<3>[3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
|
||||
|
||||
"""
|
||||
|
||||
def parse_raw_level(line):
|
||||
match = cls._RAW_LEVEL_REGEX.match(line)
|
||||
if not match:
|
||||
raise ValueError('dmesg entry format not recognized: {}'.format(line))
|
||||
level, remainder = match.groups()
|
||||
levels = DmesgCollector.LOG_LEVELS
|
||||
# BusyBox dmesg can output numbers that need to wrap around
|
||||
level = levels[int(level) % len(levels)]
|
||||
return level, remainder
|
||||
|
||||
def parse_pretty_level(line):
|
||||
match = cls._PRETTY_LEVEL_REGEX.match(line)
|
||||
facility, level, remainder = match.groups()
|
||||
return facility, level, remainder
|
||||
|
||||
def parse_timestamp_msg(line):
|
||||
match = cls._TIMESTAMP_MSG_REGEX.match(line)
|
||||
timestamp, msg = match.groups()
|
||||
timestamp = timedelta(seconds=float(timestamp.strip()))
|
||||
return timestamp, msg
|
||||
|
||||
line = line.strip()
|
||||
|
||||
# If we can parse the raw prio directly, that is a basic line
|
||||
try:
|
||||
level, remainder = parse_raw_level(line)
|
||||
facility = None
|
||||
except ValueError:
|
||||
facility, level, remainder = parse_pretty_level(line)
|
||||
|
||||
timestamp, msg = parse_timestamp_msg(remainder)
|
||||
|
||||
return cls(
|
||||
facility=facility,
|
||||
level=level,
|
||||
timestamp=timestamp,
|
||||
msg=msg.strip(),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_dmesg_output(cls, dmesg_out):
|
||||
"""
|
||||
Return a generator of :class:`KernelLogEntry` for each line of the
|
||||
output of dmesg command.
|
||||
|
||||
.. note:: The same restrictions on the dmesg output format as for
|
||||
:meth:`from_str` apply.
|
||||
"""
|
||||
for line in dmesg_out.splitlines():
|
||||
if line.strip():
|
||||
yield cls.from_str(line)
|
||||
|
||||
def __str__(self):
|
||||
facility = self.facility + ': ' if self.facility else ''
|
||||
return '{facility}{level}: [{timestamp}] {msg}'.format(
|
||||
facility=facility,
|
||||
level=self.level,
|
||||
timestamp=self.timestamp.total_seconds(),
|
||||
msg=self.msg,
|
||||
)
|
||||
|
||||
|
||||
class DmesgCollector(CollectorBase):
|
||||
"""
|
||||
Dmesg output collector.
|
||||
|
||||
:param level: Minimum log level to enable. All levels that are more
|
||||
critical will be collected as well.
|
||||
:type level: str
|
||||
|
||||
:param facility: Facility to record, see dmesg --help for the list.
|
||||
:type level: str
|
||||
|
||||
.. warning:: If BusyBox dmesg is used, facility and level will be ignored,
|
||||
and the parsed entries will also lack that information.
|
||||
"""
|
||||
|
||||
# taken from "dmesg --help"
|
||||
# This list needs to be ordered by priority
|
||||
LOG_LEVELS = [
|
||||
"emerg", # system is unusable
|
||||
"alert", # action must be taken immediately
|
||||
"crit", # critical conditions
|
||||
"err", # error conditions
|
||||
"warn", # warning conditions
|
||||
"notice", # normal but significant condition
|
||||
"info", # informational
|
||||
"debug", # debug-level messages
|
||||
]
|
||||
|
||||
def __init__(self, target, level=LOG_LEVELS[-1], facility='kern'):
|
||||
super(DmesgCollector, self).__init__(target)
|
||||
|
||||
if not target.is_rooted:
|
||||
raise TargetStableError('Cannot collect dmesg on non-rooted target')
|
||||
|
||||
self.output_path = None
|
||||
|
||||
if level not in self.LOG_LEVELS:
|
||||
raise ValueError('level needs to be one of: {}'.format(
|
||||
', '.join(self.LOG_LEVELS)
|
||||
))
|
||||
self.level = level
|
||||
|
||||
# Check if dmesg is the BusyBox one, or the one from util-linux in a
|
||||
# recent version.
|
||||
# Note: BusyBox dmesg does not support -h, but will still print the
|
||||
# help with an exit code of 1
|
||||
self.basic_dmesg = '--force-prefix' not in \
|
||||
self.target.execute('dmesg -h', check_exit_code=False)
|
||||
self.facility = facility
|
||||
self.needs_root = bool(target.config.typed_config.get(
|
||||
'CONFIG_SECURITY_DMESG_RESTRICT', KernelConfigTristate.NO))
|
||||
self.reset()
|
||||
|
||||
@property
|
||||
def entries(self):
|
||||
return KernelLogEntry.from_dmesg_output(self.dmesg_out)
|
||||
|
||||
def reset(self):
|
||||
self.dmesg_out = None
|
||||
|
||||
def start(self):
|
||||
self.reset()
|
||||
# Empty the dmesg ring buffer. This requires root in all cases
|
||||
self.target.execute('dmesg -c', as_root=True)
|
||||
|
||||
def stop(self):
|
||||
levels_list = list(takewhile(
|
||||
lambda level: level != self.level,
|
||||
self.LOG_LEVELS
|
||||
))
|
||||
levels_list.append(self.level)
|
||||
if self.basic_dmesg:
|
||||
cmd = 'dmesg -r'
|
||||
else:
|
||||
cmd = 'dmesg --facility={facility} --force-prefix --decode --level={levels}'.format(
|
||||
levels=','.join(levels_list),
|
||||
facility=self.facility,
|
||||
)
|
||||
|
||||
self.dmesg_out = self.target.execute(cmd, as_root=self.needs_root)
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
with open(self.output_path, 'wt') as f:
|
||||
f.write(self.dmesg_out + '\n')
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
@@ -20,11 +20,14 @@ import time
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import contextlib
|
||||
from pipes import quote
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.utils.misc import check_output, which
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.utils.misc import check_output, which, memoized
|
||||
|
||||
|
||||
TRACE_MARKER_START = 'TRACE_MARKER_START'
|
||||
@@ -48,11 +51,14 @@ TIMEOUT = 180
|
||||
CPU_RE = re.compile(r' Function \(CPU([0-9]+)\)')
|
||||
STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+) us')
|
||||
|
||||
class FtraceCollector(TraceCollector):
|
||||
class FtraceCollector(CollectorBase):
|
||||
|
||||
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
|
||||
def __init__(self, target,
|
||||
events=None,
|
||||
functions=None,
|
||||
tracer=None,
|
||||
trace_children_functions=False,
|
||||
buffer_size=None,
|
||||
buffer_size_step=1000,
|
||||
tracing_path='/sys/kernel/debug/tracing',
|
||||
@@ -62,28 +68,37 @@ class FtraceCollector(TraceCollector):
|
||||
no_install=False,
|
||||
strict=False,
|
||||
report_on_target=False,
|
||||
trace_clock='local',
|
||||
saved_cmdlines_nr=4096,
|
||||
):
|
||||
super(FtraceCollector, self).__init__(target)
|
||||
self.events = events if events is not None else DEFAULT_EVENTS
|
||||
self.functions = functions
|
||||
self.tracer = tracer
|
||||
self.trace_children_functions = trace_children_functions
|
||||
self.buffer_size = buffer_size
|
||||
self.buffer_size_step = buffer_size_step
|
||||
self.tracing_path = tracing_path
|
||||
self.automark = automark
|
||||
self.autoreport = autoreport
|
||||
self.autoview = autoview
|
||||
self.strict = strict
|
||||
self.report_on_target = report_on_target
|
||||
self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
|
||||
text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
|
||||
self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
|
||||
self.output_path = None
|
||||
self.target_binary = None
|
||||
self.host_binary = None
|
||||
self.start_time = None
|
||||
self.stop_time = None
|
||||
self.event_string = None
|
||||
self.function_string = None
|
||||
self.trace_clock = trace_clock
|
||||
self.saved_cmdlines_nr = saved_cmdlines_nr
|
||||
self._reset_needed = True
|
||||
|
||||
# pylint: disable=bad-whitespace
|
||||
# Setup tracing paths
|
||||
self.available_events_file = self.target.path.join(self.tracing_path, 'available_events')
|
||||
self.available_functions_file = self.target.path.join(self.tracing_path, 'available_filter_functions')
|
||||
@@ -92,12 +107,15 @@ class FtraceCollector(TraceCollector):
|
||||
self.function_profile_file = self.target.path.join(self.tracing_path, 'function_profile_enabled')
|
||||
self.marker_file = self.target.path.join(self.tracing_path, 'trace_marker')
|
||||
self.ftrace_filter_file = self.target.path.join(self.tracing_path, 'set_ftrace_filter')
|
||||
self.trace_clock_file = self.target.path.join(self.tracing_path, 'trace_clock')
|
||||
self.save_cmdlines_size_file = self.target.path.join(self.tracing_path, 'saved_cmdlines_size')
|
||||
self.available_tracers_file = self.target.path.join(self.tracing_path, 'available_tracers')
|
||||
|
||||
self.host_binary = which('trace-cmd')
|
||||
self.kernelshark = which('kernelshark')
|
||||
|
||||
if not self.target.is_rooted:
|
||||
raise TargetError('trace-cmd instrument cannot be used on an unrooted device.')
|
||||
raise TargetStableError('trace-cmd instrument cannot be used on an unrooted device.')
|
||||
if self.autoreport and not self.report_on_target and self.host_binary is None:
|
||||
raise HostError('trace-cmd binary must be installed on the host if autoreport=True.')
|
||||
if self.autoview and self.kernelshark is None:
|
||||
@@ -107,69 +125,150 @@ class FtraceCollector(TraceCollector):
|
||||
self.target_binary = self.target.install(host_file)
|
||||
else:
|
||||
if not self.target.is_installed('trace-cmd'):
|
||||
raise TargetError('No trace-cmd found on device and no_install=True is specified.')
|
||||
raise TargetStableError('No trace-cmd found on device and no_install=True is specified.')
|
||||
self.target_binary = 'trace-cmd'
|
||||
|
||||
# Validate required events to be traced
|
||||
available_events = self.target.execute(
|
||||
'cat {}'.format(self.available_events_file),
|
||||
as_root=True).splitlines()
|
||||
selected_events = []
|
||||
for event in self.events:
|
||||
# Convert globs supported by FTrace into valid regexp globs
|
||||
_event = event
|
||||
if event[0] != '*':
|
||||
_event = '*' + event
|
||||
event_re = re.compile(_event.replace('*', '.*'))
|
||||
# Select events matching the required ones
|
||||
if len(list(filter(event_re.match, available_events))) == 0:
|
||||
message = 'Event [{}] not available for tracing'.format(event)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
self.target.logger.warning(message)
|
||||
def event_to_regex(event):
|
||||
if not event.startswith('*'):
|
||||
event = '*' + event
|
||||
|
||||
return re.compile(event.replace('*', '.*'))
|
||||
|
||||
def event_is_in_list(event, events):
|
||||
return any(
|
||||
event_to_regex(event).match(_event)
|
||||
for _event in events
|
||||
)
|
||||
|
||||
unavailable_events = [
|
||||
event
|
||||
for event in self.events
|
||||
if not event_is_in_list(event, self.available_events)
|
||||
]
|
||||
if unavailable_events:
|
||||
message = 'Events not available for tracing: {}'.format(
|
||||
', '.join(unavailable_events)
|
||||
)
|
||||
if self.strict:
|
||||
raise TargetStableError(message)
|
||||
else:
|
||||
selected_events.append(event)
|
||||
# If function profiling is enabled we always need at least one event.
|
||||
# Thus, if not other events have been specified, try to add at least
|
||||
# a tracepoint which is always available and possibly triggered few
|
||||
# times.
|
||||
if self.functions and len(selected_events) == 0:
|
||||
selected_events = ['sched_wakeup_new']
|
||||
self.event_string = _build_trace_events(selected_events)
|
||||
self.target.logger.warning(message)
|
||||
|
||||
selected_events = sorted(set(self.events) - set(unavailable_events))
|
||||
|
||||
if self.tracer and self.tracer not in self.available_tracers:
|
||||
raise TargetStableError('Unsupported tracer "{}". Available tracers: {}'.format(
|
||||
self.tracer, ', '.join(self.available_tracers)))
|
||||
|
||||
# Check for function tracing support
|
||||
if self.functions:
|
||||
if not self.target.file_exists(self.function_profile_file):
|
||||
raise TargetError('Function profiling not supported. '\
|
||||
'A kernel build with CONFIG_FUNCTION_PROFILER enable is required')
|
||||
# Validate required functions to be traced
|
||||
available_functions = self.target.execute(
|
||||
'cat {}'.format(self.available_functions_file),
|
||||
as_root=True).splitlines()
|
||||
selected_functions = []
|
||||
for function in self.functions:
|
||||
if function not in available_functions:
|
||||
message = 'Function [{}] not available for profiling'.format(function)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
if function not in self.available_functions:
|
||||
message = 'Function [{}] not available for tracing/profiling'.format(function)
|
||||
if self.strict:
|
||||
raise TargetStableError(message)
|
||||
self.target.logger.warning(message)
|
||||
else:
|
||||
selected_functions.append(function)
|
||||
self.function_string = _build_trace_functions(selected_functions)
|
||||
|
||||
# Function profiling
|
||||
if self.tracer is None:
|
||||
if not self.target.file_exists(self.function_profile_file):
|
||||
raise TargetStableError('Function profiling not supported. '\
|
||||
'A kernel build with CONFIG_FUNCTION_PROFILER enable is required')
|
||||
self.function_string = _build_trace_functions(selected_functions)
|
||||
# If function profiling is enabled we always need at least one event.
|
||||
# Thus, if not other events have been specified, try to add at least
|
||||
# a tracepoint which is always available and possibly triggered few
|
||||
# times.
|
||||
if not selected_events:
|
||||
selected_events = ['sched_wakeup_new']
|
||||
|
||||
# Function tracing
|
||||
elif self.tracer == 'function':
|
||||
self.function_string = _build_graph_functions(selected_functions, False)
|
||||
|
||||
# Function graphing
|
||||
elif self.tracer == 'function_graph':
|
||||
self.function_string = _build_graph_functions(selected_functions, trace_children_functions)
|
||||
|
||||
self.event_string = _build_trace_events(selected_events)
|
||||
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_tracers(self):
|
||||
"""
|
||||
List of ftrace tracers supported by the target's kernel.
|
||||
"""
|
||||
return self.target.read_value(self.available_tracers_file).split(' ')
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_events(self):
|
||||
"""
|
||||
List of ftrace events supported by the target's kernel.
|
||||
"""
|
||||
return self.target.read_value(self.available_events_file).splitlines()
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_functions(self):
|
||||
"""
|
||||
List of functions whose tracing/profiling is supported by the target's kernel.
|
||||
"""
|
||||
return self.target.read_value(self.available_functions_file).splitlines()
|
||||
|
||||
def reset(self):
|
||||
if self.buffer_size:
|
||||
self._set_buffer_size()
|
||||
self.target.execute('{} reset'.format(self.target_binary),
|
||||
as_root=True, timeout=TIMEOUT)
|
||||
if self.functions:
|
||||
self.target.write_value(self.function_profile_file, 0, verify=False)
|
||||
self._reset_needed = False
|
||||
|
||||
def start(self):
|
||||
self.start_time = time.time()
|
||||
if self._reset_needed:
|
||||
self.reset()
|
||||
self.target.execute('{} start {}'.format(self.target_binary, self.event_string),
|
||||
as_root=True)
|
||||
|
||||
if self.tracer is not None and 'function' in self.tracer:
|
||||
tracecmd_functions = self.function_string
|
||||
else:
|
||||
tracecmd_functions = ''
|
||||
|
||||
tracer_string = '-p {}'.format(self.tracer) if self.tracer else ''
|
||||
|
||||
# Ensure kallsyms contains addresses if possible, so that function the
|
||||
# collected trace contains enough data for pretty printing
|
||||
with contextlib.suppress(TargetStableError):
|
||||
self.target.write_value('/proc/sys/kernel/kptr_restrict', 0)
|
||||
|
||||
self.target.write_value(self.trace_clock_file, self.trace_clock, verify=False)
|
||||
try:
|
||||
self.target.write_value(self.save_cmdlines_size_file, self.saved_cmdlines_nr)
|
||||
except TargetStableError as e:
|
||||
message = 'Could not set "save_cmdlines_size"'
|
||||
if self.strict:
|
||||
self.logger.error(message)
|
||||
raise e
|
||||
else:
|
||||
self.logger.warning(message)
|
||||
self.logger.debug(e)
|
||||
|
||||
self.target.execute(
|
||||
'{} start {events} {tracer} {functions}'.format(
|
||||
self.target_binary,
|
||||
events=self.event_string,
|
||||
tracer=tracer_string,
|
||||
functions=tracecmd_functions,
|
||||
),
|
||||
as_root=True,
|
||||
)
|
||||
if self.automark:
|
||||
self.mark_start()
|
||||
if 'cpufreq' in self.target.modules:
|
||||
@@ -179,7 +278,7 @@ class FtraceCollector(TraceCollector):
|
||||
self.logger.debug('Trace CPUIdle states')
|
||||
self.target.cpuidle.perturb_cpus()
|
||||
# Enable kernel function profiling
|
||||
if self.functions:
|
||||
if self.functions and self.tracer is None:
|
||||
self.target.execute('echo nop > {}'.format(self.current_tracer_file),
|
||||
as_root=True)
|
||||
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
|
||||
@@ -192,8 +291,8 @@ class FtraceCollector(TraceCollector):
|
||||
|
||||
def stop(self):
|
||||
# Disable kernel function profiling
|
||||
if self.functions:
|
||||
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
|
||||
if self.functions and self.tracer is None:
|
||||
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
|
||||
as_root=True)
|
||||
if 'cpufreq' in self.target.modules:
|
||||
self.logger.debug('Trace CPUFreq frequencies')
|
||||
@@ -205,9 +304,14 @@ class FtraceCollector(TraceCollector):
|
||||
timeout=TIMEOUT, as_root=True)
|
||||
self._reset_needed = True
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if os.path.isdir(outfile):
|
||||
outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
|
||||
def set_output(self, output_path):
|
||||
if os.path.isdir(output_path):
|
||||
output_path = os.path.join(output_path, os.path.basename(self.target_output_file))
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
|
||||
self.target_output_file),
|
||||
timeout=TIMEOUT, as_root=True)
|
||||
@@ -216,27 +320,32 @@ class FtraceCollector(TraceCollector):
|
||||
# Therefore timout for the pull command must also be adjusted
|
||||
# accordingly.
|
||||
pull_timeout = 10 * (self.stop_time - self.start_time)
|
||||
self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
|
||||
if not os.path.isfile(outfile):
|
||||
self.target.pull(self.target_output_file, self.output_path, timeout=pull_timeout)
|
||||
output = CollectorOutput()
|
||||
if not os.path.isfile(self.output_path):
|
||||
self.logger.warning('Binary trace not pulled from device.')
|
||||
else:
|
||||
output.append(CollectorOutputEntry(self.output_path, 'file'))
|
||||
if self.autoreport:
|
||||
textfile = os.path.splitext(outfile)[0] + '.txt'
|
||||
textfile = os.path.splitext(self.output_path)[0] + '.txt'
|
||||
if self.report_on_target:
|
||||
self.generate_report_on_target()
|
||||
self.target.pull(self.target_text_file,
|
||||
textfile, timeout=pull_timeout)
|
||||
else:
|
||||
self.report(outfile, textfile)
|
||||
self.report(self.output_path, textfile)
|
||||
output.append(CollectorOutputEntry(textfile, 'file'))
|
||||
if self.autoview:
|
||||
self.view(outfile)
|
||||
self.view(self.output_path)
|
||||
return output
|
||||
|
||||
def get_stats(self, outfile):
|
||||
if not self.functions:
|
||||
if not (self.functions and self.tracer is None):
|
||||
return
|
||||
|
||||
if os.path.isdir(outfile):
|
||||
outfile = os.path.join(outfile, OUTPUT_PROFILE_FILE)
|
||||
# pylint: disable=protected-access
|
||||
output = self.target._execute_util('ftrace_get_function_stats',
|
||||
as_root=True)
|
||||
|
||||
@@ -264,7 +373,7 @@ class FtraceCollector(TraceCollector):
|
||||
|
||||
self.logger.debug("FTrace stats output [%s]...", outfile)
|
||||
with open(outfile, 'w') as fh:
|
||||
json.dump(function_stats, fh, indent=4)
|
||||
json.dump(function_stats, fh, indent=4)
|
||||
self.logger.debug("FTrace function stats save in [%s]", outfile)
|
||||
|
||||
return function_stats
|
||||
@@ -278,9 +387,9 @@ class FtraceCollector(TraceCollector):
|
||||
process = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True)
|
||||
_, error = process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
error = error.decode(sys.stdout.encoding, 'replace')
|
||||
error = error.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
if process.returncode:
|
||||
raise TargetError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
|
||||
raise TargetStableError('trace-cmd returned non-zero exit code {}'.format(process.returncode))
|
||||
if error:
|
||||
# logged at debug level, as trace-cmd always outputs some
|
||||
# errors that seem benign.
|
||||
@@ -348,3 +457,10 @@ def _build_trace_events(events):
|
||||
def _build_trace_functions(functions):
|
||||
function_string = " ".join(functions)
|
||||
return function_string
|
||||
|
||||
def _build_graph_functions(functions, trace_children_functions):
|
||||
opt = 'g' if trace_children_functions else 'l'
|
||||
return ' '.join(
|
||||
'-{} {}'.format(opt, quote(f))
|
||||
for f in functions
|
||||
)
|
@@ -14,19 +14,22 @@
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.utils.android import LogcatMonitor
|
||||
|
||||
class LogcatCollector(TraceCollector):
|
||||
class LogcatCollector(CollectorBase):
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
def __init__(self, target, regexps=None, logcat_format=None):
|
||||
super(LogcatCollector, self).__init__(target)
|
||||
self.regexps = regexps
|
||||
self.logcat_format = logcat_format
|
||||
self.output_path = None
|
||||
self._collecting = False
|
||||
self._prev_log = None
|
||||
self._monitor = None
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
@@ -45,12 +48,14 @@ class LogcatCollector(TraceCollector):
|
||||
"""
|
||||
Start collecting logcat lines
|
||||
"""
|
||||
self._monitor = LogcatMonitor(self.target, self.regexps)
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
self._monitor = LogcatMonitor(self.target, self.regexps, logcat_format=self.logcat_format)
|
||||
if self._prev_log:
|
||||
# Append new data collection to previous collection
|
||||
self._monitor.start(self._prev_log)
|
||||
else:
|
||||
self._monitor.start()
|
||||
self._monitor.start(self.output_path)
|
||||
|
||||
self._collecting = True
|
||||
|
||||
@@ -65,9 +70,10 @@ class LogcatCollector(TraceCollector):
|
||||
self._collecting = False
|
||||
self._prev_log = self._monitor.logfile
|
||||
|
||||
def get_trace(self, outfile):
|
||||
"""
|
||||
Output collected logcat lines to designated file
|
||||
"""
|
||||
# copy self._monitor.logfile to outfile
|
||||
shutil.copy(self._monitor.logfile, outfile)
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
280
devlib/collector/perf.py
Normal file
280
devlib/collector/perf.py
Normal file
@@ -0,0 +1,280 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from past.builtins import basestring, zip
|
||||
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.utils.misc import ensure_file_directory_exists as _f
|
||||
|
||||
|
||||
PERF_STAT_COMMAND_TEMPLATE = '{binary} {command} {options} {events} {sleep_cmd} > {outfile} 2>&1 '
|
||||
PERF_REPORT_COMMAND_TEMPLATE= '{binary} report {options} -i {datafile} > {outfile} 2>&1 '
|
||||
PERF_REPORT_SAMPLE_COMMAND_TEMPLATE= '{binary} report-sample {options} -i {datafile} > {outfile} '
|
||||
PERF_RECORD_COMMAND_TEMPLATE= '{binary} record {options} {events} -o {outfile}'
|
||||
|
||||
PERF_DEFAULT_EVENTS = [
|
||||
'cpu-migrations',
|
||||
'context-switches',
|
||||
]
|
||||
|
||||
SIMPLEPERF_DEFAULT_EVENTS = [
|
||||
'raw-cpu-cycles',
|
||||
'raw-l1-dcache',
|
||||
'raw-l1-dcache-refill',
|
||||
'raw-br-mis-pred',
|
||||
'raw-instruction-retired',
|
||||
]
|
||||
|
||||
DEFAULT_EVENTS = {'perf':PERF_DEFAULT_EVENTS, 'simpleperf':SIMPLEPERF_DEFAULT_EVENTS}
|
||||
|
||||
class PerfCollector(CollectorBase):
|
||||
"""
|
||||
Perf is a Linux profiling with performance counters.
|
||||
Simpleperf is an Android profiling tool with performance counters.
|
||||
|
||||
It is highly recomended to use perf_type = simpleperf when using this instrument
|
||||
on android devices, since it recognises android symbols in record mode and is much more stable
|
||||
when reporting record .data files. For more information see simpleperf documentation at:
|
||||
https://android.googlesource.com/platform/system/extras/+/master/simpleperf/doc/README.md
|
||||
|
||||
Performance counters are CPU hardware registers that count hardware events
|
||||
such as instructions executed, cache-misses suffered, or branches
|
||||
mispredicted. They form a basis for profiling applications to trace dynamic
|
||||
control flow and identify hotspots.
|
||||
|
||||
pref accepts options and events. If no option is given the default '-a' is
|
||||
used. For events, the default events are migrations and cs for perf and raw-cpu-cycles,
|
||||
raw-l1-dcache, raw-l1-dcache-refill, raw-instructions-retired. They both can
|
||||
be specified in the config file.
|
||||
|
||||
Events must be provided as a list that contains them and they will look like
|
||||
this ::
|
||||
|
||||
perf_events = ['migrations', 'cs']
|
||||
|
||||
Events can be obtained by typing the following in the command line on the
|
||||
device ::
|
||||
|
||||
perf list
|
||||
simpleperf list
|
||||
|
||||
Whereas options, they can be provided as a single string as following ::
|
||||
|
||||
perf_options = '-a -i'
|
||||
|
||||
Options can be obtained by running the following in the command line ::
|
||||
|
||||
man perf-stat
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
target,
|
||||
perf_type='perf',
|
||||
command='stat',
|
||||
events=None,
|
||||
optionstring=None,
|
||||
report_options=None,
|
||||
run_report_sample=False,
|
||||
report_sample_options=None,
|
||||
labels=None,
|
||||
force_install=False):
|
||||
super(PerfCollector, self).__init__(target)
|
||||
self.force_install = force_install
|
||||
self.labels = labels
|
||||
self.report_options = report_options
|
||||
self.run_report_sample = run_report_sample
|
||||
self.report_sample_options = report_sample_options
|
||||
self.output_path = None
|
||||
|
||||
# Validate parameters
|
||||
if isinstance(optionstring, list):
|
||||
self.optionstrings = optionstring
|
||||
else:
|
||||
self.optionstrings = [optionstring]
|
||||
if perf_type in ['perf', 'simpleperf']:
|
||||
self.perf_type = perf_type
|
||||
else:
|
||||
raise ValueError('Invalid perf type: {}, must be perf or simpleperf'.format(perf_type))
|
||||
if not events:
|
||||
self.events = DEFAULT_EVENTS[self.perf_type]
|
||||
else:
|
||||
self.events = events
|
||||
if isinstance(self.events, basestring):
|
||||
self.events = [self.events]
|
||||
if not self.labels:
|
||||
self.labels = ['perf_{}'.format(i) for i in range(len(self.optionstrings))]
|
||||
if len(self.labels) != len(self.optionstrings):
|
||||
raise ValueError('The number of labels must match the number of optstrings provided for perf.')
|
||||
if command in ['stat', 'record']:
|
||||
self.command = command
|
||||
else:
|
||||
raise ValueError('Unsupported perf command, must be stat or record')
|
||||
if report_options and (command != 'record'):
|
||||
raise ValueError('report_options specified, but command is not record')
|
||||
if report_sample_options and (command != 'record'):
|
||||
raise ValueError('report_sample_options specified, but command is not record')
|
||||
|
||||
self.binary = self.target.get_installed(self.perf_type)
|
||||
if self.force_install or not self.binary:
|
||||
self.binary = self._deploy_perf()
|
||||
|
||||
self._validate_events(self.events)
|
||||
|
||||
self.commands = self._build_commands()
|
||||
|
||||
def reset(self):
|
||||
self.target.killall(self.perf_type, as_root=self.target.is_rooted)
|
||||
self.target.remove(self.target.get_workpath('TemporaryFile*'))
|
||||
for label in self.labels:
|
||||
filepath = self._get_target_file(label, 'data')
|
||||
self.target.remove(filepath)
|
||||
filepath = self._get_target_file(label, 'rpt')
|
||||
self.target.remove(filepath)
|
||||
filepath = self._get_target_file(label, 'rptsamples')
|
||||
self.target.remove(filepath)
|
||||
|
||||
def start(self):
|
||||
for command in self.commands:
|
||||
self.target.background(command, as_root=self.target.is_rooted)
|
||||
|
||||
def stop(self):
|
||||
self.target.killall(self.perf_type, signal='SIGINT',
|
||||
as_root=self.target.is_rooted)
|
||||
if self.perf_type == "perf" and self.command == "stat":
|
||||
# perf doesn't transmit the signal to its sleep call so handled here:
|
||||
self.target.killall('sleep', as_root=self.target.is_rooted)
|
||||
# NB: we hope that no other "important" sleep is on-going
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
|
||||
output = CollectorOutput()
|
||||
|
||||
for label in self.labels:
|
||||
if self.command == 'record':
|
||||
self._wait_for_data_file_write(label, self.output_path)
|
||||
path = self._pull_target_file_to_host(label, 'rpt', self.output_path)
|
||||
output.append(CollectorOutputEntry(path, 'file'))
|
||||
if self.run_report_sample:
|
||||
report_samples_path = self._pull_target_file_to_host(label, 'rptsamples', self.output_path)
|
||||
output.append(CollectorOutputEntry(report_samples_path, 'file'))
|
||||
else:
|
||||
path = self._pull_target_file_to_host(label, 'out', self.output_path)
|
||||
output.append(CollectorOutputEntry(path, 'file'))
|
||||
return output
|
||||
|
||||
def _deploy_perf(self):
|
||||
host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
self.target.abi, self.perf_type)
|
||||
return self.target.install(host_executable)
|
||||
|
||||
def _get_target_file(self, label, extension):
|
||||
return self.target.get_workpath('{}.{}'.format(label, extension))
|
||||
|
||||
def _build_commands(self):
|
||||
commands = []
|
||||
for opts, label in zip(self.optionstrings, self.labels):
|
||||
if self.command == 'stat':
|
||||
commands.append(self._build_perf_stat_command(opts, self.events, label))
|
||||
else:
|
||||
commands.append(self._build_perf_record_command(opts, label))
|
||||
return commands
|
||||
|
||||
def _build_perf_stat_command(self, options, events, label):
|
||||
event_string = ' '.join(['-e {}'.format(e) for e in events])
|
||||
sleep_cmd = 'sleep 1000' if self.perf_type == 'perf' else ''
|
||||
command = PERF_STAT_COMMAND_TEMPLATE.format(binary = self.binary,
|
||||
command = self.command,
|
||||
options = options or '',
|
||||
events = event_string,
|
||||
sleep_cmd = sleep_cmd,
|
||||
outfile = self._get_target_file(label, 'out'))
|
||||
return command
|
||||
|
||||
def _build_perf_report_command(self, report_options, label):
|
||||
command = PERF_REPORT_COMMAND_TEMPLATE.format(binary=self.binary,
|
||||
options=report_options or '',
|
||||
datafile=self._get_target_file(label, 'data'),
|
||||
outfile=self._get_target_file(label, 'rpt'))
|
||||
return command
|
||||
|
||||
def _build_perf_report_sample_command(self, label):
|
||||
command = PERF_REPORT_SAMPLE_COMMAND_TEMPLATE.format(binary=self.binary,
|
||||
options=self.report_sample_options or '',
|
||||
datafile=self._get_target_file(label, 'data'),
|
||||
outfile=self._get_target_file(label, 'rptsamples'))
|
||||
return command
|
||||
|
||||
def _build_perf_record_command(self, options, label):
|
||||
event_string = ' '.join(['-e {}'.format(e) for e in self.events])
|
||||
command = PERF_RECORD_COMMAND_TEMPLATE.format(binary=self.binary,
|
||||
options=options or '',
|
||||
events=event_string,
|
||||
outfile=self._get_target_file(label, 'data'))
|
||||
return command
|
||||
|
||||
def _pull_target_file_to_host(self, label, extension, output_path):
|
||||
target_file = self._get_target_file(label, extension)
|
||||
host_relpath = os.path.basename(target_file)
|
||||
host_file = _f(os.path.join(output_path, host_relpath))
|
||||
self.target.pull(target_file, host_file)
|
||||
return host_file
|
||||
|
||||
def _wait_for_data_file_write(self, label, output_path):
|
||||
data_file_finished_writing = False
|
||||
max_tries = 80
|
||||
current_tries = 0
|
||||
while not data_file_finished_writing:
|
||||
files = self.target.execute('cd {} && ls'.format(self.target.get_workpath('')))
|
||||
# Perf stores data in tempory files whilst writing to data output file. Check if they have been removed.
|
||||
if 'TemporaryFile' in files and current_tries <= max_tries:
|
||||
time.sleep(0.25)
|
||||
current_tries += 1
|
||||
else:
|
||||
if current_tries >= max_tries:
|
||||
self.logger.warning('''writing {}.data file took longer than expected,
|
||||
file may not have written correctly'''.format(label))
|
||||
data_file_finished_writing = True
|
||||
report_command = self._build_perf_report_command(self.report_options, label)
|
||||
self.target.execute(report_command)
|
||||
if self.run_report_sample:
|
||||
report_sample_command = self._build_perf_report_sample_command(label)
|
||||
self.target.execute(report_sample_command)
|
||||
|
||||
def _validate_events(self, events):
|
||||
available_events_string = self.target.execute('{} list | {} cat'.format(self.perf_type, self.target.busybox))
|
||||
available_events = available_events_string.splitlines()
|
||||
for available_event in available_events:
|
||||
if available_event == '':
|
||||
continue
|
||||
if 'OR' in available_event:
|
||||
available_events.append(available_event.split('OR')[1])
|
||||
available_events[available_events.index(available_event)] = available_event.split()[0].strip()
|
||||
# Raw hex event codes can also be passed in that do not appear on perf/simpleperf list, prefixed with 'r'
|
||||
raw_event_code_regex = re.compile(r"^r(0x|0X)?[A-Fa-f0-9]+$")
|
||||
for event in events:
|
||||
if event in available_events or re.match(raw_event_code_regex, event):
|
||||
continue
|
||||
else:
|
||||
raise ValueError('Event: {} is not in available event list for {}'.format(event, self.perf_type))
|
@@ -19,13 +19,14 @@ import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.exception import WorkerThreadError
|
||||
|
||||
|
||||
class ScreenCapturePoller(threading.Thread):
|
||||
|
||||
def __init__(self, target, period, output_path=None, timeout=30):
|
||||
def __init__(self, target, period, timeout=30):
|
||||
super(ScreenCapturePoller, self).__init__()
|
||||
self.target = target
|
||||
self.logger = logging.getLogger('screencapture')
|
||||
@@ -36,11 +37,16 @@ class ScreenCapturePoller(threading.Thread):
|
||||
self.last_poll = 0
|
||||
self.daemon = True
|
||||
self.exc = None
|
||||
self.output_path = None
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def run(self):
|
||||
self.logger.debug('Starting screen capture polling')
|
||||
try:
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
while True:
|
||||
if self.stop_signal.is_set():
|
||||
break
|
||||
@@ -66,24 +72,33 @@ class ScreenCapturePoller(threading.Thread):
|
||||
self.target.capture_screen(os.path.join(self.output_path, "screencap_{ts}.png"))
|
||||
|
||||
|
||||
class ScreenCaptureCollector(TraceCollector):
|
||||
class ScreenCaptureCollector(CollectorBase):
|
||||
|
||||
def __init__(self, target, output_path=None, period=None):
|
||||
def __init__(self, target, period=None):
|
||||
super(ScreenCaptureCollector, self).__init__(target)
|
||||
self._collecting = False
|
||||
self.output_path = output_path
|
||||
self.output_path = None
|
||||
self.period = period
|
||||
self.target = target
|
||||
self._poller = ScreenCapturePoller(self.target, self.period,
|
||||
self.output_path)
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
self._poller = ScreenCapturePoller(self.target, self.period)
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'directory')])
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start collecting the screenshots
|
||||
"""
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
self._poller.set_output(self.output_path)
|
||||
self._poller.start()
|
||||
self._collecting = True
|
||||
|
@@ -13,15 +13,16 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from pexpect.exceptions import TIMEOUT
|
||||
import shutil
|
||||
from tempfile import NamedTemporaryFile
|
||||
from pexpect.exceptions import TIMEOUT
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.utils.serial_port import get_connection
|
||||
|
||||
|
||||
class SerialTraceCollector(TraceCollector):
|
||||
class SerialTraceCollector(CollectorBase):
|
||||
|
||||
@property
|
||||
def collecting(self):
|
||||
@@ -32,32 +33,35 @@ class SerialTraceCollector(TraceCollector):
|
||||
self.serial_port = serial_port
|
||||
self.baudrate = baudrate
|
||||
self.timeout = timeout
|
||||
self.output_path = None
|
||||
|
||||
self._serial_target = None
|
||||
self._conn = None
|
||||
self._tmpfile = None
|
||||
self._outfile_fh = None
|
||||
self._collecting = False
|
||||
|
||||
def reset(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("reset was called whilst collecting")
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
if self._outfile_fh:
|
||||
self._outfile_fh.close()
|
||||
self._outfile_fh = None
|
||||
|
||||
def start(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("start was called whilst collecting")
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
|
||||
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
self._tmpfile.write("-------- Starting serial logging --------\n")
|
||||
self._outfile_fh = open(self.output_path, 'wb')
|
||||
start_marker = "-------- Starting serial logging --------\n"
|
||||
self._outfile_fh.write(start_marker.encode('utf-8'))
|
||||
|
||||
self._serial_target, self._conn = get_connection(port=self.serial_port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
logfile=self._tmpfile,
|
||||
logfile=self._outfile_fh,
|
||||
init_dtr=0)
|
||||
self._collecting = True
|
||||
|
||||
@@ -76,17 +80,20 @@ class SerialTraceCollector(TraceCollector):
|
||||
self._serial_target.close()
|
||||
del self._conn
|
||||
|
||||
self._tmpfile.write("-------- Stopping serial logging --------\n")
|
||||
stop_marker = "-------- Stopping serial logging --------\n"
|
||||
self._outfile_fh.write(stop_marker.encode('utf-8'))
|
||||
self._outfile_fh.flush()
|
||||
self._outfile_fh.close()
|
||||
self._outfile_fh = None
|
||||
|
||||
self._collecting = False
|
||||
|
||||
def get_trace(self, outfile):
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("get_trace was called whilst collecting")
|
||||
|
||||
self._tmpfile.flush()
|
||||
|
||||
shutil.copy(self._tmpfile.name, outfile)
|
||||
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
raise RuntimeError("get_data was called whilst collecting")
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
@@ -13,30 +13,16 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Arm Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from shutil import copyfile
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.utils.android import platform_tools
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
import devlib.utils.android
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
|
||||
@@ -48,7 +34,7 @@ DEFAULT_CATEGORIES = [
|
||||
'idle'
|
||||
]
|
||||
|
||||
class SystraceCollector(TraceCollector):
|
||||
class SystraceCollector(CollectorBase):
|
||||
"""
|
||||
A trace collector based on Systrace
|
||||
|
||||
@@ -74,13 +60,11 @@ class SystraceCollector(TraceCollector):
|
||||
@property
|
||||
@memoized
|
||||
def available_categories(self):
|
||||
lines = subprocess.check_output([self.systrace_binary, '-l']).splitlines()
|
||||
lines = subprocess.check_output(
|
||||
[self.systrace_binary, '-l'], universal_newlines=True
|
||||
).splitlines()
|
||||
|
||||
categories = []
|
||||
for line in lines:
|
||||
categories.append(line.split()[0])
|
||||
|
||||
return categories
|
||||
return [line.split()[0] for line in lines if line]
|
||||
|
||||
def __init__(self, target,
|
||||
categories=None,
|
||||
@@ -91,13 +75,15 @@ class SystraceCollector(TraceCollector):
|
||||
|
||||
self.categories = categories or DEFAULT_CATEGORIES
|
||||
self.buffer_size = buffer_size
|
||||
self.output_path = None
|
||||
|
||||
self._systrace_process = None
|
||||
self._tmpfile = None
|
||||
self._outfile_fh = None
|
||||
|
||||
# Try to find a systrace binary
|
||||
self.systrace_binary = None
|
||||
|
||||
platform_tools = devlib.utils.android.platform_tools
|
||||
systrace_binary_path = os.path.join(platform_tools, 'systrace', 'systrace.py')
|
||||
if not os.path.isfile(systrace_binary_path):
|
||||
raise HostError('Could not find any systrace binary under {}'.format(platform_tools))
|
||||
@@ -109,22 +95,23 @@ class SystraceCollector(TraceCollector):
|
||||
if category not in self.available_categories:
|
||||
message = 'Category [{}] not available for tracing'.format(category)
|
||||
if strict:
|
||||
raise TargetError(message)
|
||||
raise TargetStableError(message)
|
||||
self.logger.warning(message)
|
||||
|
||||
self.categories = list(set(self.categories) & set(self.available_categories))
|
||||
if not self.categories:
|
||||
raise TargetError('None of the requested categories are available')
|
||||
raise TargetStableError('None of the requested categories are available')
|
||||
|
||||
def __del__(self):
|
||||
self.reset()
|
||||
|
||||
def _build_cmd(self):
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
self._outfile_fh = open(self.output_path, 'w')
|
||||
|
||||
self.systrace_cmd = '{} -o {} -e {}'.format(
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
self.systrace_cmd = 'python2 -u {} -o {} -e {}'.format(
|
||||
self.systrace_binary,
|
||||
self._tmpfile.name,
|
||||
self._outfile_fh.name,
|
||||
self.target.adb_name
|
||||
)
|
||||
|
||||
@@ -137,13 +124,11 @@ class SystraceCollector(TraceCollector):
|
||||
if self._systrace_process:
|
||||
self.stop()
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
|
||||
def start(self):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is already underway, call stop() first")
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
|
||||
self.reset()
|
||||
|
||||
@@ -152,8 +137,11 @@ class SystraceCollector(TraceCollector):
|
||||
self._systrace_process = subprocess.Popen(
|
||||
self.systrace_cmd,
|
||||
stdin=subprocess.PIPE,
|
||||
shell=True
|
||||
stdout=subprocess.PIPE,
|
||||
shell=True,
|
||||
universal_newlines=True
|
||||
)
|
||||
self._systrace_process.stdout.read(1)
|
||||
|
||||
def stop(self):
|
||||
if not self._systrace_process:
|
||||
@@ -163,11 +151,16 @@ class SystraceCollector(TraceCollector):
|
||||
self._systrace_process.communicate('\n')
|
||||
self._systrace_process = None
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if self._outfile_fh:
|
||||
self._outfile_fh.close()
|
||||
self._outfile_fh = None
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is underway, call stop() first")
|
||||
|
||||
if not self._tmpfile:
|
||||
raise RuntimeError("No tracing data available")
|
||||
|
||||
copyfile(self._tmpfile.name, outfile)
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
533
devlib/connection.py
Normal file
533
devlib/connection.py
Normal file
@@ -0,0 +1,533 @@
|
||||
# Copyright 2019 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from contextlib import contextmanager
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
from weakref import WeakSet
|
||||
from shlex import quote
|
||||
from time import monotonic
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
import logging
|
||||
|
||||
from devlib.utils.misc import InitCheckpoint
|
||||
|
||||
_KILL_TIMEOUT = 3
|
||||
|
||||
|
||||
def _kill_pgid_cmd(pgid, sig, busybox):
|
||||
return '{} kill -{} -{}'.format(busybox, sig.value, pgid)
|
||||
|
||||
|
||||
class ConnectionBase(InitCheckpoint):
|
||||
"""
|
||||
Base class for all connections.
|
||||
"""
|
||||
def __init__(self):
|
||||
self._current_bg_cmds = WeakSet()
|
||||
self._closed = False
|
||||
self._close_lock = threading.Lock()
|
||||
self.busybox = None
|
||||
|
||||
def cancel_running_command(self):
|
||||
bg_cmds = set(self._current_bg_cmds)
|
||||
for bg_cmd in bg_cmds:
|
||||
bg_cmd.cancel()
|
||||
|
||||
@abstractmethod
|
||||
def _close(self):
|
||||
"""
|
||||
Close the connection.
|
||||
|
||||
The public :meth:`close` method makes sure that :meth:`_close` will
|
||||
only be called once, and will serialize accesses to it if it happens to
|
||||
be called from multiple threads at once.
|
||||
"""
|
||||
|
||||
def close(self):
|
||||
# Locking the closing allows any thread to safely call close() as long
|
||||
# as the connection can be closed from a thread that is not the one it
|
||||
# started its life in.
|
||||
with self._close_lock:
|
||||
if not self._closed:
|
||||
self._close()
|
||||
self._closed = True
|
||||
|
||||
# Ideally, that should not be relied upon but that will improve the chances
|
||||
# of the connection being properly cleaned up when it's not in use anymore.
|
||||
def __del__(self):
|
||||
# Since __del__ will be called if an exception is raised in __init__
|
||||
# (e.g. we cannot connect), we only run close() when we are sure
|
||||
# __init__ has completed successfully.
|
||||
if self.initialized:
|
||||
self.close()
|
||||
|
||||
|
||||
class BackgroundCommand(ABC):
|
||||
"""
|
||||
Allows managing a running background command using a subset of the
|
||||
:class:`subprocess.Popen` API.
|
||||
|
||||
Instances of this class can be used as context managers, with the same
|
||||
semantic as :class:`subprocess.Popen`.
|
||||
"""
|
||||
@abstractmethod
|
||||
def send_signal(self, sig):
|
||||
"""
|
||||
Send a POSIX signal to the background command's process group ID
|
||||
(PGID).
|
||||
|
||||
:param signal: Signal to send.
|
||||
:type signal: signal.Signals
|
||||
"""
|
||||
|
||||
def kill(self):
|
||||
"""
|
||||
Send SIGKILL to the background command.
|
||||
"""
|
||||
self.send_signal(signal.SIGKILL)
|
||||
|
||||
def cancel(self, kill_timeout=_KILL_TIMEOUT):
|
||||
"""
|
||||
Try to gracefully terminate the process by sending ``SIGTERM``, then
|
||||
waiting for ``kill_timeout`` to send ``SIGKILL``.
|
||||
"""
|
||||
if self.poll() is None:
|
||||
self._cancel(kill_timeout=kill_timeout)
|
||||
|
||||
@abstractmethod
|
||||
def _cancel(self, kill_timeout):
|
||||
"""
|
||||
Method to override in subclasses to implement :meth:`cancel`.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def wait(self):
|
||||
"""
|
||||
Block until the background command completes, and return its exit code.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def poll(self):
|
||||
"""
|
||||
Return exit code if the command has exited, None otherwise.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def stdin(self):
|
||||
"""
|
||||
File-like object connected to the background's command stdin.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def stdout(self):
|
||||
"""
|
||||
File-like object connected to the background's command stdout.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def stderr(self):
|
||||
"""
|
||||
File-like object connected to the background's command stderr.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def pid(self):
|
||||
"""
|
||||
Process Group ID (PGID) of the background command.
|
||||
|
||||
Since the command is usually wrapped in shell processes for IO
|
||||
redirections, sudo etc, the PID cannot be assumed to be the actual PID
|
||||
of the command passed by the user. It's is guaranteed to be a PGID
|
||||
instead, which means signals sent to it as such will target all
|
||||
subprocesses involved in executing that command.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def close(self):
|
||||
"""
|
||||
Close all opened streams and then wait for command completion.
|
||||
|
||||
:returns: Exit code of the command.
|
||||
|
||||
.. note:: If the command is writing to its stdout/stderr, it might be
|
||||
blocked on that and die when the streams are closed.
|
||||
"""
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
self.close()
|
||||
|
||||
|
||||
class PopenBackgroundCommand(BackgroundCommand):
|
||||
"""
|
||||
:class:`subprocess.Popen`-based background command.
|
||||
"""
|
||||
|
||||
def __init__(self, popen):
|
||||
self.popen = popen
|
||||
|
||||
def send_signal(self, sig):
|
||||
return os.killpg(self.popen.pid, sig)
|
||||
|
||||
@property
|
||||
def stdin(self):
|
||||
return self.popen.stdin
|
||||
|
||||
@property
|
||||
def stdout(self):
|
||||
return self.popen.stdout
|
||||
|
||||
@property
|
||||
def stderr(self):
|
||||
return self.popen.stderr
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self.popen.pid
|
||||
|
||||
def wait(self):
|
||||
return self.popen.wait()
|
||||
|
||||
def poll(self):
|
||||
return self.popen.poll()
|
||||
|
||||
def _cancel(self, kill_timeout):
|
||||
popen = self.popen
|
||||
os.killpg(os.getpgid(popen.pid), signal.SIGTERM)
|
||||
try:
|
||||
popen.wait(timeout=kill_timeout)
|
||||
except subprocess.TimeoutExpired:
|
||||
os.killpg(os.getpgid(popen.pid), signal.SIGKILL)
|
||||
|
||||
def close(self):
|
||||
self.popen.__exit__(None, None, None)
|
||||
return self.popen.returncode
|
||||
|
||||
def __enter__(self):
|
||||
self.popen.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
self.popen.__exit__(*args, **kwargs)
|
||||
|
||||
|
||||
class ParamikoBackgroundCommand(BackgroundCommand):
|
||||
"""
|
||||
:mod:`paramiko`-based background command.
|
||||
"""
|
||||
def __init__(self, conn, chan, pid, as_root, stdin, stdout, stderr, redirect_thread):
|
||||
self.chan = chan
|
||||
self.as_root = as_root
|
||||
self.conn = conn
|
||||
self._pid = pid
|
||||
self._stdin = stdin
|
||||
self._stdout = stdout
|
||||
self._stderr = stderr
|
||||
self.redirect_thread = redirect_thread
|
||||
|
||||
def send_signal(self, sig):
|
||||
# If the command has already completed, we don't want to send a signal
|
||||
# to another process that might have gotten that PID in the meantime.
|
||||
if self.poll() is not None:
|
||||
return
|
||||
# Use -PGID to target a process group rather than just the process
|
||||
# itself
|
||||
cmd = _kill_pgid_cmd(self.pid, sig, self.conn.busybox)
|
||||
self.conn.execute(cmd, as_root=self.as_root)
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self._pid
|
||||
|
||||
def wait(self):
|
||||
return self.chan.recv_exit_status()
|
||||
|
||||
def poll(self):
|
||||
if self.chan.exit_status_ready():
|
||||
return self.wait()
|
||||
else:
|
||||
return None
|
||||
|
||||
def _cancel(self, kill_timeout):
|
||||
self.send_signal(signal.SIGTERM)
|
||||
# Check if the command terminated quickly
|
||||
time.sleep(10e-3)
|
||||
# Otherwise wait for the full timeout and kill it
|
||||
if self.poll() is None:
|
||||
time.sleep(kill_timeout)
|
||||
self.send_signal(signal.SIGKILL)
|
||||
self.wait()
|
||||
|
||||
@property
|
||||
def stdin(self):
|
||||
return self._stdin
|
||||
|
||||
@property
|
||||
def stdout(self):
|
||||
return self._stdout
|
||||
|
||||
@property
|
||||
def stderr(self):
|
||||
return self._stderr
|
||||
|
||||
def close(self):
|
||||
for x in (self.stdin, self.stdout, self.stderr):
|
||||
if x is not None:
|
||||
x.close()
|
||||
|
||||
exit_code = self.wait()
|
||||
thread = self.redirect_thread
|
||||
if thread:
|
||||
thread.join()
|
||||
|
||||
return exit_code
|
||||
|
||||
|
||||
class AdbBackgroundCommand(BackgroundCommand):
|
||||
"""
|
||||
``adb``-based background command.
|
||||
"""
|
||||
|
||||
def __init__(self, conn, adb_popen, pid, as_root):
|
||||
self.conn = conn
|
||||
self.as_root = as_root
|
||||
self.adb_popen = adb_popen
|
||||
self._pid = pid
|
||||
|
||||
def send_signal(self, sig):
|
||||
self.conn.execute(
|
||||
_kill_pgid_cmd(self.pid, sig, self.conn.busybox),
|
||||
as_root=self.as_root,
|
||||
)
|
||||
|
||||
@property
|
||||
def stdin(self):
|
||||
return self.adb_popen.stdin
|
||||
|
||||
@property
|
||||
def stdout(self):
|
||||
return self.adb_popen.stdout
|
||||
|
||||
@property
|
||||
def stderr(self):
|
||||
return self.adb_popen.stderr
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self._pid
|
||||
|
||||
def wait(self):
|
||||
return self.adb_popen.wait()
|
||||
|
||||
def poll(self):
|
||||
return self.adb_popen.poll()
|
||||
|
||||
def _cancel(self, kill_timeout):
|
||||
self.send_signal(signal.SIGTERM)
|
||||
try:
|
||||
self.adb_popen.wait(timeout=kill_timeout)
|
||||
except subprocess.TimeoutExpired:
|
||||
self.send_signal(signal.SIGKILL)
|
||||
self.adb_popen.kill()
|
||||
|
||||
def close(self):
|
||||
self.adb_popen.__exit__(None, None, None)
|
||||
return self.adb_popen.returncode
|
||||
|
||||
def __enter__(self):
|
||||
self.adb_popen.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
self.adb_popen.__exit__(*args, **kwargs)
|
||||
|
||||
|
||||
class TransferManagerBase(ABC):
|
||||
|
||||
def _pull_dest_size(self, dest):
|
||||
if os.path.isdir(dest):
|
||||
return sum(
|
||||
os.stat(os.path.join(dirpath, f)).st_size
|
||||
for dirpath, _, fnames in os.walk(dest)
|
||||
for f in fnames
|
||||
)
|
||||
else:
|
||||
return os.stat(dest).st_size
|
||||
return 0
|
||||
|
||||
def _push_dest_size(self, dest):
|
||||
cmd = '{} du -s {}'.format(quote(self.conn.busybox), quote(dest))
|
||||
out = self.conn.execute(cmd)
|
||||
try:
|
||||
return int(out.split()[0])
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
def __init__(self, conn, poll_period, start_transfer_poll_delay, total_timeout):
|
||||
self.conn = conn
|
||||
self.poll_period = poll_period
|
||||
self.total_timeout = total_timeout
|
||||
self.start_transfer_poll_delay = start_transfer_poll_delay
|
||||
|
||||
self.logger = logging.getLogger('FileTransfer')
|
||||
self.managing = threading.Event()
|
||||
self.transfer_started = threading.Event()
|
||||
self.transfer_completed = threading.Event()
|
||||
self.transfer_aborted = threading.Event()
|
||||
|
||||
self.monitor_thread = None
|
||||
self.sources = None
|
||||
self.dest = None
|
||||
self.direction = None
|
||||
|
||||
@abstractmethod
|
||||
def _cancel(self):
|
||||
pass
|
||||
|
||||
def cancel(self, reason=None):
|
||||
msg = 'Cancelling file transfer {} -> {}'.format(self.sources, self.dest)
|
||||
if reason is not None:
|
||||
msg += ' due to \'{}\''.format(reason)
|
||||
self.logger.warning(msg)
|
||||
self.transfer_aborted.set()
|
||||
self._cancel()
|
||||
|
||||
@abstractmethod
|
||||
def isactive(self):
|
||||
pass
|
||||
|
||||
@contextmanager
|
||||
def manage(self, sources, dest, direction):
|
||||
try:
|
||||
self.sources, self.dest, self.direction = sources, dest, direction
|
||||
m_thread = threading.Thread(target=self._monitor)
|
||||
|
||||
self.transfer_completed.clear()
|
||||
self.transfer_aborted.clear()
|
||||
self.transfer_started.set()
|
||||
|
||||
m_thread.start()
|
||||
yield self
|
||||
except BaseException:
|
||||
self.cancel(reason='exception during transfer')
|
||||
raise
|
||||
finally:
|
||||
self.transfer_completed.set()
|
||||
self.transfer_started.set()
|
||||
m_thread.join()
|
||||
self.transfer_started.clear()
|
||||
self.transfer_completed.clear()
|
||||
self.transfer_aborted.clear()
|
||||
|
||||
def _monitor(self):
|
||||
start_t = monotonic()
|
||||
self.transfer_completed.wait(self.start_transfer_poll_delay)
|
||||
while not self.transfer_completed.wait(self.poll_period):
|
||||
if not self.isactive():
|
||||
self.cancel(reason='transfer inactive')
|
||||
elif monotonic() - start_t > self.total_timeout:
|
||||
self.cancel(reason='transfer timed out')
|
||||
|
||||
|
||||
class PopenTransferManager(TransferManagerBase):
|
||||
|
||||
def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
|
||||
super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
|
||||
self.transfer = None
|
||||
self.last_sample = None
|
||||
|
||||
def _cancel(self):
|
||||
if self.transfer:
|
||||
self.transfer.cancel()
|
||||
self.transfer = None
|
||||
self.last_sample = None
|
||||
|
||||
def isactive(self):
|
||||
size_fn = self._push_dest_size if self.direction == 'push' else self._pull_dest_size
|
||||
curr_size = size_fn(self.dest)
|
||||
self.logger.debug('Polled file transfer, destination size {}'.format(curr_size))
|
||||
active = True if self.last_sample is None else curr_size > self.last_sample
|
||||
self.last_sample = curr_size
|
||||
return active
|
||||
|
||||
def set_transfer_and_wait(self, popen_bg_cmd):
|
||||
self.transfer = popen_bg_cmd
|
||||
self.last_sample = None
|
||||
ret = self.transfer.wait()
|
||||
|
||||
if ret and not self.transfer_aborted.is_set():
|
||||
raise subprocess.CalledProcessError(ret, self.transfer.popen.args)
|
||||
elif self.transfer_aborted.is_set():
|
||||
raise TimeoutError(self.transfer.popen.args)
|
||||
|
||||
|
||||
class SSHTransferManager(TransferManagerBase):
|
||||
|
||||
def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
|
||||
super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
|
||||
self.transferer = None
|
||||
self.progressed = False
|
||||
self.transferred = None
|
||||
self.to_transfer = None
|
||||
|
||||
def _cancel(self):
|
||||
self.transferer.close()
|
||||
|
||||
def isactive(self):
|
||||
progressed = self.progressed
|
||||
self.progressed = False
|
||||
msg = 'Polled transfer: {}% [{}B/{}B]'
|
||||
pc = format((self.transferred / self.to_transfer) * 100, '.2f')
|
||||
self.logger.debug(msg.format(pc, self.transferred, self.to_transfer))
|
||||
return progressed
|
||||
|
||||
@contextmanager
|
||||
def manage(self, sources, dest, direction, transferer):
|
||||
with super().manage(sources, dest, direction):
|
||||
try:
|
||||
self.progressed = False
|
||||
self.transferer = transferer # SFTPClient or SCPClient
|
||||
yield self
|
||||
except socket.error as e:
|
||||
if self.transfer_aborted.is_set():
|
||||
self.transfer_aborted.clear()
|
||||
method = 'SCP' if self.conn.use_scp else 'SFTP'
|
||||
raise TimeoutError('{} {}: {} -> {}'.format(method, self.direction, sources, self.dest))
|
||||
else:
|
||||
raise e
|
||||
|
||||
def progress_cb(self, *args):
|
||||
if self.transfer_started.is_set():
|
||||
self.progressed = True
|
||||
if len(args) == 3: # For SCPClient callbacks
|
||||
self.transferred = args[2]
|
||||
self.to_transfer = args[1]
|
||||
elif len(args) == 2: # For SFTPClient callbacks
|
||||
self.transferred = args[0]
|
||||
self.to_transfer = args[1]
|
@@ -36,25 +36,28 @@ class DerivedMetric(object):
|
||||
msg = 'Unknown measurement type: {}'
|
||||
raise ValueError(msg.format(measurement_type))
|
||||
|
||||
def __cmp__(self, other):
|
||||
if hasattr(other, 'value'):
|
||||
return cmp(self.value, other.value)
|
||||
else:
|
||||
return cmp(self.value, other)
|
||||
|
||||
def __str__(self):
|
||||
if self.units:
|
||||
return '{}: {} {}'.format(self.name, self.value, self.units)
|
||||
else:
|
||||
return '{}: {}'.format(self.name, self.value)
|
||||
|
||||
# pylint: disable=undefined-variable
|
||||
def __cmp__(self, other):
|
||||
if hasattr(other, 'value'):
|
||||
return cmp(self.value, other.value)
|
||||
else:
|
||||
return cmp(self.value, other)
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
|
||||
class DerivedMeasurements(object):
|
||||
|
||||
# pylint: disable=no-self-use,unused-argument
|
||||
def process(self, measurements_csv):
|
||||
return []
|
||||
|
||||
# pylint: disable=no-self-use
|
||||
def process_raw(self, *args):
|
||||
return []
|
||||
|
@@ -15,12 +15,13 @@
|
||||
from __future__ import division
|
||||
from collections import defaultdict
|
||||
|
||||
from devlib import DerivedMeasurements, DerivedMetric
|
||||
from devlib.instrument import MEASUREMENT_TYPES, InstrumentChannel
|
||||
from devlib.derived import DerivedMeasurements, DerivedMetric
|
||||
from devlib.instrument import MEASUREMENT_TYPES
|
||||
|
||||
|
||||
class DerivedEnergyMeasurements(DerivedMeasurements):
|
||||
|
||||
# pylint: disable=too-many-locals,too-many-branches
|
||||
@staticmethod
|
||||
def process(measurements_csv):
|
||||
|
||||
|
@@ -15,7 +15,6 @@
|
||||
|
||||
from __future__ import division
|
||||
import os
|
||||
import re
|
||||
|
||||
try:
|
||||
import pandas as pd
|
||||
@@ -24,8 +23,9 @@ except ImportError:
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib import DerivedMeasurements, DerivedMetric, MeasurementsCsv, InstrumentChannel
|
||||
from devlib.derived import DerivedMeasurements, DerivedMetric
|
||||
from devlib.exception import HostError
|
||||
from devlib.instrument import MeasurementsCsv
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
from devlib.utils.rendering import gfxinfo_get_last_dump, VSYNC_INTERVAL
|
||||
from devlib.utils.types import numeric
|
||||
@@ -45,6 +45,7 @@ class DerivedFpsStats(DerivedMeasurements):
|
||||
if filename is not None and os.sep in filename:
|
||||
raise ValueError('filename cannot be a path (cannot countain "{}"'.format(os.sep))
|
||||
|
||||
# pylint: disable=no-member
|
||||
def process(self, measurements_csv):
|
||||
if isinstance(measurements_csv, basestring):
|
||||
measurements_csv = MeasurementsCsv(measurements_csv)
|
||||
@@ -65,6 +66,7 @@ class DerivedFpsStats(DerivedMeasurements):
|
||||
|
||||
class DerivedGfxInfoStats(DerivedFpsStats):
|
||||
|
||||
#pylint: disable=arguments-differ
|
||||
@staticmethod
|
||||
def process_raw(filepath, *args):
|
||||
metrics = []
|
||||
@@ -104,17 +106,17 @@ class DerivedGfxInfoStats(DerivedFpsStats):
|
||||
frame_count += 1
|
||||
|
||||
if start_vsync is None:
|
||||
start_vsync = frame_data.Vsync_time_us
|
||||
end_vsync = frame_data.Vsync_time_us
|
||||
start_vsync = frame_data.Vsync_time_ns
|
||||
end_vsync = frame_data.Vsync_time_ns
|
||||
|
||||
frame_time = frame_data.FrameCompleted_time_us - frame_data.IntendedVsync_time_us
|
||||
frame_time = frame_data.FrameCompleted_time_ns - frame_data.IntendedVsync_time_ns
|
||||
pff = 1e9 / frame_time
|
||||
if pff > self.drop_threshold:
|
||||
per_frame_fps.append([pff])
|
||||
|
||||
if frame_count:
|
||||
duration = end_vsync - start_vsync
|
||||
fps = (1e6 * frame_count) / float(duration)
|
||||
fps = (1e9 * frame_count) / float(duration)
|
||||
else:
|
||||
duration = 0
|
||||
fps = 0
|
||||
@@ -131,15 +133,15 @@ class DerivedGfxInfoStats(DerivedFpsStats):
|
||||
def _process_with_pandas(self, measurements_csv):
|
||||
data = pd.read_csv(measurements_csv.path)
|
||||
data = data[data.Flags_flags == 0]
|
||||
frame_time = data.FrameCompleted_time_us - data.IntendedVsync_time_us
|
||||
per_frame_fps = (1e6 / frame_time)
|
||||
frame_time = data.FrameCompleted_time_ns - data.IntendedVsync_time_ns
|
||||
per_frame_fps = (1e9 / frame_time)
|
||||
keep_filter = per_frame_fps > self.drop_threshold
|
||||
per_frame_fps = per_frame_fps[keep_filter]
|
||||
per_frame_fps.name = 'fps'
|
||||
|
||||
frame_count = data.index.size
|
||||
if frame_count > 1:
|
||||
duration = data.Vsync_time_us.iloc[-1] - data.Vsync_time_us.iloc[0]
|
||||
duration = data.Vsync_time_ns.iloc[-1] - data.Vsync_time_ns.iloc[0]
|
||||
fps = (1e9 * frame_count) / float(duration)
|
||||
else:
|
||||
duration = 0
|
||||
@@ -155,6 +157,7 @@ class DerivedGfxInfoStats(DerivedFpsStats):
|
||||
|
||||
class DerivedSurfaceFlingerStats(DerivedFpsStats):
|
||||
|
||||
# pylint: disable=too-many-locals
|
||||
def _process_with_pandas(self, measurements_csv):
|
||||
data = pd.read_csv(measurements_csv.path)
|
||||
|
||||
@@ -193,7 +196,7 @@ class DerivedSurfaceFlingerStats(DerivedFpsStats):
|
||||
janks = 0
|
||||
not_at_vsync = 0
|
||||
|
||||
janks_pc = 0 if frame_count == 0 else janks * 100 / frame_count
|
||||
janks_pc = 0 if frame_count == 0 else janks * 100 / frame_count
|
||||
|
||||
return [DerivedMetric('fps', fps, 'fps'),
|
||||
DerivedMetric('total_frames', frame_count, 'frames'),
|
||||
@@ -202,6 +205,7 @@ class DerivedSurfaceFlingerStats(DerivedFpsStats):
|
||||
DerivedMetric('janks_pc', janks_pc, 'percent'),
|
||||
DerivedMetric('missed_vsync', not_at_vsync, 'count')]
|
||||
|
||||
# pylint: disable=unused-argument,no-self-use
|
||||
def _process_without_pandas(self, measurements_csv):
|
||||
# Given that SurfaceFlinger has been deprecated in favor of GfxInfo,
|
||||
# it does not seem worth it implementing this.
|
||||
|
@@ -15,11 +15,35 @@
|
||||
|
||||
class DevlibError(Exception):
|
||||
"""Base class for all Devlib exceptions."""
|
||||
|
||||
def __init__(self, *args):
|
||||
message = args[0] if args else None
|
||||
self._message = message
|
||||
|
||||
@property
|
||||
def message(self):
|
||||
if self.args:
|
||||
return self.args[0]
|
||||
return str(self)
|
||||
if self._message is not None:
|
||||
return self._message
|
||||
else:
|
||||
return str(self)
|
||||
|
||||
|
||||
class DevlibStableError(DevlibError):
|
||||
"""Non transient target errors, that are not subject to random variations
|
||||
in the environment and can be reliably linked to for example a missing
|
||||
feature on a target."""
|
||||
pass
|
||||
|
||||
|
||||
class DevlibTransientError(DevlibError):
|
||||
"""Exceptions inheriting from ``DevlibTransientError`` represent random
|
||||
transient events that are usually related to issues in the environment, as
|
||||
opposed to programming errors, for example network failures or
|
||||
timeout-related exceptions. When the error could come from
|
||||
indistinguishable transient or non-transient issue, it can generally be
|
||||
assumed that the configuration is correct and therefore, a transient
|
||||
exception is raised."""
|
||||
pass
|
||||
|
||||
|
||||
class TargetError(DevlibError):
|
||||
@@ -27,7 +51,20 @@ class TargetError(DevlibError):
|
||||
pass
|
||||
|
||||
|
||||
class TargetNotRespondingError(DevlibError):
|
||||
class TargetTransientError(TargetError, DevlibTransientError):
|
||||
"""Transient target errors that can happen randomly when everything is
|
||||
properly configured."""
|
||||
pass
|
||||
|
||||
|
||||
class TargetStableError(TargetError, DevlibStableError):
|
||||
"""Non-transient target errors that can be linked to a programming error or
|
||||
a configuration issue, and is not influenced by non-controllable parameters
|
||||
such as network issues."""
|
||||
pass
|
||||
|
||||
|
||||
class TargetNotRespondingError(TargetTransientError):
|
||||
"""The target is unresponsive."""
|
||||
pass
|
||||
|
||||
@@ -37,7 +74,8 @@ class HostError(DevlibError):
|
||||
pass
|
||||
|
||||
|
||||
class TimeoutError(DevlibError):
|
||||
# pylint: disable=redefined-builtin
|
||||
class TimeoutError(DevlibTransientError):
|
||||
"""Raised when a subprocess command times out. This is basically a ``DevlibError``-derived version
|
||||
of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
|
||||
programming error (e.g. not setting long enough timers), it is often due to some failure in the
|
||||
@@ -73,19 +111,29 @@ class WorkerThreadError(DevlibError):
|
||||
super(WorkerThreadError, self).__init__(message)
|
||||
|
||||
|
||||
class KernelConfigKeyError(KeyError, IndexError, DevlibError):
|
||||
"""
|
||||
Exception raised when a kernel config option cannot be found.
|
||||
|
||||
It inherits from :exc:`IndexError` for backward compatibility, and
|
||||
:exc:`KeyError` to behave like a regular mapping.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
def get_traceback(exc=None):
|
||||
"""
|
||||
Returns the string with the traceback for the specifiec exc
|
||||
object, or for the current exception exc is not specified.
|
||||
|
||||
"""
|
||||
import io, traceback, sys
|
||||
import io, traceback, sys # pylint: disable=multiple-imports
|
||||
if exc is None:
|
||||
exc = sys.exc_info()
|
||||
if not exc:
|
||||
return None
|
||||
tb = exc[2]
|
||||
sio = io.BytesIO()
|
||||
sio = io.StringIO()
|
||||
traceback.print_tb(tb, file=sio)
|
||||
del tb # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
|
||||
return sio.getvalue()
|
||||
|
118
devlib/host.py
118
devlib/host.py
@@ -12,79 +12,151 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from glob import iglob
|
||||
import glob
|
||||
import os
|
||||
import signal
|
||||
import shutil
|
||||
import subprocess
|
||||
import logging
|
||||
from distutils.dir_util import copy_tree
|
||||
from getpass import getpass
|
||||
from pipes import quote
|
||||
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetTransientError, TargetStableError
|
||||
from devlib.utils.misc import check_output
|
||||
from devlib.connection import ConnectionBase, PopenBackgroundCommand
|
||||
|
||||
|
||||
PACKAGE_BIN_DIRECTORY = os.path.join(os.path.dirname(__file__), 'bin')
|
||||
|
||||
|
||||
# pylint: disable=redefined-outer-name
|
||||
def kill_children(pid, signal=signal.SIGKILL):
|
||||
with open('/proc/{0}/task/{0}/children'.format(pid), 'r') as fd:
|
||||
for cpid in map(int, fd.read().strip().split()):
|
||||
kill_children(cpid, signal)
|
||||
os.kill(cpid, signal)
|
||||
|
||||
class LocalConnection(object):
|
||||
|
||||
class LocalConnection(ConnectionBase):
|
||||
|
||||
name = 'local'
|
||||
host = 'localhost'
|
||||
|
||||
@property
|
||||
def connected_as_root(self):
|
||||
if self._connected_as_root is None:
|
||||
result = self.execute('id', as_root=False)
|
||||
self._connected_as_root = 'uid=0(' in result
|
||||
return self._connected_as_root
|
||||
|
||||
@connected_as_root.setter
|
||||
def connected_as_root(self, state):
|
||||
self._connected_as_root = state
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def __init__(self, platform=None, keep_password=True, unrooted=False,
|
||||
password=None, timeout=None):
|
||||
super().__init__()
|
||||
self._connected_as_root = None
|
||||
self.logger = logging.getLogger('local_connection')
|
||||
self.keep_password = keep_password
|
||||
self.unrooted = unrooted
|
||||
self.password = password
|
||||
|
||||
def push(self, source, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self.logger.debug('cp {} {}'.format(source, dest))
|
||||
shutil.copy(source, dest)
|
||||
|
||||
def pull(self, source, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self.logger.debug('cp {} {}'.format(source, dest))
|
||||
if ('*' in source or '?' in source) and os.path.isdir(dest):
|
||||
# Pull all files matching a wildcard expression
|
||||
for each_source in iglob(source):
|
||||
shutil.copy(each_source, dest)
|
||||
def _copy_path(self, source, dest):
|
||||
self.logger.debug('copying {} to {}'.format(source, dest))
|
||||
if os.path.isdir(source):
|
||||
# Behave similarly as cp, scp, adb push, etc. by creating a new
|
||||
# folder instead of merging hierarchies
|
||||
if os.path.exists(dest):
|
||||
dest = os.path.join(dest, os.path.basename(os.path.normpath(src)))
|
||||
|
||||
# Use distutils copy_tree since it behaves the same as
|
||||
# shutils.copytree except that it won't fail if some folders
|
||||
# already exist.
|
||||
#
|
||||
# Mirror the behavior of all other targets which only copy the
|
||||
# content without metadata
|
||||
copy_tree(source, dest, preserve_mode=False, preserve_times=False)
|
||||
else:
|
||||
shutil.copy(source, dest)
|
||||
|
||||
def _copy_paths(self, sources, dest):
|
||||
for source in sources:
|
||||
self._copy_path(source, dest)
|
||||
|
||||
def push(self, sources, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self._copy_paths(sources, dest)
|
||||
|
||||
def pull(self, sources, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self._copy_paths(sources, dest)
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def execute(self, command, timeout=None, check_exit_code=True,
|
||||
as_root=False, strip_colors=True):
|
||||
as_root=False, strip_colors=True, will_succeed=False):
|
||||
self.logger.debug(command)
|
||||
if as_root:
|
||||
use_sudo = as_root and not self.connected_as_root
|
||||
if use_sudo:
|
||||
if self.unrooted:
|
||||
raise TargetError('unrooted')
|
||||
raise TargetStableError('unrooted')
|
||||
password = self._get_password()
|
||||
command = 'echo \'{}\' | sudo -S '.format(password) + command
|
||||
command = "echo {} | sudo -k -p ' ' -S -- sh -c {}".format(quote(password), quote(command))
|
||||
ignore = None if check_exit_code else 'all'
|
||||
try:
|
||||
return check_output(command, shell=True, timeout=timeout, ignore=ignore)[0]
|
||||
stdout, stderr = check_output(command, shell=True, timeout=timeout, ignore=ignore)
|
||||
except subprocess.CalledProcessError as e:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'.format(
|
||||
e.returncode, command, e.output)
|
||||
raise TargetError(message)
|
||||
if will_succeed:
|
||||
raise TargetTransientError(message)
|
||||
else:
|
||||
raise TargetStableError(message)
|
||||
|
||||
# Remove the one-character prompt of sudo -S -p
|
||||
if use_sudo and stderr:
|
||||
stderr = stderr[1:]
|
||||
|
||||
return stdout + stderr
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
if as_root:
|
||||
if as_root and not self.connected_as_root:
|
||||
if self.unrooted:
|
||||
raise TargetError('unrooted')
|
||||
raise TargetStableError('unrooted')
|
||||
password = self._get_password()
|
||||
command = 'echo \'{}\' | sudo -S '.format(password) + command
|
||||
return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
|
||||
# The sudo prompt will add a space on stderr, but we cannot filter
|
||||
# it out here
|
||||
command = "echo {} | sudo -k -p ' ' -S -- sh -c {}".format(quote(password), quote(command))
|
||||
|
||||
def close(self):
|
||||
# Make sure to get a new PGID so PopenBackgroundCommand() can kill
|
||||
# all sub processes that could be started without troubles.
|
||||
def preexec_fn():
|
||||
os.setpgrp()
|
||||
|
||||
popen = subprocess.Popen(
|
||||
command,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
shell=True,
|
||||
preexec_fn=preexec_fn,
|
||||
)
|
||||
bg_cmd = PopenBackgroundCommand(popen)
|
||||
self._current_bg_cmds.add(bg_cmd)
|
||||
return bg_cmd
|
||||
|
||||
def _close(self):
|
||||
pass
|
||||
|
||||
def cancel_running_command(self):
|
||||
pass
|
||||
|
||||
def wait_for_device(self, timeout=30):
|
||||
return
|
||||
|
||||
def reboot_bootloader(self, timeout=30):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _get_password(self):
|
||||
if self.password:
|
||||
return self.password
|
||||
|
@@ -58,6 +58,7 @@ class MeasurementType(object):
|
||||
raise ValueError(msg.format(self.name, to.name))
|
||||
return self.conversions[to.name](value)
|
||||
|
||||
# pylint: disable=undefined-variable
|
||||
def __cmp__(self, other):
|
||||
if isinstance(other, MeasurementType):
|
||||
other = other.name
|
||||
@@ -96,20 +97,30 @@ _measurement_types = [
|
||||
# covert without being familar with individual instruments.
|
||||
MeasurementType('time', 'seconds', 'time',
|
||||
conversions={
|
||||
'time_us': lambda x: x * 1000000,
|
||||
'time_ms': lambda x: x * 1000,
|
||||
'time_us': lambda x: x * 1e6,
|
||||
'time_ms': lambda x: x * 1e3,
|
||||
'time_ns': lambda x: x * 1e9,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_us', 'microseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1000000,
|
||||
'time_ms': lambda x: x / 1000,
|
||||
'time': lambda x: x / 1e6,
|
||||
'time_ms': lambda x: x / 1e3,
|
||||
'time_ns': lambda x: x * 1e3,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_ms', 'milliseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1000,
|
||||
'time_us': lambda x: x * 1000,
|
||||
'time': lambda x: x / 1e3,
|
||||
'time_us': lambda x: x * 1e3,
|
||||
'time_ns': lambda x: x * 1e6,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_ns', 'nanoseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1e9,
|
||||
'time_ms': lambda x: x / 1e6,
|
||||
'time_us': lambda x: x / 1e3,
|
||||
}
|
||||
),
|
||||
|
||||
@@ -151,6 +162,7 @@ class Measurement(object):
|
||||
self.value = value
|
||||
self.channel = channel
|
||||
|
||||
# pylint: disable=undefined-variable
|
||||
def __cmp__(self, other):
|
||||
if hasattr(other, 'value'):
|
||||
return cmp(self.value, other.value)
|
||||
@@ -204,7 +216,7 @@ class MeasurementsCsv(object):
|
||||
for mt in MEASUREMENT_TYPES:
|
||||
suffix = '_{}'.format(mt)
|
||||
if entry.endswith(suffix):
|
||||
site = entry[:-len(suffix)]
|
||||
site = entry[:-len(suffix)]
|
||||
measure = mt
|
||||
break
|
||||
else:
|
||||
@@ -218,6 +230,7 @@ class MeasurementsCsv(object):
|
||||
chan = InstrumentChannel(site, measure)
|
||||
self.channels.append(chan)
|
||||
|
||||
# pylint: disable=stop-iteration-return
|
||||
def _iter_rows(self):
|
||||
with csvreader(self.path) as reader:
|
||||
next(reader) # headings
|
||||
@@ -308,16 +321,16 @@ class Instrument(object):
|
||||
msg = 'Unexpected channel "{}"; must be in {}'
|
||||
raise ValueError(msg.format(e, self.channels.keys()))
|
||||
elif sites is None and kinds is None:
|
||||
self.active_channels = sorted(self.channels.itervalues(), key=lambda x: x.label)
|
||||
self.active_channels = sorted(self.channels.values(), key=lambda x: x.label)
|
||||
else:
|
||||
if isinstance(sites, basestring):
|
||||
sites = [sites]
|
||||
if isinstance(kinds, basestring):
|
||||
kinds = [kinds]
|
||||
|
||||
wanted = lambda ch : ((kinds is None or ch.kind in kinds) and
|
||||
wanted = lambda ch: ((kinds is None or ch.kind in kinds) and
|
||||
(sites is None or ch.site in sites))
|
||||
self.active_channels = filter(wanted, self.channels.itervalues())
|
||||
self.active_channels = list(filter(wanted, self.channels.values()))
|
||||
|
||||
# instantaneous
|
||||
|
||||
@@ -332,6 +345,7 @@ class Instrument(object):
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
# pylint: disable=no-self-use
|
||||
def get_data(self, outfile):
|
||||
pass
|
||||
|
||||
|
@@ -19,9 +19,11 @@ import os
|
||||
import sys
|
||||
import time
|
||||
import tempfile
|
||||
import shlex
|
||||
from fcntl import fcntl, F_GETFL, F_SETFL
|
||||
from string import Template
|
||||
from subprocess import Popen, PIPE, STDOUT
|
||||
from pipes import quote
|
||||
|
||||
from devlib import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
@@ -56,12 +58,14 @@ class AcmeCapeInstrument(Instrument):
|
||||
iio_capture=which('iio-capture'),
|
||||
host='baylibre-acme.local',
|
||||
iio_device='iio:device0',
|
||||
buffer_size=256):
|
||||
buffer_size=256,
|
||||
keep_raw=False):
|
||||
super(AcmeCapeInstrument, self).__init__(target)
|
||||
self.iio_capture = iio_capture
|
||||
self.host = host
|
||||
self.iio_device = iio_device
|
||||
self.buffer_size = buffer_size
|
||||
self.keep_raw = keep_raw
|
||||
self.sample_rate_hz = 100
|
||||
if self.iio_capture is None:
|
||||
raise HostError('Missing iio-capture binary')
|
||||
@@ -85,15 +89,17 @@ class AcmeCapeInstrument(Instrument):
|
||||
params = dict(
|
||||
iio_capture=self.iio_capture,
|
||||
host=self.host,
|
||||
buffer_size=self.buffer_size,
|
||||
# This must be a string for quote()
|
||||
buffer_size=str(self.buffer_size),
|
||||
iio_device=self.iio_device,
|
||||
outfile=self.raw_data_file
|
||||
)
|
||||
params = {k: quote(v) for k, v in params.items()}
|
||||
self.command = IIOCAP_CMD_TEMPLATE.substitute(**params)
|
||||
self.logger.debug('ACME cape command: {}'.format(self.command))
|
||||
|
||||
def start(self):
|
||||
self.process = Popen(self.command.split(), stdout=PIPE, stderr=STDOUT)
|
||||
self.process = Popen(shlex.split(self.command), stdout=PIPE, stderr=STDOUT)
|
||||
|
||||
def stop(self):
|
||||
self.process.terminate()
|
||||
@@ -112,7 +118,7 @@ class AcmeCapeInstrument(Instrument):
|
||||
raise HostError(msg.format(output))
|
||||
if self.process.returncode != 15: # iio-capture exits with 15 when killed
|
||||
if sys.version_info[0] == 3:
|
||||
output += self.process.stdout.read().decode(sys.stdout.encoding, 'replace')
|
||||
output += self.process.stdout.read().decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
else:
|
||||
output += self.process.stdout.read()
|
||||
self.logger.info('ACME instrument encountered an error, '
|
||||
@@ -155,3 +161,8 @@ class AcmeCapeInstrument(Instrument):
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
||||
|
||||
def teardown(self):
|
||||
if not self.keep_raw:
|
||||
if os.path.isfile(self.raw_data_file):
|
||||
os.remove(self.raw_data_file)
|
||||
|
@@ -34,8 +34,7 @@ from __future__ import division
|
||||
import os
|
||||
import subprocess
|
||||
import signal
|
||||
import struct
|
||||
import sys
|
||||
from pipes import quote
|
||||
|
||||
import tempfile
|
||||
import shutil
|
||||
@@ -72,7 +71,7 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
|
||||
MAX_CHANNELS = 12 # 4 Arm Energy Probes
|
||||
|
||||
def __init__(self, target, config_file='./config-aep', ):
|
||||
def __init__(self, target, config_file='./config-aep', keep_raw=False):
|
||||
super(ArmEnergyProbeInstrument, self).__init__(target)
|
||||
self.arm_probe = which('arm-probe')
|
||||
if self.arm_probe is None:
|
||||
@@ -81,6 +80,7 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
self.attributes = ['power', 'voltage', 'current']
|
||||
self.sample_rate_hz = 10000
|
||||
self.config_file = config_file
|
||||
self.keep_raw = keep_raw
|
||||
|
||||
self.parser = AepParser()
|
||||
#TODO make it generic
|
||||
@@ -99,7 +99,7 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
self.output_file_figure = os.path.join(self.output_directory, 'summary.txt')
|
||||
self.output_file_error = os.path.join(self.output_directory, 'error.log')
|
||||
self.output_fd_error = open(self.output_file_error, 'w')
|
||||
self.command = 'arm-probe --config {} > {}'.format(self.config_file, self.output_file_raw)
|
||||
self.command = 'arm-probe --config {} > {}'.format(quote(self.config_file), quote(self.output_file_raw))
|
||||
|
||||
def start(self):
|
||||
self.logger.debug(self.command)
|
||||
@@ -109,8 +109,8 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
shell=True)
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug("kill running arm-probe")
|
||||
os.killpg(self.armprobe.pid, signal.SIGTERM)
|
||||
self.logger.debug("kill running arm-probe")
|
||||
os.killpg(self.armprobe.pid, signal.SIGTERM)
|
||||
|
||||
def get_data(self, outfile): # pylint: disable=R0914
|
||||
self.logger.debug("Parse data and compute consumed energy")
|
||||
@@ -133,7 +133,7 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
if len(row) < len(active_channels):
|
||||
continue
|
||||
# all data are in micro (seconds/watt)
|
||||
new = [ float(row[i])/1000000 for i in active_indexes ]
|
||||
new = [float(row[i])/1000000 for i in active_indexes]
|
||||
writer.writerow(new)
|
||||
|
||||
self.output_fd_error.close()
|
||||
@@ -143,3 +143,8 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
|
||||
def get_raw(self):
|
||||
return [self.output_file_raw]
|
||||
|
||||
def teardown(self):
|
||||
if not self.keep_raw:
|
||||
if os.path.isfile(self.output_file_raw):
|
||||
os.remove(self.output_file_raw)
|
||||
|
557
devlib/instrument/baylibre_acme.py
Normal file
557
devlib/instrument/baylibre_acme.py
Normal file
@@ -0,0 +1,557 @@
|
||||
#pylint: disable=attribute-defined-outside-init
|
||||
|
||||
import collections
|
||||
import functools
|
||||
import re
|
||||
import threading
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
try:
|
||||
import iio
|
||||
except ImportError as e:
|
||||
iio_import_failed = True
|
||||
iio_import_error = e
|
||||
else:
|
||||
iio_import_failed = False
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
from devlib import CONTINUOUS, Instrument, HostError, MeasurementsCsv, TargetError
|
||||
from devlib.utils.ssh import SshConnection
|
||||
|
||||
class IIOINA226Channel(object):
|
||||
|
||||
def __init__(self, iio_channel):
|
||||
|
||||
channel_id = iio_channel.id
|
||||
channel_type = iio_channel.attrs['type'].value
|
||||
|
||||
re_measure = r'(?P<measure>\w+)(?P<index>\d*)$'
|
||||
re_dtype = r'le:(?P<sign>\w)(?P<width>\d+)/(?P<size>\d+)>>(?P<align>\d+)'
|
||||
|
||||
match_measure = re.search(re_measure, channel_id)
|
||||
match_dtype = re.search(re_dtype, channel_type)
|
||||
|
||||
if not match_measure:
|
||||
msg = "IIO channel ID '{}' does not match expected RE '{}'"
|
||||
raise ValueError(msg.format(channel_id, re_measure))
|
||||
|
||||
if not match_dtype:
|
||||
msg = "'IIO channel type '{}' does not match expected RE '{}'"
|
||||
raise ValueError(msg.format(channel_type, re_dtype))
|
||||
|
||||
self.measure = match_measure.group('measure')
|
||||
self.iio_dtype = 'int{}'.format(match_dtype.group('width'))
|
||||
self.iio_channel = iio_channel
|
||||
# Data is reported in amps, volts, watts and microseconds:
|
||||
self.iio_scale = (1. if 'scale' not in iio_channel.attrs
|
||||
else float(iio_channel.attrs['scale'].value))
|
||||
self.iio_scale /= 1000
|
||||
# As calls to iio_store_buffer will be blocking and probably coming
|
||||
# from a loop retrieving samples from the ACME, we want to provide
|
||||
# consistency in processing timing between iterations i.e. we want
|
||||
# iio_store_buffer to be o(1) for every call (can't have that with []):
|
||||
self.sample_buffers = collections.deque()
|
||||
|
||||
def iio_store_buffer_samples(self, iio_buffer):
|
||||
# IIO buffers receive and store their data as an interlaced array of
|
||||
# samples from all the IIO channels of the IIO device. The IIO library
|
||||
# provides a reliable function to extract the samples (bytes, actually)
|
||||
# corresponding to a channel from the received buffer; in Python, it is
|
||||
# iio.Channel.read(iio.Buffer).
|
||||
#
|
||||
# NB: As this is called in a potentially tightly timed loop, we do as
|
||||
# little work as possible:
|
||||
self.sample_buffers.append(self.iio_channel.read(iio_buffer))
|
||||
|
||||
def iio_get_samples(self, absolute_timestamps=False):
|
||||
# Up to this point, the data is not interpreted yet i.e. these are
|
||||
# bytearrays. Hence the use of np.dtypes.
|
||||
buffers = [np.frombuffer(b, dtype=self.iio_dtype)
|
||||
for b in self.sample_buffers]
|
||||
|
||||
must_shift = (self.measure == 'timestamp' and not absolute_timestamps)
|
||||
samples = np.concatenate(buffers)
|
||||
return (samples - samples[0] if must_shift else samples) * self.iio_scale
|
||||
|
||||
def iio_forget_samples(self):
|
||||
self.sample_buffers.clear()
|
||||
|
||||
|
||||
# Decorators for the attributes of IIOINA226Instrument:
|
||||
|
||||
def only_set_to(valid_values, dynamic=False):
|
||||
def validating_wrapper(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(self, value):
|
||||
values = (valid_values if not dynamic
|
||||
else getattr(self, valid_values))
|
||||
if value not in values:
|
||||
msg = '{} is invalid; expected values are {}'
|
||||
raise ValueError(msg.format(value, valid_values))
|
||||
return func(self, value)
|
||||
return wrapper
|
||||
return validating_wrapper
|
||||
|
||||
|
||||
def with_input_as(wanted_type):
|
||||
def typecasting_wrapper(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(self, value):
|
||||
return func(self, wanted_type(value))
|
||||
return wrapper
|
||||
return typecasting_wrapper
|
||||
|
||||
|
||||
def _IIODeviceAttr(attr_name, attr_type, writable=False, dyn_vals=None, stat_vals=None):
|
||||
|
||||
def getter(self):
|
||||
return attr_type(self.iio_device.attrs[attr_name].value)
|
||||
|
||||
def setter(self, value):
|
||||
self.iio_device.attrs[attr_name].value = str(attr_type(value))
|
||||
|
||||
if writable and (dyn_vals or stat_vals):
|
||||
vals, dyn = dyn_vals or stat_vals, dyn_vals is not None
|
||||
setter = with_input_as(attr_type)(only_set_to(vals, dyn)(setter))
|
||||
|
||||
return property(getter, setter if writable else None)
|
||||
|
||||
|
||||
def _IIOChannelIntTime(chan_name):
|
||||
|
||||
attr_name, attr_type = 'integration_time', float
|
||||
|
||||
def getter(self):
|
||||
ch = self.iio_device.find_channel(chan_name)
|
||||
return attr_type(ch.attrs[attr_name].value)
|
||||
|
||||
@only_set_to('INTEGRATION_TIMES_AVAILABLE', dynamic=True)
|
||||
@with_input_as(attr_type)
|
||||
def setter(self, value):
|
||||
ch = self.iio_device.find_channel(chan_name)
|
||||
ch.attrs[attr_name].value = str(value)
|
||||
|
||||
return property(getter, setter)
|
||||
|
||||
|
||||
def _setify(x):
|
||||
return {x} if isinstance(x, basestring) else set(x) #Py3: basestring->str
|
||||
|
||||
|
||||
class IIOINA226Instrument(object):
|
||||
|
||||
IIO_DEVICE_NAME = 'ina226'
|
||||
|
||||
def __init__(self, iio_device):
|
||||
|
||||
if iio_device.name != self.IIO_DEVICE_NAME:
|
||||
msg = 'IIO device is {}; expected {}'
|
||||
raise TargetError(msg.format(iio_device.name, self.IIO_DEVICE_NAME))
|
||||
|
||||
self.iio_device = iio_device
|
||||
self.absolute_timestamps = False
|
||||
self.high_resolution = True
|
||||
self.buffer_samples_count = None
|
||||
self.buffer_is_circular = False
|
||||
|
||||
self.collector = None
|
||||
self.work_done = threading.Event()
|
||||
self.collector_exception = None
|
||||
|
||||
self.data = collections.OrderedDict()
|
||||
|
||||
channels = {
|
||||
'timestamp': 'timestamp',
|
||||
'shunt' : 'voltage0',
|
||||
'voltage' : 'voltage1', # bus
|
||||
'power' : 'power2',
|
||||
'current' : 'current3',
|
||||
}
|
||||
self.computable_channels = {'current' : {'shunt'},
|
||||
'power' : {'shunt', 'voltage'}}
|
||||
self.uncomputable_channels = set(channels) - set(self.computable_channels)
|
||||
self.channels = {k: IIOINA226Channel(self.iio_device.find_channel(v))
|
||||
for k, v in channels.items()}
|
||||
# We distinguish between "output" channels (as seen by the user of this
|
||||
# class) and "hardware" channels (as requested from the INA226).
|
||||
# This is necessary because of the 'high_resolution' feature which
|
||||
# requires outputting computed channels:
|
||||
self.active_channels = set() # "hardware" channels
|
||||
self.wanted_channels = set() # "output" channels
|
||||
|
||||
|
||||
# Properties
|
||||
|
||||
OVERSAMPLING_RATIOS_AVAILABLE = (1, 4, 16, 64, 128, 256, 512, 1024)
|
||||
INTEGRATION_TIMES_AVAILABLE = _IIODeviceAttr('integration_time_available',
|
||||
lambda x: tuple(map(float, x.split())))
|
||||
|
||||
sample_rate_hz = _IIODeviceAttr('in_sampling_frequency', int)
|
||||
shunt_resistor = _IIODeviceAttr('in_shunt_resistor' , int, True)
|
||||
oversampling_ratio = _IIODeviceAttr('in_oversampling_ratio', int, True,
|
||||
dyn_vals='OVERSAMPLING_RATIOS_AVAILABLE')
|
||||
|
||||
integration_time_shunt = _IIOChannelIntTime('voltage0')
|
||||
integration_time_bus = _IIOChannelIntTime('voltage1')
|
||||
|
||||
def list_channels(self):
|
||||
return self.channels.keys()
|
||||
|
||||
def activate(self, channels=None):
|
||||
all_channels = set(self.channels)
|
||||
requested_channels = (all_channels if channels is None
|
||||
else _setify(channels))
|
||||
|
||||
unknown = ', '.join(requested_channels - all_channels)
|
||||
if unknown:
|
||||
raise ValueError('Unknown channel(s): {}'.format(unknown))
|
||||
|
||||
self.wanted_channels |= requested_channels
|
||||
|
||||
def deactivate(self, channels=None):
|
||||
unwanted_channels = (self.wanted_channels if channels is None
|
||||
else _setify(channels))
|
||||
|
||||
unknown = ', '.join(unwanted_channels - set(self.channels))
|
||||
if unknown:
|
||||
raise ValueError('Unknown channel(s): {}'.format(unknown))
|
||||
|
||||
unactive = ', '.join(unwanted_channels - self.wanted_channels)
|
||||
if unactive:
|
||||
raise ValueError('Already unactive channel(s): {}'.format(unactive))
|
||||
|
||||
self.wanted_channels -= unwanted_channels
|
||||
|
||||
def sample_collector(self):
|
||||
class Collector(threading.Thread):
|
||||
def run(collector_self):
|
||||
for name, ch in self.channels.items():
|
||||
ch.iio_channel.enabled = (name in self.active_channels)
|
||||
|
||||
samples_count = self.buffer_samples_count or self.sample_rate_hz
|
||||
|
||||
iio_buffer = iio.Buffer(self.iio_device, samples_count,
|
||||
self.buffer_is_circular)
|
||||
# NB: This buffer creates a communication pipe to the
|
||||
# BeagleBone (or is it between the BBB and the ACME?)
|
||||
# that locks down any configuration. The IIO drivers
|
||||
# do not limit access when a buffer exists so that
|
||||
# configuring the INA226 (i.e. accessing iio.Device.attrs
|
||||
# or iio.Channel.attrs from iio.Device.channels i.e.
|
||||
# assigning to or reading from any property of this class
|
||||
# or calling its setup or reset methods) will screw up the
|
||||
# whole system and will require rebooting the BBB-ACME board!
|
||||
|
||||
self.collector_exception = None
|
||||
try:
|
||||
refilled_once = False
|
||||
while not (refilled_once and self.work_done.is_set()):
|
||||
refilled_once = True
|
||||
iio_buffer.refill()
|
||||
for name in self.active_channels:
|
||||
self.channels[name].iio_store_buffer_samples(iio_buffer)
|
||||
except Exception as e:
|
||||
self.collector_exception = e
|
||||
finally:
|
||||
del iio_buffer
|
||||
for ch in self.channels.values():
|
||||
ch.enabled = False
|
||||
|
||||
return Collector()
|
||||
|
||||
def start_capturing(self):
|
||||
if not self.wanted_channels:
|
||||
raise TargetError('No active channel: aborting.')
|
||||
|
||||
self.active_channels = self.wanted_channels.copy()
|
||||
if self.high_resolution:
|
||||
self.active_channels &= self.uncomputable_channels
|
||||
for channel, dependencies in self.computable_channels.items():
|
||||
if channel in self.wanted_channels:
|
||||
self.active_channels |= dependencies
|
||||
|
||||
self.work_done.clear()
|
||||
self.collector = self.sample_collector()
|
||||
self.collector.daemon = True
|
||||
self.collector.start()
|
||||
|
||||
def stop_capturing(self):
|
||||
self.work_done.set()
|
||||
self.collector.join()
|
||||
|
||||
if self.collector_exception:
|
||||
raise self.collector_exception
|
||||
|
||||
self.data.clear()
|
||||
for channel in self.active_channels:
|
||||
ch = self.channels[channel]
|
||||
self.data[channel] = ch.iio_get_samples(self.absolute_timestamps)
|
||||
ch.iio_forget_samples()
|
||||
|
||||
if self.high_resolution:
|
||||
res_ohm = 1e-6 * self.shunt_resistor
|
||||
current = self.data['shunt'] / res_ohm
|
||||
if 'current' in self.wanted_channels:
|
||||
self.data['current'] = current
|
||||
if 'power' in self.wanted_channels:
|
||||
self.data['power'] = current * self.data['voltage']
|
||||
for channel in set(self.data) - self.wanted_channels:
|
||||
del self.data[channel]
|
||||
|
||||
self.active_channels.clear()
|
||||
|
||||
def get_data(self):
|
||||
return self.data
|
||||
|
||||
|
||||
class BaylibreAcmeInstrument(Instrument):
|
||||
|
||||
mode = CONTINUOUS
|
||||
|
||||
MINIMAL_ACME_SD_IMAGE_VERSION = (2, 1, 3)
|
||||
MINIMAL_ACME_IIO_DRIVERS_VERSION = (0, 6)
|
||||
MINIMAL_HOST_IIO_DRIVERS_VERSION = (0, 15)
|
||||
|
||||
def __init__(self, target=None, iio_context=None,
|
||||
use_base_iio_context=False, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeInstrument, self).__init__(target)
|
||||
|
||||
if isinstance(probe_names, basestring):
|
||||
probe_names = [probe_names]
|
||||
|
||||
self.iio_context = (iio_context if not use_base_iio_context
|
||||
else iio.Context(iio_context))
|
||||
|
||||
self.check_version()
|
||||
|
||||
if probe_names is not None:
|
||||
if len(probe_names) != len(set(probe_names)):
|
||||
msg = 'Probe names should be unique: {}'
|
||||
raise ValueError(msg.format(probe_names))
|
||||
|
||||
if len(probe_names) != len(self.iio_context.devices):
|
||||
msg = ('There should be as many probe_names ({}) '
|
||||
'as detected probes ({}).')
|
||||
raise ValueError(msg.format(len(probe_names),
|
||||
len(self.iio_context.devices)))
|
||||
|
||||
probes = [IIOINA226Instrument(d) for d in self.iio_context.devices]
|
||||
|
||||
self.probes = (dict(zip(probe_names, probes)) if probe_names
|
||||
else {p.iio_device.id : p for p in probes})
|
||||
self.active_probes = set()
|
||||
|
||||
for probe in self.probes:
|
||||
for measure in ['voltage', 'power', 'current']:
|
||||
self.add_channel(site=probe, measure=measure)
|
||||
self.add_channel('timestamp', 'time_us')
|
||||
|
||||
self.data = pd.DataFrame()
|
||||
|
||||
def check_version(self):
|
||||
msg = ('The IIO drivers running on {} ({}) are out-of-date; '
|
||||
'devlib requires {} or later.')
|
||||
|
||||
if iio.version[:2] < self.MINIMAL_HOST_IIO_DRIVERS_VERSION:
|
||||
ver_str = '.'.join(map(str, iio.version[:2]))
|
||||
min_str = '.'.join(map(str, self.MINIMAL_HOST_IIO_DRIVERS_VERSION))
|
||||
raise HostError(msg.format('this host', ver_str, min_str))
|
||||
|
||||
if self.version[:2] < self.MINIMAL_ACME_IIO_DRIVERS_VERSION:
|
||||
ver_str = '.'.join(map(str, self.version[:2]))
|
||||
min_str = '.'.join(map(str, self.MINIMAL_ACME_IIO_DRIVERS_VERSION))
|
||||
raise TargetError(msg.format('the BBB', ver_str, min_str))
|
||||
|
||||
# properties
|
||||
|
||||
def probes_unique_property(self, property_name):
|
||||
probes = self.active_probes or self.probes
|
||||
try:
|
||||
# This will fail if there is not exactly one single value:
|
||||
(value,) = {getattr(self.probes[p], property_name) for p in probes}
|
||||
except ValueError:
|
||||
msg = 'Probes have different values for {}.'
|
||||
raise ValueError(msg.format(property_name) if probes else 'No probe')
|
||||
return value
|
||||
|
||||
@property
|
||||
def version(self):
|
||||
return self.iio_context.version
|
||||
|
||||
@property
|
||||
def OVERSAMPLING_RATIOS_AVAILABLE(self):
|
||||
return self.probes_unique_property('OVERSAMPLING_RATIOS_AVAILABLE')
|
||||
|
||||
@property
|
||||
def INTEGRATION_TIMES_AVAILABLE(self):
|
||||
return self.probes_unique_property('INTEGRATION_TIMES_AVAILABLE')
|
||||
|
||||
@property
|
||||
def sample_rate_hz(self):
|
||||
return self.probes_unique_property('sample_rate_hz')
|
||||
|
||||
@sample_rate_hz.setter
|
||||
# This setter is required for compliance with the inherited methods
|
||||
def sample_rate_hz(self, value):
|
||||
if value is not None:
|
||||
raise AttributeError("can't set attribute")
|
||||
|
||||
# initialization and teardown
|
||||
|
||||
def setup(self, shunt_resistor,
|
||||
integration_time_bus,
|
||||
integration_time_shunt,
|
||||
oversampling_ratio,
|
||||
buffer_samples_count=None,
|
||||
buffer_is_circular=False,
|
||||
absolute_timestamps=False,
|
||||
high_resolution=True):
|
||||
|
||||
def pseudo_list(v, i):
|
||||
try:
|
||||
return v[i]
|
||||
except TypeError:
|
||||
return v
|
||||
|
||||
for i, p in enumerate(self.probes.values()):
|
||||
for attr, val in locals().items():
|
||||
if attr != 'self':
|
||||
setattr(p, attr, pseudo_list(val, i))
|
||||
|
||||
self.absolute_timestamps = all(pseudo_list(absolute_timestamps, i)
|
||||
for i in range(len(self.probes)))
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
|
||||
# populate self.active_channels:
|
||||
super(BaylibreAcmeInstrument, self).reset(sites, kinds, channels)
|
||||
|
||||
for ch in self.active_channels:
|
||||
if ch.site != 'timestamp':
|
||||
self.probes[ch.site].activate(['timestamp', ch.kind])
|
||||
self.active_probes.add(ch.site)
|
||||
|
||||
def teardown(self):
|
||||
del self.active_channels[:]
|
||||
self.active_probes.clear()
|
||||
|
||||
def start(self):
|
||||
for p in self.active_probes:
|
||||
self.probes[p].start_capturing()
|
||||
|
||||
def stop(self):
|
||||
for p in self.active_probes:
|
||||
self.probes[p].stop_capturing()
|
||||
|
||||
max_rate_probe = max(self.active_probes,
|
||||
key=lambda p: self.probes[p].sample_rate_hz)
|
||||
|
||||
probes_dataframes = {
|
||||
probe: pd.DataFrame.from_dict(self.probes[probe].get_data())
|
||||
.set_index('timestamp')
|
||||
for probe in self.active_probes
|
||||
}
|
||||
|
||||
for df in probes_dataframes.values():
|
||||
df.set_index(pd.to_datetime(df.index, unit='us'), inplace=True)
|
||||
|
||||
final_index = probes_dataframes[max_rate_probe].index
|
||||
|
||||
df = pd.concat(probes_dataframes, axis=1).sort_index()
|
||||
df.columns = ['_'.join(c).strip() for c in df.columns.values]
|
||||
|
||||
self.data = df.interpolate('time').reindex(final_index)
|
||||
|
||||
if not self.absolute_timestamps:
|
||||
epoch_index = self.data.index.astype(np.int64) // 1000
|
||||
self.data.set_index(epoch_index, inplace=True)
|
||||
# self.data.index is in [us]
|
||||
# columns are in volts, amps and watts
|
||||
|
||||
def get_data(self, outfile=None, **to_csv_kwargs):
|
||||
if outfile is None:
|
||||
return self.data
|
||||
|
||||
self.data.to_csv(outfile, **to_csv_kwargs)
|
||||
return MeasurementsCsv(outfile, self.active_channels)
|
||||
|
||||
class BaylibreAcmeLocalInstrument(BaylibreAcmeInstrument):
|
||||
|
||||
def __init__(self, target=None, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeLocalInstrument, self).__init__(
|
||||
target=target,
|
||||
iio_context=iio.LocalContext(),
|
||||
probe_names=probe_names
|
||||
)
|
||||
|
||||
class BaylibreAcmeXMLInstrument(BaylibreAcmeInstrument):
|
||||
|
||||
def __init__(self, target=None, xmlfile=None, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeXMLInstrument, self).__init__(
|
||||
target=target,
|
||||
iio_context=iio.XMLContext(xmlfile),
|
||||
probe_names=probe_names
|
||||
)
|
||||
|
||||
class BaylibreAcmeNetworkInstrument(BaylibreAcmeInstrument):
|
||||
|
||||
def __init__(self, target=None, hostname=None, probe_names=None):
|
||||
|
||||
if iio_import_failed:
|
||||
raise HostError('Could not import "iio": {}'.format(iio_import_error))
|
||||
|
||||
super(BaylibreAcmeNetworkInstrument, self).__init__(
|
||||
target=target,
|
||||
iio_context=iio.NetworkContext(hostname),
|
||||
probe_names=probe_names
|
||||
)
|
||||
|
||||
try:
|
||||
self.ssh_connection = SshConnection(hostname, username='root', password=None)
|
||||
except TargetError as e:
|
||||
msg = 'No SSH connexion could be established to {}: {}'
|
||||
self.logger.debug(msg.format(hostname, e))
|
||||
self.ssh_connection = None
|
||||
|
||||
def check_version(self):
|
||||
super(BaylibreAcmeNetworkInstrument, self).check_version()
|
||||
|
||||
cmd = r"""sed -nr 's/^VERSION_ID="(.+)"$/\1/p' < /etc/os-release"""
|
||||
try:
|
||||
ver_str = self._ssh(cmd).rstrip()
|
||||
ver = tuple(map(int, ver_str.split('.')))
|
||||
except Exception as e:
|
||||
self.logger.debug('Unable to verify ACME SD image version through SSH: {}'.format(e))
|
||||
else:
|
||||
if ver < self.MINIMAL_ACME_SD_IMAGE_VERSION:
|
||||
min_str = '.'.join(map(str, self.MINIMAL_ACME_SD_IMAGE_VERSION))
|
||||
msg = ('The ACME SD image for the BBB (ver. {}) is out-of-date; '
|
||||
'devlib requires {} or later.')
|
||||
raise TargetError(msg.format(ver_str, min_str))
|
||||
|
||||
def _ssh(self, cmd=''):
|
||||
"""Connections are assumed to be rare."""
|
||||
if self.ssh_connection is None:
|
||||
raise TargetError('No SSH connection; see log.')
|
||||
return self.ssh_connection.execute(cmd)
|
||||
|
||||
def _reboot(self):
|
||||
"""Always delete the object after calling its _reboot method"""
|
||||
try:
|
||||
self._ssh('reboot')
|
||||
except:
|
||||
pass
|
@@ -14,20 +14,23 @@
|
||||
#
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
from itertools import chain
|
||||
import time
|
||||
from itertools import chain, zip_longest
|
||||
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvwriter, create_reader
|
||||
from devlib.utils.misc import unique
|
||||
|
||||
try:
|
||||
from daqpower.client import execute_command, Status
|
||||
from daqpower.config import DeviceConfiguration, ServerConfiguration
|
||||
from daqpower.client import DaqClient
|
||||
from daqpower.config import DeviceConfiguration
|
||||
except ImportError as e:
|
||||
execute_command, Status = None, None
|
||||
DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None
|
||||
DaqClient = None
|
||||
DeviceConfiguration = None
|
||||
import_error_mesg = e.args[0] if e.args else str(e)
|
||||
|
||||
|
||||
@@ -44,25 +47,30 @@ class DaqInstrument(Instrument):
|
||||
dv_range=0.2,
|
||||
sample_rate_hz=10000,
|
||||
channel_map=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
|
||||
keep_raw=False,
|
||||
time_as_clock_boottime=True
|
||||
):
|
||||
# pylint: disable=no-member
|
||||
super(DaqInstrument, self).__init__(target)
|
||||
self.keep_raw = keep_raw
|
||||
self._need_reset = True
|
||||
self._raw_files = []
|
||||
if execute_command is None:
|
||||
self.tempdir = None
|
||||
self.target_boottime_clock_at_start = 0.0
|
||||
if DaqClient is None:
|
||||
raise HostError('Could not import "daqpower": {}'.format(import_error_mesg))
|
||||
if labels is None:
|
||||
labels = ['PORT_{}'.format(i) for i in range(len(resistor_values))]
|
||||
if len(labels) != len(resistor_values):
|
||||
raise ValueError('"labels" and "resistor_values" must be of the same length')
|
||||
self.server_config = ServerConfiguration(host=host,
|
||||
port=port)
|
||||
result = self.execute('list_devices')
|
||||
if result.status == Status.OK:
|
||||
if device_id not in result.data:
|
||||
raise ValueError('Device "{}" is not found on the DAQ server.'.format(device_id))
|
||||
elif result.status != Status.OKISH:
|
||||
raise HostError('Problem querying DAQ server: {}'.format(result.message))
|
||||
self.daq_client = DaqClient(host, port)
|
||||
try:
|
||||
devices = self.daq_client.list_devices()
|
||||
if device_id not in devices:
|
||||
msg = 'Device "{}" is not found on the DAQ server. Available devices are: "{}"'
|
||||
raise ValueError(msg.format(device_id, ', '.join(devices)))
|
||||
except Exception as e:
|
||||
raise HostError('Problem querying DAQ server: {}'.format(e))
|
||||
|
||||
self.device_config = DeviceConfiguration(device_id=device_id,
|
||||
v_range=v_range,
|
||||
@@ -72,36 +80,63 @@ class DaqInstrument(Instrument):
|
||||
channel_map=channel_map,
|
||||
labels=labels)
|
||||
self.sample_rate_hz = sample_rate_hz
|
||||
self.time_as_clock_boottime = time_as_clock_boottime
|
||||
|
||||
self.add_channel('Time', 'time')
|
||||
for label in labels:
|
||||
for kind in ['power', 'voltage']:
|
||||
self.add_channel(label, kind)
|
||||
|
||||
if time_as_clock_boottime:
|
||||
host_path = os.path.join(PACKAGE_BIN_DIRECTORY, self.target.abi,
|
||||
'get_clock_boottime')
|
||||
self.clock_boottime_cmd = self.target.install_if_needed(host_path,
|
||||
search_system_binaries=False)
|
||||
|
||||
def calculate_boottime_offset(self):
|
||||
time_before = time.time()
|
||||
out = self.target.execute(self.clock_boottime_cmd)
|
||||
time_after = time.time()
|
||||
|
||||
remote_clock_boottime = float(out)
|
||||
propagation_delay = (time_after - time_before) / 2
|
||||
boottime_at_end = remote_clock_boottime + propagation_delay
|
||||
|
||||
return time_after - boottime_at_end
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(DaqInstrument, self).reset(sites, kinds, channels)
|
||||
self.execute('close')
|
||||
result = self.execute('configure', config=self.device_config)
|
||||
if not result.status == Status.OK: # pylint: disable=no-member
|
||||
raise HostError(result.message)
|
||||
self.daq_client.close()
|
||||
self.daq_client.configure(self.device_config)
|
||||
self._need_reset = False
|
||||
self._raw_files = []
|
||||
|
||||
def start(self):
|
||||
if self._need_reset:
|
||||
self.reset()
|
||||
self.execute('start')
|
||||
# Preserve channel order
|
||||
self.reset(channels=self.channels.keys())
|
||||
|
||||
if self.time_as_clock_boottime:
|
||||
target_boottime_offset = self.calculate_boottime_offset()
|
||||
time_start = time.time()
|
||||
|
||||
self.daq_client.start()
|
||||
|
||||
if self.time_as_clock_boottime:
|
||||
time_end = time.time()
|
||||
self.target_boottime_clock_at_start = (time_start + time_end) / 2 - target_boottime_offset
|
||||
|
||||
def stop(self):
|
||||
self.execute('stop')
|
||||
self.daq_client.stop()
|
||||
self._need_reset = True
|
||||
|
||||
def get_data(self, outfile): # pylint: disable=R0914
|
||||
tempdir = tempfile.mkdtemp(prefix='daq-raw-')
|
||||
self.execute('get_data', output_directory=tempdir)
|
||||
self.tempdir = tempfile.mkdtemp(prefix='daq-raw-')
|
||||
self.daq_client.get_data(self.tempdir)
|
||||
raw_file_map = {}
|
||||
for entry in os.listdir(tempdir):
|
||||
for entry in os.listdir(self.tempdir):
|
||||
site = os.path.splitext(entry)[0]
|
||||
path = os.path.join(tempdir, entry)
|
||||
path = os.path.join(self.tempdir, entry)
|
||||
raw_file_map[site] = path
|
||||
self._raw_files.append(path)
|
||||
|
||||
@@ -116,32 +151,32 @@ class DaqInstrument(Instrument):
|
||||
site_readers[site] = reader
|
||||
file_handles.append(fh)
|
||||
except KeyError:
|
||||
message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
|
||||
raise HostError(message.format(site, tempdir))
|
||||
if not site.startswith("Time"):
|
||||
message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
|
||||
raise HostError(message.format(site, self.tempdir))
|
||||
|
||||
# The first row is the headers
|
||||
channel_order = []
|
||||
channel_order = ['Time_time']
|
||||
for site, reader in site_readers.items():
|
||||
channel_order.extend(['{}_{}'.format(site, kind)
|
||||
for kind in next(reader)])
|
||||
|
||||
def _read_next_rows():
|
||||
parts = []
|
||||
for reader in site_readers.values():
|
||||
try:
|
||||
parts.extend(next(reader))
|
||||
except StopIteration:
|
||||
parts.extend([None, None])
|
||||
return list(chain(parts))
|
||||
def _read_rows():
|
||||
row_iter = zip_longest(*site_readers.values(), fillvalue=(None, None))
|
||||
for raw_row in row_iter:
|
||||
raw_row = list(chain.from_iterable(raw_row))
|
||||
raw_row.insert(0, _read_rows.row_time_s)
|
||||
yield raw_row
|
||||
_read_rows.row_time_s += 1.0 / self.sample_rate_hz
|
||||
|
||||
_read_rows.row_time_s = self.target_boottime_clock_at_start
|
||||
|
||||
with csvwriter(outfile) as writer:
|
||||
field_names = [c.label for c in self.active_channels]
|
||||
writer.writerow(field_names)
|
||||
raw_row = _read_next_rows()
|
||||
while any(raw_row):
|
||||
for raw_row in _read_rows():
|
||||
row = [raw_row[channel_order.index(f)] for f in field_names]
|
||||
writer.writerow(row)
|
||||
raw_row = _read_next_rows()
|
||||
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
finally:
|
||||
@@ -152,8 +187,7 @@ class DaqInstrument(Instrument):
|
||||
return self._raw_files
|
||||
|
||||
def teardown(self):
|
||||
self.execute('close')
|
||||
|
||||
def execute(self, command, **kwargs):
|
||||
return execute_command(self.server_config, command, **kwargs)
|
||||
|
||||
self.daq_client.close()
|
||||
if not self.keep_raw:
|
||||
if self.tempdir and os.path.isdir(self.tempdir):
|
||||
shutil.rmtree(self.tempdir)
|
||||
|
@@ -19,6 +19,7 @@ import tempfile
|
||||
import struct
|
||||
import subprocess
|
||||
import sys
|
||||
from pipes import quote
|
||||
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
@@ -33,9 +34,11 @@ class EnergyProbeInstrument(Instrument):
|
||||
def __init__(self, target, resistor_values,
|
||||
labels=None,
|
||||
device_entry='/dev/ttyACM0',
|
||||
keep_raw=False
|
||||
):
|
||||
super(EnergyProbeInstrument, self).__init__(target)
|
||||
self.resistor_values = resistor_values
|
||||
self.keep_raw = keep_raw
|
||||
if labels is not None:
|
||||
self.labels = labels
|
||||
else:
|
||||
@@ -65,7 +68,10 @@ class EnergyProbeInstrument(Instrument):
|
||||
parts = ['-r {}:{} '.format(i, int(1000 * rval))
|
||||
for i, rval in enumerate(self.resistor_values)]
|
||||
rstring = ''.join(parts)
|
||||
self.command = '{} -d {} -l {} {}'.format(self.caiman, self.device_entry, rstring, self.raw_output_directory)
|
||||
self.command = '{} -d {} -l {} {}'.format(
|
||||
quote(self.caiman), quote(self.device_entry),
|
||||
rstring, quote(self.raw_output_directory)
|
||||
)
|
||||
self.raw_data_file = None
|
||||
|
||||
def start(self):
|
||||
@@ -82,8 +88,8 @@ class EnergyProbeInstrument(Instrument):
|
||||
if self.process.returncode is not None:
|
||||
stdout, stderr = self.process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
stdout = stdout.decode(sys.stdout.encoding, 'replace')
|
||||
stderr = stderr.decode(sys.stdout.encoding, 'replace')
|
||||
stdout = stdout.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
stderr = stderr.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
raise HostError(
|
||||
'Energy Probe: Caiman exited unexpectedly with exit code {}.\n'
|
||||
'stdout:\n{}\nstderr:\n{}'.format(self.process.returncode,
|
||||
@@ -114,7 +120,7 @@ class EnergyProbeInstrument(Instrument):
|
||||
writer.writerow(row)
|
||||
except struct.error:
|
||||
if not_a_full_row_seen:
|
||||
self.logger.warn('possibly missaligned caiman raw data, row contained {} bytes'.format(len(data)))
|
||||
self.logger.warning('possibly missaligned caiman raw data, row contained {} bytes'.format(len(data)))
|
||||
continue
|
||||
else:
|
||||
not_a_full_row_seen = True
|
||||
@@ -122,3 +128,8 @@ class EnergyProbeInstrument(Instrument):
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
||||
|
||||
def teardown(self):
|
||||
if self.keep_raw:
|
||||
if os.path.isfile(self.raw_data_file):
|
||||
os.remove(self.raw_data_file)
|
||||
|
@@ -14,6 +14,8 @@
|
||||
#
|
||||
|
||||
from __future__ import division
|
||||
import os
|
||||
|
||||
from devlib.instrument import (Instrument, CONTINUOUS,
|
||||
MeasurementsCsv, MeasurementType)
|
||||
from devlib.utils.rendering import (GfxinfoFrameCollector,
|
||||
@@ -41,6 +43,7 @@ class FramesInstrument(Instrument):
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(FramesInstrument, self).reset(sites, kinds, channels)
|
||||
# pylint: disable=not-callable
|
||||
self.collector = self.collector_cls(self.target, self.period,
|
||||
self.collector_target, self.header)
|
||||
self._need_reset = False
|
||||
@@ -69,6 +72,11 @@ class FramesInstrument(Instrument):
|
||||
def _init_channels(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def teardown(self):
|
||||
if not self.keep_raw:
|
||||
if os.path.isfile(self._raw_file):
|
||||
os.remove(self._raw_file)
|
||||
|
||||
|
||||
class GfxInfoFramesInstrument(FramesInstrument):
|
||||
|
||||
@@ -81,7 +89,7 @@ class GfxInfoFramesInstrument(FramesInstrument):
|
||||
if entry == 'Flags':
|
||||
self.add_channel('Flags', MeasurementType('flags', 'flags'))
|
||||
else:
|
||||
self.add_channel(entry, 'time_us')
|
||||
self.add_channel(entry, 'time_ns')
|
||||
self.header = [chan.label for chan in self.channels.values()]
|
||||
|
||||
|
||||
|
@@ -13,11 +13,10 @@
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import division
|
||||
import re
|
||||
|
||||
from devlib.platform.gem5 import Gem5SimulationPlatform
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
|
||||
@@ -37,9 +36,9 @@ class Gem5PowerInstrument(Instrument):
|
||||
system.cluster0.cores0.power_model.static_power
|
||||
'''
|
||||
if not isinstance(target.platform, Gem5SimulationPlatform):
|
||||
raise TargetError('Gem5PowerInstrument requires a gem5 platform')
|
||||
raise TargetStableError('Gem5PowerInstrument requires a gem5 platform')
|
||||
if not target.has('gem5stats'):
|
||||
raise TargetError('Gem5StatsModule is not loaded')
|
||||
raise TargetStableError('Gem5StatsModule is not loaded')
|
||||
super(Gem5PowerInstrument, self).__init__(target)
|
||||
|
||||
# power_sites is assumed to be a list later
|
||||
@@ -69,7 +68,7 @@ class Gem5PowerInstrument(Instrument):
|
||||
with csvwriter(outfile) as writer:
|
||||
writer.writerow([c.label for c in self.active_channels]) # headers
|
||||
sites_to_match = [self.site_mapping.get(s, s) for s in active_sites]
|
||||
for rec, rois in self.target.gem5stats.match_iter(sites_to_match,
|
||||
for rec, _ in self.target.gem5stats.match_iter(sites_to_match,
|
||||
[self.roi_label], self._base_stats_dump):
|
||||
writer.writerow([rec[s] for s in sites_to_match])
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
@@ -77,4 +76,3 @@ class Gem5PowerInstrument(Instrument):
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(Gem5PowerInstrument, self).reset(sites, kinds, channels)
|
||||
self._base_stats_dump = self.target.gem5stats.next_dump_no()
|
||||
|
||||
|
@@ -16,7 +16,7 @@ from __future__ import division
|
||||
import re
|
||||
|
||||
from devlib.instrument import Instrument, Measurement, INSTANTANEOUS
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
|
||||
|
||||
class HwmonInstrument(Instrument):
|
||||
@@ -35,7 +35,7 @@ class HwmonInstrument(Instrument):
|
||||
|
||||
def __init__(self, target):
|
||||
if not hasattr(target, 'hwmon'):
|
||||
raise TargetError('Target does not support HWMON')
|
||||
raise TargetStableError('Target does not support HWMON')
|
||||
super(HwmonInstrument, self).__init__(target)
|
||||
|
||||
self.logger.debug('Discovering available HWMON sensors...')
|
||||
|
@@ -21,12 +21,11 @@ from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.instrument import Instrument, CONTINUOUS, MeasurementsCsv
|
||||
from devlib.exception import HostError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
from devlib.utils.misc import which
|
||||
|
||||
|
||||
INSTALL_INSTRUCTIONS="""
|
||||
INSTALL_INSTRUCTIONS = """
|
||||
MonsoonInstrument requires the monsoon.py tool, available from AOSP:
|
||||
|
||||
https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py
|
||||
@@ -68,6 +67,7 @@ class MonsoonInstrument(Instrument):
|
||||
|
||||
self.process = None
|
||||
self.output = None
|
||||
self.buffer_file = None
|
||||
|
||||
self.sample_rate_hz = 500
|
||||
self.add_channel('output', 'power')
|
||||
@@ -101,8 +101,8 @@ class MonsoonInstrument(Instrument):
|
||||
if process.returncode is not None:
|
||||
stdout, stderr = process.communicate()
|
||||
if sys.version_info[0] == 3:
|
||||
stdout = stdout.encode(sys.stdout.encoding)
|
||||
stderr = stderr.encode(sys.stdout.encoding)
|
||||
stdout = stdout.encode(sys.stdout.encoding or 'utf-8')
|
||||
stderr = stderr.encode(sys.stdout.encoding or 'utf-8')
|
||||
raise HostError(
|
||||
'Monsoon script exited unexpectedly with exit code {}.\n'
|
||||
'stdout:\n{}\nstderr:\n{}'.format(process.returncode,
|
||||
@@ -110,7 +110,7 @@ class MonsoonInstrument(Instrument):
|
||||
|
||||
process.send_signal(signal.SIGINT)
|
||||
|
||||
stderr = process.stderr.read()
|
||||
stderr = process.stderr.read()
|
||||
|
||||
self.buffer_file.close()
|
||||
with open(self.buffer_file.name) as f:
|
||||
@@ -124,7 +124,7 @@ class MonsoonInstrument(Instrument):
|
||||
if self.process:
|
||||
raise RuntimeError('`get_data` called before `stop`')
|
||||
|
||||
stdout, stderr = self.output
|
||||
stdout, _ = self.output
|
||||
|
||||
with csvwriter(outfile) as writer:
|
||||
active_sites = [c.site for c in self.active_channels]
|
||||
|
@@ -22,7 +22,7 @@ from collections import defaultdict
|
||||
from future.moves.itertools import zip_longest
|
||||
|
||||
from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.utils.android import ApkInfo
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
@@ -84,7 +84,7 @@ class NetstatsInstrument(Instrument):
|
||||
|
||||
"""
|
||||
if target.os != 'android':
|
||||
raise TargetError('netstats insturment only supports Android targets')
|
||||
raise TargetStableError('netstats instrument only supports Android targets')
|
||||
if apk is None:
|
||||
apk = os.path.join(THIS_DIR, 'netstats.apk')
|
||||
if not os.path.isfile(apk):
|
||||
@@ -101,6 +101,7 @@ class NetstatsInstrument(Instrument):
|
||||
self.add_channel(package, 'tx')
|
||||
self.add_channel(package, 'rx')
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg,arguments-differ
|
||||
def setup(self, force=False, *args, **kwargs):
|
||||
if self.target.package_is_installed(self.package):
|
||||
if force:
|
||||
|
@@ -37,6 +37,9 @@ class Module(object):
|
||||
# serial).
|
||||
# 'connected' -- installed when a connection to to the target has been
|
||||
# established. This is the default.
|
||||
# 'setup' -- installed after initial setup of the device has been performed.
|
||||
# This allows the module to utilize assets deployed during the
|
||||
# setup stage for example 'Busybox'.
|
||||
stage = 'connected'
|
||||
|
||||
@staticmethod
|
||||
@@ -61,7 +64,7 @@ class Module(object):
|
||||
self.logger = logging.getLogger(self.name)
|
||||
|
||||
|
||||
class HardRestModule(Module): # pylint: disable=R0921
|
||||
class HardRestModule(Module):
|
||||
|
||||
kind = 'hard_reset'
|
||||
|
||||
@@ -69,7 +72,7 @@ class HardRestModule(Module): # pylint: disable=R0921
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class BootModule(Module): # pylint: disable=R0921
|
||||
class BootModule(Module):
|
||||
|
||||
kind = 'boot'
|
||||
|
||||
@@ -88,7 +91,7 @@ class FlashModule(Module):
|
||||
|
||||
kind = 'flash'
|
||||
|
||||
def __call__(self, image_bundle=None, images=None, boot_config=None):
|
||||
def __call__(self, image_bundle=None, images=None, boot_config=None, connect=True):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
|
@@ -54,7 +54,7 @@ class FastbootFlashModule(FlashModule):
|
||||
def probe(target):
|
||||
return target.os == 'android'
|
||||
|
||||
def __call__(self, image_bundle=None, images=None, bootargs=None):
|
||||
def __call__(self, image_bundle=None, images=None, bootargs=None, connect=True):
|
||||
if bootargs:
|
||||
raise ValueError('{} does not support boot configuration'.format(self.name))
|
||||
self.prelude_done = False
|
||||
@@ -67,7 +67,8 @@ class FastbootFlashModule(FlashModule):
|
||||
self.logger.debug('flashing {}'.format(partition))
|
||||
self._flash_image(self.target, partition, expand_path(image_path))
|
||||
fastboot_command('reboot')
|
||||
self.target.connect(timeout=180)
|
||||
if connect:
|
||||
self.target.connect(timeout=180)
|
||||
|
||||
def _validate_image_bundle(self, image_bundle):
|
||||
if not tarfile.is_tarfile(image_bundle):
|
||||
@@ -125,4 +126,3 @@ def get_mapping(base_dir, partition_file):
|
||||
HostError('file {} was not found in the bundle or was misplaced'.format(pair[1]))
|
||||
mapping[pair[0]] = image_path
|
||||
return mapping
|
||||
|
||||
|
@@ -60,150 +60,150 @@ class BigLittleModule(Module):
|
||||
|
||||
def list_bigs_frequencies(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.list_frequencies(bigs_online[0])
|
||||
|
||||
def list_bigs_governors(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.list_governors(bigs_online[0])
|
||||
|
||||
def list_bigs_governor_tunables(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.list_governor_tunables(bigs_online[0])
|
||||
|
||||
def list_littles_frequencies(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.list_frequencies(littles_online[0])
|
||||
|
||||
def list_littles_governors(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.list_governors(littles_online[0])
|
||||
|
||||
def list_littles_governor_tunables(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.list_governor_tunables(littles_online[0])
|
||||
|
||||
def get_bigs_governor(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.get_governor(bigs_online[0])
|
||||
|
||||
def get_bigs_governor_tunables(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.get_governor_tunables(bigs_online[0])
|
||||
|
||||
def get_bigs_frequency(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.get_frequency(bigs_online[0])
|
||||
|
||||
def get_bigs_min_frequency(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.get_min_frequency(bigs_online[0])
|
||||
|
||||
def get_bigs_max_frequency(self):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
return self.target.cpufreq.get_max_frequency(bigs_online[0])
|
||||
|
||||
def get_littles_governor(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.get_governor(littles_online[0])
|
||||
|
||||
def get_littles_governor_tunables(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.get_governor_tunables(littles_online[0])
|
||||
|
||||
def get_littles_frequency(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.get_frequency(littles_online[0])
|
||||
|
||||
def get_littles_min_frequency(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.get_min_frequency(littles_online[0])
|
||||
|
||||
def get_littles_max_frequency(self):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
return self.target.cpufreq.get_max_frequency(littles_online[0])
|
||||
|
||||
def set_bigs_governor(self, governor, **kwargs):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
self.target.cpufreq.set_governor(bigs_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_governor_tunables(self, governor, **kwargs):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
self.target.cpufreq.set_governor_tunables(bigs_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_frequency(self, frequency, exact=True):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
self.target.cpufreq.set_frequency(bigs_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_min_frequency(self, frequency, exact=True):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
self.target.cpufreq.set_min_frequency(bigs_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_bigs_max_frequency(self, frequency, exact=True):
|
||||
bigs_online = self.bigs_online
|
||||
if len(bigs_online) > 0:
|
||||
if bigs_online:
|
||||
self.target.cpufreq.set_max_frequency(bigs_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All bigs appear to be offline")
|
||||
|
||||
def set_littles_governor(self, governor, **kwargs):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
self.target.cpufreq.set_governor(littles_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_governor_tunables(self, governor, **kwargs):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
self.target.cpufreq.set_governor_tunables(littles_online[0], governor, **kwargs)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_frequency(self, frequency, exact=True):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
self.target.cpufreq.set_frequency(littles_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_min_frequency(self, frequency, exact=True):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
self.target.cpufreq.set_min_frequency(littles_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
||||
def set_littles_max_frequency(self, frequency, exact=True):
|
||||
littles_online = self.littles_online
|
||||
if len(littles_online) > 0:
|
||||
if littles_online:
|
||||
self.target.cpufreq.set_max_frequency(littles_online[0], frequency, exact)
|
||||
else:
|
||||
raise ValueError("All littles appear to be offline")
|
||||
|
@@ -18,7 +18,7 @@ import re
|
||||
from collections import namedtuple
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.utils.misc import list_to_ranges, isiterable
|
||||
from devlib.utils.types import boolean
|
||||
|
||||
@@ -121,18 +121,19 @@ class Controller(object):
|
||||
cgroups.append(cg)
|
||||
return cgroups
|
||||
|
||||
def move_tasks(self, source, dest, exclude=[]):
|
||||
try:
|
||||
srcg = self._cgroups[source]
|
||||
dstg = self._cgroups[dest]
|
||||
except KeyError as e:
|
||||
raise ValueError('Unkown group: {}'.format(e))
|
||||
output = self.target._execute_util(
|
||||
def move_tasks(self, source, dest, exclude=None):
|
||||
if exclude is None:
|
||||
exclude = []
|
||||
|
||||
srcg = self.cgroup(source)
|
||||
dstg = self.cgroup(dest)
|
||||
|
||||
self.target._execute_util( # pylint: disable=protected-access
|
||||
'cgroups_tasks_move {} {} \'{}\''.format(
|
||||
srcg.directory, dstg.directory, exclude),
|
||||
as_root=True)
|
||||
|
||||
def move_all_tasks_to(self, dest, exclude=[]):
|
||||
def move_all_tasks_to(self, dest, exclude=None):
|
||||
"""
|
||||
Move all the tasks to the specified CGroup
|
||||
|
||||
@@ -145,8 +146,10 @@ class Controller(object):
|
||||
tasks.
|
||||
|
||||
:param exclude: list of commands to keep in the root CGroup
|
||||
:type exlude: list(str)
|
||||
:type exclude: list(str)
|
||||
"""
|
||||
if exclude is None:
|
||||
exclude = []
|
||||
|
||||
if isinstance(exclude, str):
|
||||
exclude = [exclude]
|
||||
@@ -154,21 +157,22 @@ class Controller(object):
|
||||
raise ValueError('wrong type for "exclude" parameter, '
|
||||
'it must be a str or a list')
|
||||
|
||||
logging.debug('Moving all tasks into %s', dest)
|
||||
self.logger.debug('Moving all tasks into %s', dest)
|
||||
|
||||
# Build list of tasks to exclude
|
||||
grep_filters = ''
|
||||
for comm in exclude:
|
||||
grep_filters += '-e {} '.format(comm)
|
||||
logging.debug(' using grep filter: %s', grep_filters)
|
||||
self.logger.debug(' using grep filter: %s', grep_filters)
|
||||
if grep_filters != '':
|
||||
logging.debug(' excluding tasks which name matches:')
|
||||
logging.debug(' %s', ', '.join(exclude))
|
||||
self.logger.debug(' excluding tasks which name matches:')
|
||||
self.logger.debug(' %s', ', '.join(exclude))
|
||||
|
||||
for cgroup in self._cgroups:
|
||||
for cgroup in self.list_all():
|
||||
if cgroup != dest:
|
||||
self.move_tasks(cgroup, dest, grep_filters)
|
||||
|
||||
# pylint: disable=too-many-locals
|
||||
def tasks(self, cgroup,
|
||||
filter_tid='',
|
||||
filter_tname='',
|
||||
@@ -203,8 +207,8 @@ class Controller(object):
|
||||
try:
|
||||
cg = self._cgroups[cgroup]
|
||||
except KeyError as e:
|
||||
raise ValueError('Unkown group: {}'.format(e))
|
||||
output = self.target._execute_util(
|
||||
raise ValueError('Unknown group: {}'.format(e))
|
||||
output = self.target._execute_util( # pylint: disable=protected-access
|
||||
'cgroups_tasks_in {}'.format(cg.directory),
|
||||
as_root=True)
|
||||
entries = output.splitlines()
|
||||
@@ -234,7 +238,7 @@ class Controller(object):
|
||||
try:
|
||||
cg = self._cgroups[cgroup]
|
||||
except KeyError as e:
|
||||
raise ValueError('Unkown group: {}'.format(e))
|
||||
raise ValueError('Unknown group: {}'.format(e))
|
||||
output = self.target.execute(
|
||||
'{} wc -l {}/tasks'.format(
|
||||
self.target.busybox, cg.directory),
|
||||
@@ -257,8 +261,9 @@ class CGroup(object):
|
||||
|
||||
# Control cgroup path
|
||||
self.directory = controller.mount_point
|
||||
|
||||
if name != '/':
|
||||
self.directory = self.target.path.join(controller.mount_point, name[1:])
|
||||
self.directory = self.target.path.join(controller.mount_point, name.strip('/'))
|
||||
|
||||
# Setup path for tasks file
|
||||
self.tasks_file = self.target.path.join(self.directory, 'tasks')
|
||||
@@ -276,17 +281,15 @@ class CGroup(object):
|
||||
self.target.execute('[ -d {0} ]'\
|
||||
.format(self.directory), as_root=True)
|
||||
return True
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
return False
|
||||
|
||||
def get(self):
|
||||
conf = {}
|
||||
|
||||
logging.debug('Reading %s attributes from:',
|
||||
self.controller.kind)
|
||||
logging.debug(' %s',
|
||||
self.directory)
|
||||
output = self.target._execute_util(
|
||||
self.logger.debug('Reading %s attributes from:', self.controller.kind)
|
||||
self.logger.debug(' %s', self.directory)
|
||||
output = self.target._execute_util( # pylint: disable=protected-access
|
||||
'cgroups_get_attributes {} {}'.format(
|
||||
self.directory, self.controller.kind),
|
||||
as_root=True)
|
||||
@@ -302,7 +305,7 @@ class CGroup(object):
|
||||
if isiterable(attrs[idx]):
|
||||
attrs[idx] = list_to_ranges(attrs[idx])
|
||||
# Build attribute path
|
||||
if self.controller._noprefix:
|
||||
if self.controller._noprefix: # pylint: disable=protected-access
|
||||
attr_name = '{}'.format(idx)
|
||||
else:
|
||||
attr_name = '{}.{}'.format(self.controller.kind, idx)
|
||||
@@ -314,7 +317,7 @@ class CGroup(object):
|
||||
# Set the attribute value
|
||||
try:
|
||||
self.target.write_value(path, attrs[idx])
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
# Check if the error is due to a non-existing attribute
|
||||
attrs = self.get()
|
||||
if idx not in attrs:
|
||||
@@ -324,7 +327,7 @@ class CGroup(object):
|
||||
|
||||
def get_tasks(self):
|
||||
task_ids = self.target.read_value(self.tasks_file).split()
|
||||
logging.debug('Tasks: %s', task_ids)
|
||||
self.logger.debug('Tasks: %s', task_ids)
|
||||
return list(map(int, task_ids))
|
||||
|
||||
def add_task(self, tid):
|
||||
@@ -363,7 +366,7 @@ class CgroupsModule(Module):
|
||||
|
||||
# Get the list of the available controllers
|
||||
subsys = self.list_subsystems()
|
||||
if len(subsys) == 0:
|
||||
if not subsys:
|
||||
self.logger.warning('No CGroups controller available')
|
||||
return
|
||||
|
||||
@@ -384,9 +387,9 @@ class CgroupsModule(Module):
|
||||
controller = Controller(ss.name, hid, hierarchy[hid])
|
||||
try:
|
||||
controller.mount(self.target, self.cgroup_root)
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
message = 'Failed to mount "{}" controller'
|
||||
raise TargetError(message.format(controller.kind))
|
||||
raise TargetStableError(message.format(controller.kind))
|
||||
self.logger.info(' %-12s : %s', controller.kind,
|
||||
controller.mount_point)
|
||||
self.controllers[ss.name] = controller
|
||||
@@ -420,20 +423,27 @@ class CgroupsModule(Module):
|
||||
:param cgroup: Name of cgroup to run command into
|
||||
:returns: A command to run `cmdline` into `cgroup`
|
||||
"""
|
||||
if not cgroup.startswith('/'):
|
||||
message = 'cgroup name "{}" must start with "/"'.format(cgroup)
|
||||
raise ValueError(message)
|
||||
return 'CGMOUNT={} {} cgroups_run_into {} {}'\
|
||||
.format(self.cgroup_root, self.target.shutils,
|
||||
cgroup, cmdline)
|
||||
|
||||
def run_into(self, cgroup, cmdline):
|
||||
def run_into(self, cgroup, cmdline, as_root=None):
|
||||
"""
|
||||
Run the specified command into the specified CGroup
|
||||
|
||||
:param cmdline: Command to be run into cgroup
|
||||
:param cgroup: Name of cgroup to run command into
|
||||
:param as_root: Specify whether to run the command as root, if not
|
||||
specified will default to whether the target is rooted.
|
||||
:returns: Output of command.
|
||||
"""
|
||||
if as_root is None:
|
||||
as_root = self.target.is_rooted
|
||||
cmd = self.run_into_cmd(cgroup, cmdline)
|
||||
raw_output = self.target.execute(cmd)
|
||||
raw_output = self.target.execute(cmd, as_root=as_root)
|
||||
|
||||
# First line of output comes from shutils; strip it out.
|
||||
return raw_output.split('\n', 1)[1]
|
||||
@@ -444,11 +454,11 @@ class CgroupsModule(Module):
|
||||
A regexps of tasks names can be used to defined tasks which should not
|
||||
be moved.
|
||||
"""
|
||||
return self.target._execute_util(
|
||||
return self.target._execute_util( # pylint: disable=protected-access
|
||||
'cgroups_tasks_move {} {} {}'.format(srcg, dstg, exclude),
|
||||
as_root=True)
|
||||
|
||||
def isolate(self, cpus, exclude=[]):
|
||||
def isolate(self, cpus, exclude=None):
|
||||
"""
|
||||
Remove all userspace tasks from specified CPUs.
|
||||
|
||||
@@ -465,6 +475,8 @@ class CgroupsModule(Module):
|
||||
sandbox is the CGroup of sandboxed CPUs
|
||||
isolated is the CGroup of isolated CPUs
|
||||
"""
|
||||
if exclude is None:
|
||||
exclude = []
|
||||
all_cpus = set(range(self.target.number_of_cpus))
|
||||
sbox_cpus = list(all_cpus - set(cpus))
|
||||
isol_cpus = list(all_cpus - set(sbox_cpus))
|
||||
@@ -483,7 +495,7 @@ class CgroupsModule(Module):
|
||||
|
||||
return sbox_cg, isol_cg
|
||||
|
||||
def freeze(self, exclude=[], thaw=False):
|
||||
def freeze(self, exclude=None, thaw=False):
|
||||
"""
|
||||
Freeze all user-space tasks but the specified ones
|
||||
|
||||
@@ -501,6 +513,9 @@ class CgroupsModule(Module):
|
||||
:type thaw: bool
|
||||
"""
|
||||
|
||||
if exclude is None:
|
||||
exclude = []
|
||||
|
||||
# Create Freezer CGroup
|
||||
freezer = self.controller('freezer')
|
||||
if freezer is None:
|
||||
@@ -509,7 +524,8 @@ class CgroupsModule(Module):
|
||||
cmd = 'cgroups_freezer_set_state {{}} {}'.format(freezer_cg.directory)
|
||||
|
||||
if thaw:
|
||||
# Restart froozen tasks
|
||||
# Restart frozen tasks
|
||||
# pylint: disable=protected-access
|
||||
freezer.target._execute_util(cmd.format('THAWED'), as_root=True)
|
||||
# Remove all tasks from freezer
|
||||
freezer.move_all_tasks_to('/')
|
||||
@@ -522,7 +538,7 @@ class CgroupsModule(Module):
|
||||
tasks = freezer.tasks('/')
|
||||
|
||||
# Freeze all tasks
|
||||
# pylint: disable=protected-access
|
||||
freezer.target._execute_util(cmd.format('FROZEN'), as_root=True)
|
||||
|
||||
return tasks
|
||||
|
||||
|
@@ -37,12 +37,14 @@ class MbedFanActiveCoolingModule(Module):
|
||||
with open_serial_connection(timeout=self.timeout,
|
||||
port=self.port,
|
||||
baudrate=self.baud) as target:
|
||||
# pylint: disable=no-member
|
||||
target.sendline('motor_{}_1'.format(self.fan_pin))
|
||||
|
||||
def stop(self):
|
||||
with open_serial_connection(timeout=self.timeout,
|
||||
port=self.port,
|
||||
baudrate=self.baud) as target:
|
||||
# pylint: disable=no-member
|
||||
target.sendline('motor_{}_0'.format(self.fan_pin))
|
||||
|
||||
|
||||
|
@@ -12,8 +12,10 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from contextlib import contextmanager
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
|
||||
@@ -82,7 +84,7 @@ class CpufreqModule(Module):
|
||||
Setting the governor on any core in a cluster will also set it on all
|
||||
other cores in that cluster.
|
||||
|
||||
:raises: TargetError if governor is not supported by the CPU, or if,
|
||||
:raises: TargetStableError if governor is not supported by the CPU, or if,
|
||||
for some reason, the governor could not be set.
|
||||
|
||||
"""
|
||||
@@ -90,11 +92,52 @@ class CpufreqModule(Module):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
supported = self.list_governors(cpu)
|
||||
if governor not in supported:
|
||||
raise TargetError('Governor {} not supported for cpu {}'.format(governor, cpu))
|
||||
raise TargetStableError('Governor {} not supported for cpu {}'.format(governor, cpu))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
|
||||
self.target.write_value(sysfile, governor)
|
||||
self.set_governor_tunables(cpu, governor, **kwargs)
|
||||
|
||||
@contextmanager
|
||||
def use_governor(self, governor, cpus=None, **kwargs):
|
||||
"""
|
||||
Use a given governor, then restore previous governor(s)
|
||||
|
||||
:param governor: Governor to use on all targeted CPUs (see :meth:`set_governor`)
|
||||
:type governor: str
|
||||
|
||||
:param cpus: CPUs affected by the governor change (all by default)
|
||||
:type cpus: list
|
||||
|
||||
:Keyword Arguments: Governor tunables, See :meth:`set_governor_tunables`
|
||||
"""
|
||||
if not cpus:
|
||||
cpus = self.target.list_online_cpus()
|
||||
|
||||
# Setting a governor & tunables for a cpu will set them for all cpus
|
||||
# in the same clock domain, so only manipulating one cpu per domain
|
||||
# is enough
|
||||
domains = set(self.get_affected_cpus(cpu)[0] for cpu in cpus)
|
||||
prev_governors = {cpu : (self.get_governor(cpu), self.get_governor_tunables(cpu))
|
||||
for cpu in domains}
|
||||
|
||||
# Special case for userspace, frequency is not seen as a tunable
|
||||
userspace_freqs = {}
|
||||
for cpu, (prev_gov, _) in prev_governors.items():
|
||||
if prev_gov == "userspace":
|
||||
userspace_freqs[cpu] = self.get_frequency(cpu)
|
||||
|
||||
for cpu in domains:
|
||||
self.set_governor(cpu, governor, **kwargs)
|
||||
|
||||
try:
|
||||
yield
|
||||
|
||||
finally:
|
||||
for cpu, (prev_gov, tunables) in prev_governors.items():
|
||||
self.set_governor(cpu, prev_gov, **tunables)
|
||||
if prev_gov == "userspace":
|
||||
self.set_frequency(cpu, userspace_freqs[cpu])
|
||||
|
||||
def list_governor_tunables(self, cpu):
|
||||
"""Returns a list of tunables available for the governor on the specified CPU."""
|
||||
if isinstance(cpu, int):
|
||||
@@ -104,11 +147,11 @@ class CpufreqModule(Module):
|
||||
try:
|
||||
tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
|
||||
self._governor_tunables[governor] = self.target.list_directory(tunables_path)
|
||||
except TargetError: # probably an older kernel
|
||||
except TargetStableError: # probably an older kernel
|
||||
try:
|
||||
tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
|
||||
self._governor_tunables[governor] = self.target.list_directory(tunables_path)
|
||||
except TargetError: # governor does not support tunables
|
||||
except TargetStableError: # governor does not support tunables
|
||||
self._governor_tunables[governor] = []
|
||||
return self._governor_tunables[governor]
|
||||
|
||||
@@ -122,7 +165,7 @@ class CpufreqModule(Module):
|
||||
try:
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
||||
tunables[tunable] = self.target.read_value(path)
|
||||
except TargetError: # May be an older kernel
|
||||
except TargetStableError: # May be an older kernel
|
||||
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
|
||||
tunables[tunable] = self.target.read_value(path)
|
||||
return tunables
|
||||
@@ -140,7 +183,7 @@ class CpufreqModule(Module):
|
||||
The rest should be keyword parameters mapping tunable name onto the value to
|
||||
be set for it.
|
||||
|
||||
:raises: TargetError if governor specified is not a valid governor name, or if
|
||||
:raises: TargetStableError if governor specified is not a valid governor name, or if
|
||||
a tunable specified is not valid for the governor, or if could not set
|
||||
tunable.
|
||||
|
||||
@@ -155,7 +198,7 @@ class CpufreqModule(Module):
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
||||
try:
|
||||
self.target.write_value(path, value)
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
if self.target.file_exists(path):
|
||||
# File exists but we did something wrong
|
||||
raise
|
||||
@@ -165,11 +208,11 @@ class CpufreqModule(Module):
|
||||
else:
|
||||
message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
|
||||
message += 'Available tunables are: {}'.format(valid_tunables)
|
||||
raise TargetError(message)
|
||||
raise TargetStableError(message)
|
||||
|
||||
@memoized
|
||||
def list_frequencies(self, cpu):
|
||||
"""Returns a list of frequencies supported by the cpu or an empty list
|
||||
"""Returns a sorted list of frequencies supported by the cpu or an empty list
|
||||
if not could be found."""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
@@ -177,21 +220,21 @@ class CpufreqModule(Module):
|
||||
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
|
||||
output = self.target.execute(cmd)
|
||||
available_frequencies = list(map(int, output.strip().split())) # pylint: disable=E1103
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
# On some devices scaling_frequencies is not generated.
|
||||
# http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
|
||||
# Fall back to parsing stats/time_in_state
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
|
||||
try:
|
||||
out_iter = iter(self.target.read_value(path).split())
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
if not self.target.file_exists(path):
|
||||
# Probably intel_pstate. Can't get available freqs.
|
||||
return []
|
||||
raise
|
||||
|
||||
available_frequencies = list(map(int, reversed([f for f, _ in zip(out_iter, out_iter)])))
|
||||
return available_frequencies
|
||||
return sorted(available_frequencies)
|
||||
|
||||
@memoized
|
||||
def get_max_available_frequency(self, cpu):
|
||||
@@ -200,7 +243,7 @@ class CpufreqModule(Module):
|
||||
could not be found.
|
||||
"""
|
||||
freqs = self.list_frequencies(cpu)
|
||||
return freqs and max(freqs) or None
|
||||
return max(freqs) if freqs else None
|
||||
|
||||
@memoized
|
||||
def get_min_available_frequency(self, cpu):
|
||||
@@ -209,7 +252,7 @@ class CpufreqModule(Module):
|
||||
could not be found.
|
||||
"""
|
||||
freqs = self.list_frequencies(cpu)
|
||||
return freqs and min(freqs) or None
|
||||
return min(freqs) if freqs else None
|
||||
|
||||
def get_min_frequency(self, cpu):
|
||||
"""
|
||||
@@ -219,7 +262,7 @@ class CpufreqModule(Module):
|
||||
try to read the minimum frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
@@ -239,7 +282,7 @@ class CpufreqModule(Module):
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: TargetError if the frequency is not supported by the CPU, or if, for
|
||||
:raises: TargetStableError if the frequency is not supported by the CPU, or if, for
|
||||
some reason, frequency could not be set.
|
||||
:raises: ValueError if ``frequency`` is not an integer.
|
||||
|
||||
@@ -250,7 +293,7 @@ class CpufreqModule(Module):
|
||||
try:
|
||||
value = int(frequency)
|
||||
if exact and available_frequencies and value not in available_frequencies:
|
||||
raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
|
||||
@@ -258,7 +301,7 @@ class CpufreqModule(Module):
|
||||
except ValueError:
|
||||
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
|
||||
|
||||
def get_frequency(self, cpu):
|
||||
def get_frequency(self, cpu, cpuinfo=False):
|
||||
"""
|
||||
Returns the current frequency currently set for the specified CPU.
|
||||
|
||||
@@ -266,12 +309,18 @@ class CpufreqModule(Module):
|
||||
try to read the current frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
:param cpuinfo: Read the value in the cpuinfo interface that reflects
|
||||
the actual running frequency.
|
||||
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
|
||||
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(
|
||||
cpu,
|
||||
'cpuinfo_cur_freq' if cpuinfo else 'scaling_cur_freq')
|
||||
return self.target.read_int(sysfile)
|
||||
|
||||
def set_frequency(self, cpu, frequency, exact=True):
|
||||
@@ -288,7 +337,7 @@ class CpufreqModule(Module):
|
||||
|
||||
on the device (if it exists).
|
||||
|
||||
:raises: TargetError if the frequency is not supported by the CPU, or if, for
|
||||
:raises: TargetStableError if the frequency is not supported by the CPU, or if, for
|
||||
some reason, frequency could not be set.
|
||||
:raises: ValueError if ``frequency`` is not an integer.
|
||||
|
||||
@@ -300,13 +349,17 @@ class CpufreqModule(Module):
|
||||
if exact:
|
||||
available_frequencies = self.list_frequencies(cpu)
|
||||
if available_frequencies and value not in available_frequencies:
|
||||
raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
value,
|
||||
available_frequencies))
|
||||
if self.get_governor(cpu) != 'userspace':
|
||||
raise TargetError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
|
||||
raise TargetStableError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
|
||||
self.target.write_value(sysfile, value, verify=False)
|
||||
cpuinfo = self.get_frequency(cpu, cpuinfo=True)
|
||||
if cpuinfo != value:
|
||||
self.logger.warning(
|
||||
'The cpufreq value has not been applied properly cpuinfo={} request={}'.format(cpuinfo, value))
|
||||
except ValueError:
|
||||
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
|
||||
|
||||
@@ -318,7 +371,7 @@ class CpufreqModule(Module):
|
||||
try to read the maximum frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
@@ -337,7 +390,7 @@ class CpufreqModule(Module):
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: TargetError if the frequency is not supported by the CPU, or if, for
|
||||
:raises: TargetStableError if the frequency is not supported by the CPU, or if, for
|
||||
some reason, frequency could not be set.
|
||||
:raises: ValueError if ``frequency`` is not an integer.
|
||||
|
||||
@@ -348,7 +401,7 @@ class CpufreqModule(Module):
|
||||
try:
|
||||
value = int(frequency)
|
||||
if exact and available_frequencies and value not in available_frequencies:
|
||||
raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
|
||||
@@ -380,6 +433,7 @@ class CpufreqModule(Module):
|
||||
"""
|
||||
Set the specified (minimum) frequency for all the (online) CPUs
|
||||
"""
|
||||
# pylint: disable=protected-access
|
||||
return self.target._execute_util(
|
||||
'cpufreq_set_all_frequencies {}'.format(freq),
|
||||
as_root=True)
|
||||
@@ -388,6 +442,7 @@ class CpufreqModule(Module):
|
||||
"""
|
||||
Get the current frequency for all the (online) CPUs
|
||||
"""
|
||||
# pylint: disable=protected-access
|
||||
output = self.target._execute_util(
|
||||
'cpufreq_get_all_frequencies', as_root=True)
|
||||
frequencies = {}
|
||||
@@ -403,16 +458,17 @@ class CpufreqModule(Module):
|
||||
Set the specified governor for all the (online) CPUs
|
||||
"""
|
||||
try:
|
||||
# pylint: disable=protected-access
|
||||
return self.target._execute_util(
|
||||
'cpufreq_set_all_governors {}'.format(governor),
|
||||
as_root=True)
|
||||
except TargetError as e:
|
||||
except TargetStableError as e:
|
||||
if ("echo: I/O error" in str(e) or
|
||||
"write error: Invalid argument" in str(e)):
|
||||
|
||||
cpus_unsupported = [c for c in self.target.list_online_cpus()
|
||||
if governor not in self.list_governors(c)]
|
||||
raise TargetError("Governor {} unsupported for CPUs {}".format(
|
||||
raise TargetStableError("Governor {} unsupported for CPUs {}".format(
|
||||
governor, cpus_unsupported))
|
||||
else:
|
||||
raise
|
||||
@@ -421,6 +477,7 @@ class CpufreqModule(Module):
|
||||
"""
|
||||
Get the current governor for all the (online) CPUs
|
||||
"""
|
||||
# pylint: disable=protected-access
|
||||
output = self.target._execute_util(
|
||||
'cpufreq_get_all_governors', as_root=True)
|
||||
governors = {}
|
||||
@@ -435,6 +492,7 @@ class CpufreqModule(Module):
|
||||
"""
|
||||
Report current frequencies on trace file
|
||||
"""
|
||||
# pylint: disable=protected-access
|
||||
return self.target._execute_util('cpufreq_trace_all_frequencies', as_root=True)
|
||||
|
||||
def get_affected_cpus(self, cpu):
|
||||
@@ -478,7 +536,7 @@ class CpufreqModule(Module):
|
||||
"""
|
||||
cpus = set(range(self.target.number_of_cpus))
|
||||
while cpus:
|
||||
cpu = next(iter(cpus))
|
||||
cpu = next(iter(cpus)) # pylint: disable=stop-iteration-return
|
||||
domain = self.target.cpufreq.get_related_cpus(cpu)
|
||||
yield domain
|
||||
cpus = cpus.difference(domain)
|
||||
|
@@ -15,8 +15,10 @@
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
from past.builtins import basestring
|
||||
|
||||
from operator import attrgetter
|
||||
from pprint import pformat
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.misc import memoized
|
||||
from devlib.utils.types import integer, boolean
|
||||
|
||||
|
||||
@@ -51,6 +53,7 @@ class CpuidleState(object):
|
||||
self.desc = desc
|
||||
self.power = power
|
||||
self.latency = latency
|
||||
self.residency = residency
|
||||
self.id = self.target.path.basename(self.path)
|
||||
self.cpu = self.target.path.basename(self.target.path.dirname(path))
|
||||
|
||||
@@ -96,40 +99,35 @@ class Cpuidle(Module):
|
||||
|
||||
def __init__(self, target):
|
||||
super(Cpuidle, self).__init__(target)
|
||||
self._states = {}
|
||||
|
||||
basepath = '/sys/devices/system/cpu/'
|
||||
values_tree = self.target.read_tree_values(basepath, depth=4, check_exit_code=False)
|
||||
i = 0
|
||||
cpu_id = 'cpu{}'.format(i)
|
||||
while cpu_id in values_tree:
|
||||
cpu_node = values_tree[cpu_id]
|
||||
|
||||
if 'cpuidle' in cpu_node:
|
||||
idle_node = cpu_node['cpuidle']
|
||||
self._states[cpu_id] = []
|
||||
j = 0
|
||||
state_id = 'state{}'.format(j)
|
||||
while state_id in idle_node:
|
||||
state_node = idle_node[state_id]
|
||||
state = CpuidleState(
|
||||
self._states = {
|
||||
cpu_name: sorted(
|
||||
(
|
||||
CpuidleState(
|
||||
self.target,
|
||||
index=j,
|
||||
path=self.target.path.join(basepath, cpu_id, 'cpuidle', state_id),
|
||||
# state_name is formatted as "state42"
|
||||
index=int(state_name[len('state'):]),
|
||||
path=self.target.path.join(basepath, cpu_name, 'cpuidle', state_name),
|
||||
name=state_node['name'],
|
||||
desc=state_node['desc'],
|
||||
power=int(state_node['power']),
|
||||
latency=int(state_node['latency']),
|
||||
residency=int(state_node['residency']) if 'residency' in state_node else None,
|
||||
)
|
||||
msg = 'Adding {} state {}: {} {}'
|
||||
self.logger.debug(msg.format(cpu_id, j, state.name, state.desc))
|
||||
self._states[cpu_id].append(state)
|
||||
j += 1
|
||||
state_id = 'state{}'.format(j)
|
||||
for state_name, state_node in cpu_node['cpuidle'].items()
|
||||
if state_name.startswith('state')
|
||||
),
|
||||
key=attrgetter('index'),
|
||||
)
|
||||
|
||||
i += 1
|
||||
cpu_id = 'cpu{}'.format(i)
|
||||
for cpu_name, cpu_node in values_tree.items()
|
||||
if cpu_name.startswith('cpu') and 'cpuidle' in cpu_node
|
||||
}
|
||||
|
||||
self.logger.debug('Adding cpuidle states:\n{}'.format(pformat(self._states)))
|
||||
|
||||
def get_states(self, cpu=0):
|
||||
if isinstance(cpu, int):
|
||||
@@ -166,10 +164,14 @@ class Cpuidle(Module):
|
||||
"""
|
||||
Momentarily wake each CPU. Ensures cpu_idle events in trace file.
|
||||
"""
|
||||
output = self.target._execute_util('cpuidle_wake_all_cpus')
|
||||
# pylint: disable=protected-access
|
||||
self.target._execute_util('cpuidle_wake_all_cpus')
|
||||
|
||||
def get_driver(self):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_driver'))
|
||||
|
||||
def get_governor(self):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_governor_ro'))
|
||||
path = self.target.path.join(self.root_path, 'current_governor_ro')
|
||||
if not self.target.file_exists(path):
|
||||
path = self.target.path.join(self.root_path, 'current_governor')
|
||||
return self.target.read_value(path)
|
||||
|
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
class DevfreqModule(Module):
|
||||
@@ -64,13 +64,13 @@ class DevfreqModule(Module):
|
||||
Additional keyword arguments can be used to specify governor tunables for
|
||||
governors that support them.
|
||||
|
||||
:raises: TargetError if governor is not supported by the device, or if,
|
||||
:raises: TargetStableError if governor is not supported by the device, or if,
|
||||
for some reason, the governor could not be set.
|
||||
|
||||
"""
|
||||
supported = self.list_governors(device)
|
||||
if governor not in supported:
|
||||
raise TargetError('Governor {} not supported for device {}'.format(governor, device))
|
||||
raise TargetStableError('Governor {} not supported for device {}'.format(governor, device))
|
||||
sysfile = '/sys/class/devfreq/{}/governor'.format(device)
|
||||
self.target.write_value(sysfile, governor)
|
||||
|
||||
@@ -94,7 +94,7 @@ class DevfreqModule(Module):
|
||||
will try to read the minimum frequency and the following exception will
|
||||
be raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
sysfile = '/sys/class/devfreq/{}/min_freq'.format(device)
|
||||
@@ -112,7 +112,7 @@ class DevfreqModule(Module):
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: TargetError if the frequency is not supported by the device, or if, for
|
||||
:raises: TargetStableError if the frequency is not supported by the device, or if, for
|
||||
some reason, frequency could not be set.
|
||||
:raises: ValueError if ``frequency`` is not an integer.
|
||||
|
||||
@@ -121,7 +121,7 @@ class DevfreqModule(Module):
|
||||
try:
|
||||
value = int(frequency)
|
||||
if exact and available_frequencies and value not in available_frequencies:
|
||||
raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
|
||||
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/class/devfreq/{}/min_freq'.format(device)
|
||||
@@ -137,7 +137,7 @@ class DevfreqModule(Module):
|
||||
will try to read the current frequency and the following exception will
|
||||
be raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
sysfile = '/sys/class/devfreq/{}/cur_freq'.format(device)
|
||||
@@ -151,7 +151,7 @@ class DevfreqModule(Module):
|
||||
try to read the maximum frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
"""
|
||||
sysfile = '/sys/class/devfreq/{}/max_freq'.format(device)
|
||||
return self.target.read_int(sysfile)
|
||||
@@ -168,7 +168,7 @@ class DevfreqModule(Module):
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: TargetError if the frequency is not supported by the device, or
|
||||
:raises: TargetStableError if the frequency is not supported by the device, or
|
||||
if, for some reason, frequency could not be set.
|
||||
:raises: ValueError if ``frequency`` is not an integer.
|
||||
|
||||
@@ -180,7 +180,7 @@ class DevfreqModule(Module):
|
||||
raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
|
||||
|
||||
if exact and value not in available_frequencies:
|
||||
raise TargetError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
|
||||
raise TargetStableError('Can\'t set {} frequency to {}\nmust be in {}'.format(device,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/class/devfreq/{}/max_freq'.format(device)
|
||||
@@ -200,15 +200,15 @@ class DevfreqModule(Module):
|
||||
Set the specified governor for all the (available) devices
|
||||
"""
|
||||
try:
|
||||
return self.target._execute_util(
|
||||
return self.target._execute_util( # pylint: disable=protected-access
|
||||
'devfreq_set_all_governors {}'.format(governor), as_root=True)
|
||||
except TargetError as e:
|
||||
except TargetStableError as e:
|
||||
if ("echo: I/O error" in str(e) or
|
||||
"write error: Invalid argument" in str(e)):
|
||||
|
||||
devs_unsupported = [d for d in self.target.list_devices()
|
||||
if governor not in self.list_governors(d)]
|
||||
raise TargetError("Governor {} unsupported for devices {}".format(
|
||||
raise TargetStableError("Governor {} unsupported for devices {}".format(
|
||||
governor, devs_unsupported))
|
||||
else:
|
||||
raise
|
||||
@@ -217,7 +217,7 @@ class DevfreqModule(Module):
|
||||
"""
|
||||
Get the current governor for all the (online) CPUs
|
||||
"""
|
||||
output = self.target._execute_util(
|
||||
output = self.target._execute_util( # pylint: disable=protected-access
|
||||
'devfreq_get_all_governors', as_root=True)
|
||||
governors = {}
|
||||
for x in output.splitlines():
|
||||
@@ -241,7 +241,7 @@ class DevfreqModule(Module):
|
||||
"""
|
||||
Set the specified (minimum) frequency for all the (available) devices
|
||||
"""
|
||||
return self.target._execute_util(
|
||||
return self.target._execute_util( # pylint: disable=protected-access
|
||||
'devfreq_set_all_frequencies {}'.format(freq),
|
||||
as_root=True)
|
||||
|
||||
@@ -249,7 +249,7 @@ class DevfreqModule(Module):
|
||||
"""
|
||||
Get the current frequency for all the (available) devices
|
||||
"""
|
||||
output = self.target._execute_util(
|
||||
output = self.target._execute_util( # pylint: disable=protected-access
|
||||
'devfreq_get_all_frequencies', as_root=True)
|
||||
frequencies = {}
|
||||
for x in output.splitlines():
|
||||
@@ -258,4 +258,3 @@ class DevfreqModule(Module):
|
||||
break
|
||||
frequencies[kv[0]] = kv[1]
|
||||
return frequencies
|
||||
|
||||
|
@@ -14,16 +14,13 @@
|
||||
|
||||
import re
|
||||
import sys
|
||||
import logging
|
||||
import os.path
|
||||
from collections import defaultdict
|
||||
|
||||
import devlib
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.module import Module
|
||||
from devlib.platform import Platform
|
||||
from devlib.platform.gem5 import Gem5SimulationPlatform
|
||||
from devlib.utils.gem5 import iter_statistics_dump, GEM5STATS_ROI_NUMBER, GEM5STATS_DUMP_TAIL
|
||||
from devlib.utils.gem5 import iter_statistics_dump, GEM5STATS_ROI_NUMBER
|
||||
|
||||
|
||||
class Gem5ROI:
|
||||
@@ -39,7 +36,7 @@ class Gem5ROI:
|
||||
self.target.execute('m5 roistart {}'.format(self.number))
|
||||
self.running = True
|
||||
return True
|
||||
|
||||
|
||||
def stop(self):
|
||||
if not self.running:
|
||||
return False
|
||||
@@ -49,7 +46,7 @@ class Gem5ROI:
|
||||
|
||||
class Gem5StatsModule(Module):
|
||||
'''
|
||||
Module controlling Region of Interest (ROIs) markers, satistics dump
|
||||
Module controlling Region of Interest (ROIs) markers, satistics dump
|
||||
frequency and parsing statistics log file when using gem5 platforms.
|
||||
|
||||
ROIs are identified by user-defined labels and need to be booked prior to
|
||||
@@ -90,13 +87,13 @@ class Gem5StatsModule(Module):
|
||||
if label not in self.rois:
|
||||
raise KeyError('Incorrect ROI label: {}'.format(label))
|
||||
if not self.rois[label].start():
|
||||
raise TargetError('ROI {} was already running'.format(label))
|
||||
|
||||
raise TargetStableError('ROI {} was already running'.format(label))
|
||||
|
||||
def roi_end(self, label):
|
||||
if label not in self.rois:
|
||||
raise KeyError('Incorrect ROI label: {}'.format(label))
|
||||
if not self.rois[label].stop():
|
||||
raise TargetError('ROI {} was not running'.format(label))
|
||||
raise TargetStableError('ROI {} was not running'.format(label))
|
||||
|
||||
def start_periodic_dump(self, delay_ns=0, period_ns=10000000):
|
||||
# Default period is 10ms because it's roughly what's needed to have
|
||||
@@ -105,7 +102,7 @@ class Gem5StatsModule(Module):
|
||||
msg = 'Delay ({}) and period ({}) for periodic dumps must be positive'
|
||||
raise ValueError(msg.format(delay_ns, period_ns))
|
||||
self.target.execute('m5 dumpresetstats {} {}'.format(delay_ns, period_ns))
|
||||
|
||||
|
||||
def match(self, keys, rois_labels, base_dump=0):
|
||||
'''
|
||||
Extract specific values from the statistics log file of gem5
|
||||
@@ -116,49 +113,49 @@ class Gem5StatsModule(Module):
|
||||
keys.
|
||||
:type keys: list
|
||||
|
||||
:param rois_labels: list of ROIs labels. ``match()`` returns the
|
||||
:param rois_labels: list of ROIs labels. ``match()`` returns the
|
||||
values of the specified fields only during dumps spanned by at
|
||||
least one of these ROIs.
|
||||
:type rois_label: list
|
||||
|
||||
:param base_dump: dump number from which ``match()`` should operate. By
|
||||
specifying a non-zero dump number, one can virtually truncate
|
||||
:param base_dump: dump number from which ``match()`` should operate. By
|
||||
specifying a non-zero dump number, one can virtually truncate
|
||||
the head of the stats file and ignore all dumps before a specific
|
||||
instant. The value of ``base_dump`` will typically (but not
|
||||
instant. The value of ``base_dump`` will typically (but not
|
||||
necessarily) be the result of a previous call to ``next_dump_no``.
|
||||
Default value is 0.
|
||||
:type base_dump: int
|
||||
|
||||
:returns: a dict indexed by key parameters containing a dict indexed by
|
||||
ROI labels containing an in-order list of records for the key under
|
||||
consideration during the active intervals of the ROI.
|
||||
|
||||
consideration during the active intervals of the ROI.
|
||||
|
||||
Example of return value:
|
||||
* Result of match(['sim_'],['roi_1']):
|
||||
{
|
||||
'sim_inst':
|
||||
'sim_inst':
|
||||
{
|
||||
'roi_1': [265300176, 267975881]
|
||||
}
|
||||
'sim_ops':
|
||||
'sim_ops':
|
||||
{
|
||||
'roi_1': [324395787, 327699419]
|
||||
}
|
||||
'sim_seconds':
|
||||
'sim_seconds':
|
||||
{
|
||||
'roi_1': [0.199960, 0.199897]
|
||||
}
|
||||
'sim_freq':
|
||||
'sim_freq':
|
||||
{
|
||||
'roi_1': [1000000000000, 1000000000000]
|
||||
}
|
||||
'sim_ticks':
|
||||
'sim_ticks':
|
||||
{
|
||||
'roi_1': [199960234227, 199896897330]
|
||||
}
|
||||
}
|
||||
'''
|
||||
records = defaultdict(lambda : defaultdict(list))
|
||||
records = defaultdict(lambda: defaultdict(list))
|
||||
for record, active_rois in self.match_iter(keys, rois_labels, base_dump):
|
||||
for key in record:
|
||||
for roi_label in active_rois:
|
||||
@@ -178,15 +175,15 @@ class Gem5StatsModule(Module):
|
||||
|
||||
Example of return value:
|
||||
* Result of match_iter(['sim_'],['roi_1', 'roi_2']).next()
|
||||
(
|
||||
{
|
||||
(
|
||||
{
|
||||
'sim_inst': 265300176,
|
||||
'sim_ops': 324395787,
|
||||
'sim_seconds': 0.199960,
|
||||
'sim_seconds': 0.199960,
|
||||
'sim_freq': 1000000000000,
|
||||
'sim_ticks': 199960234227,
|
||||
},
|
||||
[ 'roi_1 ' ]
|
||||
[ 'roi_1 ' ]
|
||||
)
|
||||
'''
|
||||
for label in rois_labels:
|
||||
@@ -195,11 +192,11 @@ class Gem5StatsModule(Module):
|
||||
if self.rois[label].running:
|
||||
self.logger.warning('Trying to match records in statistics file'
|
||||
' while ROI {} is running'.format(label))
|
||||
|
||||
|
||||
# Construct one large regex that concatenates all keys because
|
||||
# matching one large expression is more efficient than several smaller
|
||||
all_keys_re = re.compile('|'.join(keys))
|
||||
|
||||
|
||||
def roi_active(roi_label, dump):
|
||||
roi = self.rois[roi_label]
|
||||
return (roi.field in dump) and (int(dump[roi.field]) == 1)
|
||||
@@ -215,8 +212,8 @@ class Gem5StatsModule(Module):
|
||||
def next_dump_no(self):
|
||||
'''
|
||||
Returns the number of the next dump to be written to the stats file.
|
||||
|
||||
For example, if next_dump_no is called while there are 5 (0 to 4) full
|
||||
|
||||
For example, if next_dump_no is called while there are 5 (0 to 4) full
|
||||
dumps in the stats file, it will return 5. This will be usefull to know
|
||||
from which dump one should match() in the future to get only data from
|
||||
now on.
|
||||
@@ -224,7 +221,7 @@ class Gem5StatsModule(Module):
|
||||
with open(self._stats_file_path, 'r') as stats_file:
|
||||
# _goto_dump reach EOF and returns the total number of dumps + 1
|
||||
return self._goto_dump(stats_file, sys.maxsize)
|
||||
|
||||
|
||||
def _goto_dump(self, stats_file, target_dump):
|
||||
if target_dump < 0:
|
||||
raise HostError('Cannot go to dump {}'.format(target_dump))
|
||||
@@ -238,12 +235,12 @@ class Gem5StatsModule(Module):
|
||||
curr_dump = max(prev_dumps)
|
||||
curr_pos = self._dump_pos_cache[curr_dump]
|
||||
stats_file.seek(curr_pos)
|
||||
|
||||
|
||||
# And iterate until target_dump
|
||||
dump_iterator = iter_statistics_dump(stats_file)
|
||||
while curr_dump < target_dump:
|
||||
try:
|
||||
dump = next(dump_iterator)
|
||||
next(dump_iterator)
|
||||
except StopIteration:
|
||||
break
|
||||
# End of passed dump is beginning og next one
|
||||
@@ -251,4 +248,3 @@ class Gem5StatsModule(Module):
|
||||
curr_dump += 1
|
||||
self._dump_pos_cache[curr_dump] = curr_pos
|
||||
return curr_dump
|
||||
|
||||
|
@@ -28,9 +28,8 @@
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import json
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
class GpufreqModule(Module):
|
||||
@@ -57,7 +56,7 @@ class GpufreqModule(Module):
|
||||
|
||||
def set_governor(self, governor):
|
||||
if governor not in self.governors:
|
||||
raise TargetError('Governor {} not supported for gpu {}'.format(governor, cpu))
|
||||
raise TargetStableError('Governor {} not supported for gpu'.format(governor))
|
||||
self.target.write_value("/sys/kernel/gpu/gpu_governor", governor)
|
||||
|
||||
def get_frequencies(self):
|
||||
@@ -74,7 +73,7 @@ class GpufreqModule(Module):
|
||||
try to read the current frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: TargetError if for some reason the frequency could not be read.
|
||||
:raises: TargetStableError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
return int(self.target.read_value("/sys/kernel/gpu/gpu_clock"))
|
||||
@@ -85,6 +84,6 @@ class GpufreqModule(Module):
|
||||
Returns the model name reported by the GPU.
|
||||
"""
|
||||
try:
|
||||
return self.target.read_value("/sys/kernel/gpu/gpu_model")
|
||||
except:
|
||||
return "unknown"
|
||||
return self.target.read_value("/sys/kernel/gpu/gpu_model")
|
||||
except: # pylint: disable=bare-except
|
||||
return "unknown"
|
||||
|
@@ -14,6 +14,7 @@
|
||||
#
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.exception import TargetTransientError
|
||||
|
||||
|
||||
class HotplugModule(Module):
|
||||
@@ -35,9 +36,17 @@ class HotplugModule(Module):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
return target.path.join(cls.base_path, cpu, 'online')
|
||||
|
||||
def online_all(self):
|
||||
self.target._execute_util('hotplug_online_all',
|
||||
def list_hotpluggable_cpus(self):
|
||||
return [cpu for cpu in range(self.target.number_of_cpus)
|
||||
if self.target.file_exists(self._cpu_path(self.target, cpu))]
|
||||
|
||||
def online_all(self, verify=True):
|
||||
self.target._execute_util('hotplug_online_all', # pylint: disable=protected-access
|
||||
as_root=self.target.is_rooted)
|
||||
if verify:
|
||||
offline = set(self.target.list_offline_cpus())
|
||||
if offline:
|
||||
raise TargetTransientError('The following CPUs failed to come back online: {}'.format(offline))
|
||||
|
||||
def online(self, *args):
|
||||
for cpu in args:
|
||||
@@ -54,3 +63,22 @@ class HotplugModule(Module):
|
||||
value = 1 if online else 0
|
||||
self.target.write_value(path, value)
|
||||
|
||||
def _get_path(self, path):
|
||||
return self.target.path.join(self.base_path,
|
||||
path)
|
||||
|
||||
def fail(self, cpu, state):
|
||||
path = self._get_path('cpu{}/hotplug/fail'.format(cpu))
|
||||
return self.target.write_value(path, state)
|
||||
|
||||
def get_state(self, cpu):
|
||||
path = self._get_path('cpu{}/hotplug/state'.format(cpu))
|
||||
return self.target.read_value(path)
|
||||
|
||||
def get_states(self):
|
||||
path = self._get_path('hotplug/states')
|
||||
states_string = self.target.read_value(path)
|
||||
return dict(
|
||||
map(str.strip, string.split(':', 1))
|
||||
for string in states_string.strip().splitlines()
|
||||
)
|
||||
|
@@ -12,11 +12,10 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
import re
|
||||
from collections import defaultdict
|
||||
|
||||
from devlib import TargetError
|
||||
from devlib import TargetStableError
|
||||
from devlib.module import Module
|
||||
from devlib.utils.types import integer
|
||||
|
||||
@@ -119,7 +118,7 @@ class HwmonModule(Module):
|
||||
def probe(target):
|
||||
try:
|
||||
target.list_directory(HWMON_ROOT, as_root=target.is_rooted)
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
# Doesn't exist or no permissions
|
||||
return False
|
||||
return True
|
||||
@@ -138,7 +137,7 @@ class HwmonModule(Module):
|
||||
self.scan()
|
||||
|
||||
def scan(self):
|
||||
values_tree = self.target.read_tree_values(self.root, depth=3)
|
||||
values_tree = self.target.read_tree_values(self.root, depth=3, tar=True)
|
||||
for entry_id, fields in values_tree.items():
|
||||
path = self.target.path.join(self.root, entry_id)
|
||||
name = fields.pop('name', None)
|
||||
@@ -147,4 +146,3 @@ class HwmonModule(Module):
|
||||
self.logger.debug('Adding device {}'.format(name))
|
||||
device = HwmonDevice(self.target, path, name, fields)
|
||||
self.devices.append(device)
|
||||
|
||||
|
@@ -13,28 +13,15 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Copyright 2018 Arm Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import re
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.misc import memoized
|
||||
from devlib.utils.types import boolean
|
||||
from devlib.exception import TargetStableError
|
||||
|
||||
class SchedProcFSNode(object):
|
||||
"""
|
||||
@@ -62,7 +49,13 @@ class SchedProcFSNode(object):
|
||||
MC
|
||||
"""
|
||||
|
||||
_re_procfs_node = re.compile(r"(?P<name>.*)(?P<digits>\d+)$")
|
||||
_re_procfs_node = re.compile(r"(?P<name>.*\D)(?P<digits>\d+)$")
|
||||
|
||||
PACKABLE_ENTRIES = [
|
||||
"cpu",
|
||||
"domain",
|
||||
"group"
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def _ends_with_digits(node):
|
||||
@@ -83,18 +76,19 @@ class SchedProcFSNode(object):
|
||||
"""
|
||||
:returns: The name of the procfs node
|
||||
"""
|
||||
return re.search(SchedProcFSNode._re_procfs_node, node).group("name")
|
||||
match = re.search(SchedProcFSNode._re_procfs_node, node)
|
||||
if match:
|
||||
return match.group("name")
|
||||
|
||||
@staticmethod
|
||||
def _packable(node, entries):
|
||||
return node
|
||||
|
||||
@classmethod
|
||||
def _packable(cls, node):
|
||||
"""
|
||||
:returns: Whether it makes sense to pack a node into a common entry
|
||||
"""
|
||||
return (SchedProcFSNode._ends_with_digits(node) and
|
||||
any([SchedProcFSNode._ends_with_digits(x) and
|
||||
SchedProcFSNode._node_digits(x) != SchedProcFSNode._node_digits(node) and
|
||||
SchedProcFSNode._node_name(x) == SchedProcFSNode._node_name(node)
|
||||
for x in entries]))
|
||||
SchedProcFSNode._node_name(node) in cls.PACKABLE_ENTRIES)
|
||||
|
||||
@staticmethod
|
||||
def _build_directory(node_name, node_data):
|
||||
@@ -104,7 +98,7 @@ class SchedProcFSNode(object):
|
||||
return SchedProcFSNode(node_data)
|
||||
|
||||
@staticmethod
|
||||
def _build_entry(node_name, node_data):
|
||||
def _build_entry(node_data):
|
||||
value = node_data
|
||||
|
||||
# Most nodes just contain numerical data, try to convert
|
||||
@@ -120,7 +114,7 @@ class SchedProcFSNode(object):
|
||||
if isinstance(node_data, dict):
|
||||
return SchedProcFSNode._build_directory(node_name, node_data)
|
||||
else:
|
||||
return SchedProcFSNode._build_entry(node_name, node_data)
|
||||
return SchedProcFSNode._build_entry(node_data)
|
||||
|
||||
def __getattr__(self, name):
|
||||
return self._dyn_attrs[name]
|
||||
@@ -131,7 +125,7 @@ class SchedProcFSNode(object):
|
||||
# Find which entries can be packed into a common entry
|
||||
packables = {
|
||||
node : SchedProcFSNode._node_name(node) + "s"
|
||||
for node in list(nodes.keys()) if SchedProcFSNode._packable(node, list(nodes.keys()))
|
||||
for node in list(nodes.keys()) if SchedProcFSNode._packable(node)
|
||||
}
|
||||
|
||||
self._dyn_attrs = {}
|
||||
@@ -152,33 +146,41 @@ class SchedProcFSNode(object):
|
||||
self._dyn_attrs[key] = self._build_node(key, nodes[key])
|
||||
|
||||
|
||||
class SchedDomain(SchedProcFSNode):
|
||||
class _SchedDomainFlag:
|
||||
"""
|
||||
Represents a sched domain as seen through procfs
|
||||
Backward-compatible emulation of the former :class:`enum.Enum` that will
|
||||
work on recent kernels with dynamic sched domain flags name and no value
|
||||
exposed.
|
||||
"""
|
||||
# Domain flags obtained from include/linux/sched/topology.h on v4.17
|
||||
# https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux/+/v4.17/include/linux/sched/topology.h#20
|
||||
SD_LOAD_BALANCE = 0x0001 # Do load balancing on this domain.
|
||||
SD_BALANCE_NEWIDLE = 0x0002 # Balance when about to become idle
|
||||
SD_BALANCE_EXEC = 0x0004 # Balance on exec
|
||||
SD_BALANCE_FORK = 0x0008 # Balance on fork, clone
|
||||
SD_BALANCE_WAKE = 0x0010 # Balance on wakeup
|
||||
SD_WAKE_AFFINE = 0x0020 # Wake task to waking CPU
|
||||
SD_ASYM_CPUCAPACITY = 0x0040 # Groups have different max cpu capacities
|
||||
SD_SHARE_CPUCAPACITY = 0x0080 # Domain members share cpu capacity
|
||||
SD_SHARE_POWERDOMAIN = 0x0100 # Domain members share power domain
|
||||
SD_SHARE_PKG_RESOURCES = 0x0200 # Domain members share cpu pkg resources
|
||||
SD_SERIALIZE = 0x0400 # Only a single load balancing instance
|
||||
SD_ASYM_PACKING = 0x0800 # Place busy groups earlier in the domain
|
||||
SD_PREFER_SIBLING = 0x1000 # Prefer to place tasks in a sibling domain
|
||||
SD_OVERLAP = 0x2000 # sched_domains of this level overlap
|
||||
SD_NUMA = 0x4000 # cross-node balancing
|
||||
# Only defined in Android
|
||||
# https://android.googlesource.com/kernel/common/+/android-4.14/include/linux/sched/topology.h#29
|
||||
SD_SHARE_CAP_STATES = 0x8000 # Domain members share capacity state
|
||||
|
||||
# Checked to be valid from v4.4
|
||||
SD_FLAGS_REF_PARTS = (4, 4, 0)
|
||||
_INSTANCES = {}
|
||||
"""
|
||||
Dictionary storing the instances so that they can be compared with ``is``
|
||||
operator.
|
||||
"""
|
||||
|
||||
def __new__(cls, name, value, doc=None):
|
||||
self = super().__new__(cls)
|
||||
self.name = name
|
||||
self._value = value
|
||||
self.__doc__ = doc
|
||||
return cls._INSTANCES.setdefault(self, self)
|
||||
|
||||
def __eq__(self, other):
|
||||
# We *have to* check for "value" as well, otherwise it will be
|
||||
# impossible to keep in the same set 2 instances with differing values.
|
||||
return self.name == other.name and self._value == other._value
|
||||
|
||||
def __hash__(self):
|
||||
return hash((self.name, self._value))
|
||||
|
||||
@property
|
||||
def value(self):
|
||||
value = self._value
|
||||
if value is None:
|
||||
raise AttributeError('The kernel does not expose the sched domain flag values')
|
||||
else:
|
||||
return value
|
||||
|
||||
@staticmethod
|
||||
def check_version(target, logger):
|
||||
@@ -186,33 +188,162 @@ class SchedDomain(SchedProcFSNode):
|
||||
Check the target and see if its kernel version matches our view of the world
|
||||
"""
|
||||
parts = target.kernel_version.parts
|
||||
if parts < SchedDomain.SD_FLAGS_REF_PARTS:
|
||||
# Checked to be valid from v4.4
|
||||
# Not saved as a class attribute else it'll be converted to an enum
|
||||
ref_parts = (4, 4, 0)
|
||||
if parts < ref_parts:
|
||||
logger.warn(
|
||||
"Sched domain flags are defined for kernels v{} and up, "
|
||||
"but target is running v{}".format(SchedDomain.SD_FLAGS_REF_PARTS, parts)
|
||||
"but target is running v{}".format(ref_parts, parts)
|
||||
)
|
||||
|
||||
def has_flags(self, flags):
|
||||
"""
|
||||
:returns: Whether 'flags' are set on this sched domain
|
||||
"""
|
||||
return self.flags & flags == flags
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
def __repr__(self):
|
||||
return '<SchedDomainFlag: {}>'.format(self.name)
|
||||
|
||||
|
||||
class _SchedDomainFlagMeta(type):
|
||||
"""
|
||||
Metaclass of :class:`SchedDomainFlag`.
|
||||
|
||||
Provides some level of emulation of :class:`enum.Enum` behavior for
|
||||
backward compatibility.
|
||||
"""
|
||||
@property
|
||||
def _flags(self):
|
||||
return [
|
||||
attr
|
||||
for name, attr in self.__dict__.items()
|
||||
if name.startswith('SD_')
|
||||
]
|
||||
|
||||
def __getitem__(self, i):
|
||||
return self._flags[i]
|
||||
|
||||
def __len__(self):
|
||||
return len(self._flags)
|
||||
|
||||
# These would be provided by collections.abc.Sequence, but using it on a
|
||||
# metaclass seems to have issues around __init_subclass__
|
||||
def __iter__(self):
|
||||
return iter(self._flags)
|
||||
|
||||
def __reversed__(self):
|
||||
return reversed(self._flags)
|
||||
|
||||
def __contains__(self, x):
|
||||
return x in self._flags
|
||||
|
||||
@property
|
||||
def __members__(self):
|
||||
return {flag.name: flag for flag in self._flags}
|
||||
|
||||
|
||||
class SchedDomainFlag(_SchedDomainFlag, metaclass=_SchedDomainFlagMeta):
|
||||
"""
|
||||
Represents a sched domain flag.
|
||||
|
||||
.. note:: ``SD_*`` class attributes are deprecated, new code should never
|
||||
test a given flag against one of these attributes with ``is`` (.e.g ``x
|
||||
is SchedDomainFlag.SD_LOAD_BALANCE``. This is because the
|
||||
``SD_LOAD_BALANCE`` flag exists in two flavors that are not equal: one
|
||||
with a value (the class attribute) and one without (dynamically created
|
||||
when parsing flags for new kernels). Old code ran on old kernels should
|
||||
work fine though.
|
||||
"""
|
||||
# pylint: disable=bad-whitespace
|
||||
# Domain flags obtained from include/linux/sched/topology.h on v4.17
|
||||
# https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux/+/v4.17/include/linux/sched/topology.h#20
|
||||
SD_LOAD_BALANCE = _SchedDomainFlag("SD_LOAD_BALANCE", 0x0001, "Do load balancing on this domain")
|
||||
SD_BALANCE_NEWIDLE = _SchedDomainFlag("SD_BALANCE_NEWIDLE", 0x0002, "Balance when about to become idle")
|
||||
SD_BALANCE_EXEC = _SchedDomainFlag("SD_BALANCE_EXEC", 0x0004, "Balance on exec")
|
||||
SD_BALANCE_FORK = _SchedDomainFlag("SD_BALANCE_FORK", 0x0008, "Balance on fork, clone")
|
||||
SD_BALANCE_WAKE = _SchedDomainFlag("SD_BALANCE_WAKE", 0x0010, "Balance on wakeup")
|
||||
SD_WAKE_AFFINE = _SchedDomainFlag("SD_WAKE_AFFINE", 0x0020, "Wake task to waking CPU")
|
||||
SD_ASYM_CPUCAPACITY = _SchedDomainFlag("SD_ASYM_CPUCAPACITY", 0x0040, "Groups have different max cpu capacities")
|
||||
SD_SHARE_CPUCAPACITY = _SchedDomainFlag("SD_SHARE_CPUCAPACITY", 0x0080, "Domain members share cpu capacity")
|
||||
SD_SHARE_POWERDOMAIN = _SchedDomainFlag("SD_SHARE_POWERDOMAIN", 0x0100, "Domain members share power domain")
|
||||
SD_SHARE_PKG_RESOURCES = _SchedDomainFlag("SD_SHARE_PKG_RESOURCES", 0x0200, "Domain members share cpu pkg resources")
|
||||
SD_SERIALIZE = _SchedDomainFlag("SD_SERIALIZE", 0x0400, "Only a single load balancing instance")
|
||||
SD_ASYM_PACKING = _SchedDomainFlag("SD_ASYM_PACKING", 0x0800, "Place busy groups earlier in the domain")
|
||||
SD_PREFER_SIBLING = _SchedDomainFlag("SD_PREFER_SIBLING", 0x1000, "Prefer to place tasks in a sibling domain")
|
||||
SD_OVERLAP = _SchedDomainFlag("SD_OVERLAP", 0x2000, "Sched_domains of this level overlap")
|
||||
SD_NUMA = _SchedDomainFlag("SD_NUMA", 0x4000, "Cross-node balancing")
|
||||
# Only defined in Android
|
||||
# https://android.googlesource.com/kernel/common/+/android-4.14/include/linux/sched/topology.h#29
|
||||
SD_SHARE_CAP_STATES = _SchedDomainFlag("SD_SHARE_CAP_STATES", 0x8000, "(Android only) Domain members share capacity state")
|
||||
|
||||
|
||||
class SchedDomain(SchedProcFSNode):
|
||||
"""
|
||||
Represents a sched domain as seen through procfs
|
||||
"""
|
||||
def __init__(self, nodes):
|
||||
super().__init__(nodes)
|
||||
|
||||
flags = self.flags
|
||||
# Recent kernels now have a space-separated list of flags instead of a
|
||||
# packed bitfield
|
||||
if isinstance(flags, str):
|
||||
flags = {
|
||||
_SchedDomainFlag(name=name, value=None)
|
||||
for name in flags.split()
|
||||
}
|
||||
else:
|
||||
def has_flag(flags, flag):
|
||||
return flags & flag.value == flag.value
|
||||
|
||||
flags = {
|
||||
flag
|
||||
for flag in SchedDomainFlag
|
||||
if has_flag(flags, flag)
|
||||
}
|
||||
|
||||
self.flags = flags
|
||||
|
||||
def _select_path(target, paths, name):
|
||||
for p in paths:
|
||||
if target.file_exists(p):
|
||||
return p
|
||||
|
||||
raise TargetStableError('No {} found. Tried: {}'.format(name, ', '.join(paths)))
|
||||
|
||||
class SchedProcFSData(SchedProcFSNode):
|
||||
"""
|
||||
Root class for creating & storing SchedProcFSNode instances
|
||||
"""
|
||||
_read_depth = 6
|
||||
sched_domain_root = '/proc/sys/kernel/sched_domain'
|
||||
|
||||
@classmethod
|
||||
def get_data_root(cls, target):
|
||||
# Location differs depending on kernel version
|
||||
paths = ['/sys/kernel/debug/sched/domains/', '/proc/sys/kernel/sched_domain']
|
||||
return _select_path(target, paths, "sched_domain debug directory")
|
||||
|
||||
@staticmethod
|
||||
def available(target):
|
||||
return target.directory_exists(SchedProcFSData.sched_domain_root)
|
||||
try:
|
||||
path = SchedProcFSData.get_data_root(target)
|
||||
except TargetStableError:
|
||||
return False
|
||||
|
||||
cpus = target.list_directory(path)
|
||||
if not cpus:
|
||||
return False
|
||||
|
||||
# Even if we have a CPU entry, it can be empty (e.g. hotplugged out)
|
||||
# Make sure some data is there
|
||||
for cpu in cpus:
|
||||
if target.file_exists(target.path.join(path, cpu, "domain0", "flags")):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def __init__(self, target, path=None):
|
||||
if not path:
|
||||
path = self.sched_domain_root
|
||||
if path is None:
|
||||
path = SchedProcFSData.get_data_root(target)
|
||||
|
||||
procfs = target.read_tree_values(path, depth=self._read_depth)
|
||||
super(SchedProcFSData, self).__init__(procfs)
|
||||
@@ -227,16 +358,142 @@ class SchedModule(Module):
|
||||
@staticmethod
|
||||
def probe(target):
|
||||
logger = logging.getLogger(SchedModule.name)
|
||||
SchedDomain.check_version(target, logger)
|
||||
SchedDomainFlag.check_version(target, logger)
|
||||
|
||||
return SchedProcFSData.available(target)
|
||||
# It makes sense to load this module if at least one of those
|
||||
# functionalities is enabled
|
||||
schedproc = SchedProcFSData.available(target)
|
||||
debug = SchedModule.target_has_debug(target)
|
||||
dmips = any([target.file_exists(SchedModule.cpu_dmips_capacity_path(target, cpu))
|
||||
for cpu in target.list_online_cpus()])
|
||||
|
||||
logger.info("Scheduler sched_domain procfs entries %s",
|
||||
"found" if schedproc else "not found")
|
||||
logger.info("Detected kernel compiled with SCHED_DEBUG=%s",
|
||||
"y" if debug else "n")
|
||||
logger.info("CPU capacity sysfs entries %s",
|
||||
"found" if dmips else "not found")
|
||||
|
||||
return schedproc or debug or dmips
|
||||
|
||||
def __init__(self, target):
|
||||
super().__init__(target)
|
||||
|
||||
@classmethod
|
||||
def get_sched_features_path(cls, target):
|
||||
# Location differs depending on kernel version
|
||||
paths = ['/sys/kernel/debug/sched/features', '/sys/kernel/debug/sched_features']
|
||||
return _select_path(target, paths, "sched_features file")
|
||||
|
||||
def get_kernel_attributes(self, matching=None, check_exit_code=True):
|
||||
"""
|
||||
Get the value of scheduler attributes.
|
||||
|
||||
:param matching: an (optional) substring to filter the scheduler
|
||||
attributes to be returned.
|
||||
|
||||
The scheduler exposes a list of tunable attributes under:
|
||||
/proc/sys/kernel
|
||||
all starting with the "sched_" prefix.
|
||||
|
||||
This method returns a dictionary of all the "sched_" attributes exposed
|
||||
by the target kernel, within the prefix removed.
|
||||
It's possible to restrict the list of attributes by specifying a
|
||||
substring to be matched.
|
||||
|
||||
returns: a dictionary of scheduler tunables
|
||||
"""
|
||||
command = 'sched_get_kernel_attributes {}'.format(
|
||||
matching if matching else ''
|
||||
)
|
||||
output = self.target._execute_util(command, as_root=self.target.is_rooted,
|
||||
check_exit_code=check_exit_code)
|
||||
result = {}
|
||||
for entry in output.strip().split('\n'):
|
||||
if ':' not in entry:
|
||||
continue
|
||||
path, value = entry.strip().split(':', 1)
|
||||
if value in ['0', '1']:
|
||||
value = bool(int(value))
|
||||
elif value.isdigit():
|
||||
value = int(value)
|
||||
result[path] = value
|
||||
return result
|
||||
|
||||
def set_kernel_attribute(self, attr, value, verify=True):
|
||||
"""
|
||||
Set the value of a scheduler attribute.
|
||||
|
||||
:param attr: the attribute to set, without the "sched_" prefix
|
||||
:param value: the value to set
|
||||
:param verify: true to check that the requested value has been set
|
||||
|
||||
:raise TargetError: if the attribute cannot be set
|
||||
"""
|
||||
if isinstance(value, bool):
|
||||
value = '1' if value else '0'
|
||||
elif isinstance(value, int):
|
||||
value = str(value)
|
||||
path = '/proc/sys/kernel/sched_' + attr
|
||||
self.target.write_value(path, value, verify)
|
||||
|
||||
@classmethod
|
||||
def target_has_debug(cls, target):
|
||||
if target.config.get('SCHED_DEBUG') != 'y':
|
||||
return False
|
||||
|
||||
try:
|
||||
cls.get_sched_features_path(target)
|
||||
return True
|
||||
except TargetStableError:
|
||||
return False
|
||||
|
||||
def get_features(self):
|
||||
"""
|
||||
Get the status of each sched feature
|
||||
|
||||
:returns: a dictionary of features and their "is enabled" status
|
||||
"""
|
||||
feats = self.target.read_value(self.get_sched_features_path(self.target))
|
||||
features = {}
|
||||
for feat in feats.split():
|
||||
value = True
|
||||
if feat.startswith('NO'):
|
||||
feat = feat.replace('NO_', '', 1)
|
||||
value = False
|
||||
features[feat] = value
|
||||
return features
|
||||
|
||||
def set_feature(self, feature, enable, verify=True):
|
||||
"""
|
||||
Set the status of a specified scheduler feature
|
||||
|
||||
:param feature: the feature name to set
|
||||
:param enable: true to enable the feature, false otherwise
|
||||
|
||||
:raise ValueError: if the specified enable value is not bool
|
||||
:raise RuntimeError: if the specified feature cannot be set
|
||||
"""
|
||||
feature = feature.upper()
|
||||
feat_value = feature
|
||||
if not boolean(enable):
|
||||
feat_value = 'NO_' + feat_value
|
||||
self.target.write_value(self.get_sched_features_path(self.target),
|
||||
feat_value, verify=False)
|
||||
if not verify:
|
||||
return
|
||||
msg = 'Failed to set {}, feature not supported?'.format(feat_value)
|
||||
features = self.get_features()
|
||||
feat_value = features.get(feature, not enable)
|
||||
if feat_value != enable:
|
||||
raise RuntimeError(msg)
|
||||
|
||||
def get_cpu_sd_info(self, cpu):
|
||||
"""
|
||||
:returns: An object view of /proc/sys/kernel/sched_domain/cpu<cpu>/*
|
||||
:returns: An object view of the sched_domain debug directory of 'cpu'
|
||||
"""
|
||||
path = self.target.path.join(
|
||||
SchedProcFSData.sched_domain_root,
|
||||
SchedProcFSData.get_data_root(self.target),
|
||||
"cpu{}".format(cpu)
|
||||
)
|
||||
|
||||
@@ -244,7 +501,7 @@ class SchedModule(Module):
|
||||
|
||||
def get_sd_info(self):
|
||||
"""
|
||||
:returns: An object view of /proc/sys/kernel/sched_domain/*
|
||||
:returns: An object view of the entire sched_domain debug directory
|
||||
"""
|
||||
return SchedProcFSData(self.target)
|
||||
|
||||
@@ -260,17 +517,26 @@ class SchedModule(Module):
|
||||
:returns: Whether energy model data is available for 'cpu'
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
sd = self.get_cpu_sd_info(cpu)
|
||||
|
||||
return sd.procfs["domain0"].get("group0", {}).get("energy", {}).get("cap_states") != None
|
||||
|
||||
@classmethod
|
||||
def cpu_dmips_capacity_path(cls, target, cpu):
|
||||
"""
|
||||
:returns: The target sysfs path where the dmips capacity data should be
|
||||
"""
|
||||
return target.path.join(
|
||||
cls.cpu_sysfs_root,
|
||||
'cpu{}/cpu_capacity'.format(cpu))
|
||||
|
||||
@memoized
|
||||
def has_dmips_capacity(self, cpu):
|
||||
"""
|
||||
:returns: Whether dmips capacity data is available for 'cpu'
|
||||
"""
|
||||
return self.target.file_exists(
|
||||
self.target.path.join(self.cpu_sysfs_root, 'cpu{}/cpu_capacity'.format(cpu))
|
||||
self.cpu_dmips_capacity_path(self.target, cpu)
|
||||
)
|
||||
|
||||
@memoized
|
||||
@@ -279,10 +545,13 @@ class SchedModule(Module):
|
||||
:returns: The maximum capacity value exposed by the EAS energy model
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
sd = self.get_cpu_sd_info(cpu)
|
||||
|
||||
cap_states = sd.domains[0].groups[0].energy.cap_states
|
||||
return int(cap_states.split('\t')[-2])
|
||||
cap_states_list = cap_states.split('\t')
|
||||
num_cap_states = sd.domains[0].groups[0].energy.nr_cap_states
|
||||
max_cap_index = -1 * int(len(cap_states_list) / num_cap_states)
|
||||
return int(cap_states_list[max_cap_index])
|
||||
|
||||
@memoized
|
||||
def get_dmips_capacity(self, cpu):
|
||||
@@ -290,14 +559,9 @@ class SchedModule(Module):
|
||||
:returns: The capacity value generated from the capacity-dmips-mhz DT entry
|
||||
"""
|
||||
return self.target.read_value(
|
||||
self.target.path.join(
|
||||
self.cpu_sysfs_root,
|
||||
'cpu{}/cpu_capacity'.format(cpu)
|
||||
),
|
||||
int
|
||||
self.cpu_dmips_capacity_path(self.target, cpu), int
|
||||
)
|
||||
|
||||
@memoized
|
||||
def get_capacities(self, default=None):
|
||||
"""
|
||||
:param default: Default capacity value to find if no data is
|
||||
@@ -308,16 +572,30 @@ class SchedModule(Module):
|
||||
:raises RuntimeError: Raised when no capacity information is
|
||||
found and 'default' is None
|
||||
"""
|
||||
cpus = list(range(self.target.number_of_cpus))
|
||||
cpus = self.target.list_online_cpus()
|
||||
|
||||
capacities = {}
|
||||
sd_info = self.get_sd_info()
|
||||
|
||||
for cpu in cpus:
|
||||
if self.has_dmips_capacity(cpu):
|
||||
capacities[cpu] = self.get_dmips_capacity(cpu)
|
||||
|
||||
missing_cpus = set(cpus).difference(capacities.keys())
|
||||
if not missing_cpus:
|
||||
return capacities
|
||||
|
||||
if not SchedProcFSData.available(self.target):
|
||||
if default != None:
|
||||
capacities.update({cpu : default for cpu in missing_cpus})
|
||||
return capacities
|
||||
else:
|
||||
raise RuntimeError(
|
||||
'No capacity data for cpus {}'.format(sorted(missing_cpus)))
|
||||
|
||||
sd_info = self.get_sd_info()
|
||||
for cpu in missing_cpus:
|
||||
if self.has_em(cpu, sd_info.cpus[cpu]):
|
||||
capacities[cpu] = self.get_em_capacity(cpu, sd_info.cpus[cpu])
|
||||
elif self.has_dmips_capacity(cpu):
|
||||
capacities[cpu] = self.get_dmips_capacity(cpu)
|
||||
else:
|
||||
if default != None:
|
||||
capacities[cpu] = default
|
||||
|
@@ -48,7 +48,7 @@ class ThermalZone(object):
|
||||
self.path = target.path.join(root, self.name)
|
||||
self.trip_points = {}
|
||||
|
||||
for entry in self.target.list_directory(self.path):
|
||||
for entry in self.target.list_directory(self.path, as_root=target.is_rooted):
|
||||
re_match = re.match('^trip_point_([0-9]+)_temp', entry)
|
||||
if re_match is not None:
|
||||
self.add_trip_point(re_match.group(1))
|
||||
@@ -88,6 +88,9 @@ class ThermalModule(Module):
|
||||
|
||||
for entry in target.list_directory(self.thermal_root):
|
||||
re_match = re.match('^(thermal_zone|cooling_device)([0-9]+)', entry)
|
||||
if not re_match:
|
||||
self.logger.warning('unknown thermal entry: %s', entry)
|
||||
continue
|
||||
|
||||
if re_match.group(1) == 'thermal_zone':
|
||||
self.add_thermal_zone(re_match.group(2))
|
||||
|
@@ -20,7 +20,7 @@ import shutil
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from devlib.module import HardRestModule, BootModule, FlashModule
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import TargetError, TargetStableError, HostError
|
||||
from devlib.utils.serial_port import open_serial_connection, pulse_dtr, write_characters
|
||||
from devlib.utils.uefi import UefiMenu, UefiConfig
|
||||
from devlib.utils.uboot import UbootMenu
|
||||
@@ -89,7 +89,7 @@ class VexpressReboottxtHardReset(HardRestModule):
|
||||
try:
|
||||
if self.target.is_connected:
|
||||
self.target.execute('sync')
|
||||
except TargetError:
|
||||
except (TargetError, CalledProcessError):
|
||||
pass
|
||||
|
||||
if not os.path.exists(self.path):
|
||||
@@ -130,7 +130,7 @@ class VexpressBootModule(BootModule):
|
||||
init_dtr=0) as tty:
|
||||
self.get_through_early_boot(tty)
|
||||
self.perform_boot_sequence(tty)
|
||||
self.wait_for_android_prompt(tty)
|
||||
self.wait_for_shell_prompt(tty)
|
||||
|
||||
def perform_boot_sequence(self, tty):
|
||||
raise NotImplementedError()
|
||||
@@ -159,8 +159,8 @@ class VexpressBootModule(BootModule):
|
||||
menu.wait(timeout=self.timeout)
|
||||
return menu
|
||||
|
||||
def wait_for_android_prompt(self, tty):
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
def wait_for_shell_prompt(self, tty):
|
||||
self.logger.debug('Waiting for the shell prompt.')
|
||||
tty.expect(self.target.shell_prompt, timeout=self.timeout)
|
||||
# This delay is needed to allow the platform some time to finish
|
||||
# initilizing; querying the ip address too early from connect() may
|
||||
@@ -209,6 +209,7 @@ class VexpressUefiShellBoot(VexpressBootModule):
|
||||
|
||||
name = 'vexpress-uefi-shell'
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def __init__(self, target, uefi_entry='^Shell$',
|
||||
efi_shell_prompt='Shell>',
|
||||
image='kernel', bootargs=None,
|
||||
@@ -224,7 +225,7 @@ class VexpressUefiShellBoot(VexpressBootModule):
|
||||
try:
|
||||
menu.select(self.uefi_entry)
|
||||
except LookupError:
|
||||
raise TargetError('Did not see "{}" UEFI entry.'.format(self.uefi_entry))
|
||||
raise TargetStableError('Did not see "{}" UEFI entry.'.format(self.uefi_entry))
|
||||
tty.expect(self.efi_shell_prompt, timeout=self.timeout)
|
||||
if self.bootargs:
|
||||
tty.sendline('') # stop default boot
|
||||
@@ -239,6 +240,7 @@ class VexpressUBoot(VexpressBootModule):
|
||||
|
||||
name = 'vexpress-u-boot'
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def __init__(self, target, env=None,
|
||||
*args, **kwargs):
|
||||
super(VexpressUBoot, self).__init__(target, *args, **kwargs)
|
||||
@@ -260,6 +262,7 @@ class VexpressBootmon(VexpressBootModule):
|
||||
|
||||
name = 'vexpress-bootmon'
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def __init__(self, target,
|
||||
image, fdt, initrd, bootargs,
|
||||
uses_bootscript=False,
|
||||
@@ -282,11 +285,11 @@ class VexpressBootmon(VexpressBootModule):
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as tty:
|
||||
write_characters(tty, 'fl linux fdt {}'.format(self.fdt))
|
||||
write_characters(tty, 'fl linux initrd {}'.format(self.initrd))
|
||||
write_characters(tty, 'fl linux boot {} {}'.format(self.image,
|
||||
self.bootargs))
|
||||
init_dtr=0) as tty_conn:
|
||||
write_characters(tty_conn, 'fl linux fdt {}'.format(self.fdt))
|
||||
write_characters(tty_conn, 'fl linux initrd {}'.format(self.initrd))
|
||||
write_characters(tty_conn, 'fl linux boot {} {}'.format(self.image,
|
||||
self.bootargs))
|
||||
|
||||
|
||||
class VersatileExpressFlashModule(FlashModule):
|
||||
@@ -322,15 +325,16 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
self.timeout = timeout
|
||||
self.short_delay = short_delay
|
||||
|
||||
def __call__(self, image_bundle=None, images=None, bootargs=None):
|
||||
def __call__(self, image_bundle=None, images=None, bootargs=None, connect=True):
|
||||
self.target.hard_reset()
|
||||
with open_serial_connection(port=self.target.platform.serial_port,
|
||||
baudrate=self.target.platform.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as tty:
|
||||
# pylint: disable=no-member
|
||||
i = tty.expect([self.mcc_prompt, AUTOSTART_MESSAGE, OLD_AUTOSTART_MESSAGE])
|
||||
if i:
|
||||
tty.sendline('')
|
||||
tty.sendline('') # pylint: disable=no-member
|
||||
wait_for_vemsd(self.vemsd_mount, tty, self.mcc_prompt, self.short_delay)
|
||||
try:
|
||||
if image_bundle:
|
||||
@@ -340,9 +344,10 @@ class VersatileExpressFlashModule(FlashModule):
|
||||
os.system('sync')
|
||||
except (IOError, OSError) as e:
|
||||
msg = 'Could not deploy images to {}; got: {}'
|
||||
raise TargetError(msg.format(self.vemsd_mount, e))
|
||||
raise TargetStableError(msg.format(self.vemsd_mount, e))
|
||||
self.target.boot()
|
||||
self.target.connect(timeout=30)
|
||||
if connect:
|
||||
self.target.connect(timeout=30)
|
||||
|
||||
def _deploy_image_bundle(self, bundle):
|
||||
self.logger.debug('Validating {}'.format(bundle))
|
||||
@@ -386,5 +391,4 @@ def wait_for_vemsd(vemsd_mount, tty, mcc_prompt=DEFAULT_MCC_PROMPT, short_delay=
|
||||
time.sleep(short_delay * 3)
|
||||
if os.path.exists(path):
|
||||
return
|
||||
raise TargetError('Could not mount {}'.format(vemsd_mount))
|
||||
|
||||
raise TargetStableError('Could not mount {}'.format(vemsd_mount))
|
||||
|
@@ -78,7 +78,16 @@ class Platform(object):
|
||||
|
||||
def _set_model_from_target(self, target):
|
||||
if target.os == 'android':
|
||||
self.model = target.getprop('ro.product.model')
|
||||
try:
|
||||
self.model = target.getprop(prop='ro.product.device')
|
||||
except KeyError:
|
||||
self.model = target.getprop('ro.product.model')
|
||||
elif target.file_exists("/proc/device-tree/model"):
|
||||
# There is currently no better way to do this cross platform.
|
||||
# ARM does not have dmidecode
|
||||
raw_model = target.execute("cat /proc/device-tree/model")
|
||||
device_model_to_return = '_'.join(raw_model.split()[:2])
|
||||
return device_model_to_return.rstrip(' \t\r\n\0')
|
||||
elif target.is_rooted:
|
||||
try:
|
||||
self.model = target.execute('dmidecode -s system-version',
|
||||
|
@@ -19,10 +19,11 @@ import tempfile
|
||||
import time
|
||||
import pexpect
|
||||
|
||||
from devlib.platform import Platform
|
||||
from devlib.instrument import Instrument, InstrumentChannel, MeasurementsCsv, Measurement, CONTINUOUS, INSTANTANEOUS
|
||||
from devlib.exception import TargetError, HostError
|
||||
from devlib.exception import HostError, TargetTransientError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.instrument import (Instrument, InstrumentChannel, MeasurementsCsv,
|
||||
Measurement, CONTINUOUS, INSTANTANEOUS)
|
||||
from devlib.platform import Platform
|
||||
from devlib.utils.csvutil import csvreader, csvwriter
|
||||
from devlib.utils.serial_port import open_serial_connection
|
||||
|
||||
@@ -89,9 +90,6 @@ class VersatileExpressPlatform(Platform):
|
||||
def _init_android_target(self, target):
|
||||
if target.connection_settings.get('device') is None:
|
||||
addr = self._get_target_ip_address(target)
|
||||
if sys.version_info[0] == 3:
|
||||
# Convert bytes to string for Python3 compatibility
|
||||
addr = addr.decode("utf-8")
|
||||
target.connection_settings['device'] = addr + ':5555'
|
||||
|
||||
def _init_linux_target(self, target):
|
||||
@@ -99,6 +97,7 @@ class VersatileExpressPlatform(Platform):
|
||||
addr = self._get_target_ip_address(target)
|
||||
target.connection_settings['host'] = addr
|
||||
|
||||
# pylint: disable=no-member
|
||||
def _get_target_ip_address(self, target):
|
||||
with open_serial_connection(port=self.serial_port,
|
||||
baudrate=self.baudrate,
|
||||
@@ -106,7 +105,7 @@ class VersatileExpressPlatform(Platform):
|
||||
init_dtr=0) as tty:
|
||||
tty.sendline('su') # this is, apprently, required to query network device
|
||||
# info by name on recent Juno builds...
|
||||
self.logger.debug('Waiting for the Android shell prompt.')
|
||||
self.logger.debug('Waiting for the shell prompt.')
|
||||
tty.expect(target.shell_prompt)
|
||||
|
||||
self.logger.debug('Waiting for IP address...')
|
||||
@@ -117,11 +116,11 @@ class VersatileExpressPlatform(Platform):
|
||||
time.sleep(1)
|
||||
try:
|
||||
tty.expect(r'inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
|
||||
return tty.match.group(1)
|
||||
return tty.match.group(1).decode('utf-8')
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if (time.time() - wait_start_time) > self.ready_timeout:
|
||||
raise TargetError('Could not acquire IP address.')
|
||||
raise TargetTransientError('Could not acquire IP address.')
|
||||
finally:
|
||||
tty.sendline('exit') # exit shell created by "su" call at the start
|
||||
|
||||
@@ -250,7 +249,7 @@ class JunoEnergyInstrument(Instrument):
|
||||
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
|
||||
self.command2 = '{}'.format(self.binary)
|
||||
|
||||
def setup(self):
|
||||
def setup(self): # pylint: disable=arguments-differ
|
||||
self.binary = self.target.install(os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
self.target.abi, self.binname))
|
||||
self.command = '{} -o {}'.format(self.binary, self.on_target_file)
|
||||
@@ -266,6 +265,7 @@ class JunoEnergyInstrument(Instrument):
|
||||
def stop(self):
|
||||
self.target.killall(self.binname, signal='TERM', as_root=True)
|
||||
|
||||
# pylint: disable=arguments-differ
|
||||
def get_data(self, output_file):
|
||||
temp_file = tempfile.mktemp()
|
||||
self.target.pull(self.on_target_file, temp_file)
|
||||
@@ -296,10 +296,9 @@ class JunoEnergyInstrument(Instrument):
|
||||
result = []
|
||||
output = self.target.execute(self.command2).split()
|
||||
with csvreader(output) as reader:
|
||||
headings=next(reader)
|
||||
headings = next(reader)
|
||||
values = next(reader)
|
||||
for chan in self.active_channels:
|
||||
value = values[headings.index(chan.name)]
|
||||
result.append(Measurement(value, chan))
|
||||
return result
|
||||
|
||||
|
@@ -15,12 +15,13 @@
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import shutil
|
||||
import time
|
||||
import types
|
||||
import shlex
|
||||
from pipes import quote
|
||||
|
||||
from devlib.exception import TargetError
|
||||
from devlib.exception import TargetStableError
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.platform import Platform
|
||||
from devlib.utils.ssh import AndroidGem5Connection, LinuxGem5Connection
|
||||
@@ -55,7 +56,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
self.stdout_file = None
|
||||
self.stderr_file = None
|
||||
self.stderr_filename = None
|
||||
if self.gem5_port is None:
|
||||
if self.gem5_port is None: # pylint: disable=simplifiable-if-statement
|
||||
# Allows devlib to pick up already running simulations
|
||||
self.start_gem5_simulation = True
|
||||
else:
|
||||
@@ -87,12 +88,12 @@ class Gem5SimulationPlatform(Platform):
|
||||
Check if the command to start gem5 makes sense
|
||||
"""
|
||||
if self.gem5args_binary is None:
|
||||
raise TargetError('Please specify a gem5 binary.')
|
||||
raise TargetStableError('Please specify a gem5 binary.')
|
||||
if self.gem5args_args is None:
|
||||
raise TargetError('Please specify the arguments passed on to gem5.')
|
||||
raise TargetStableError('Please specify the arguments passed on to gem5.')
|
||||
self.gem5args_virtio = str(self.gem5args_virtio).format(self.gem5_interact_dir)
|
||||
if self.gem5args_virtio is None:
|
||||
raise TargetError('Please specify arguments needed for virtIO.')
|
||||
raise TargetStableError('Please specify arguments needed for virtIO.')
|
||||
|
||||
def _start_interaction_gem5(self):
|
||||
"""
|
||||
@@ -111,7 +112,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
if not os.path.exists(self.stats_directory):
|
||||
os.mkdir(self.stats_directory)
|
||||
if os.path.exists(self.gem5_out_dir):
|
||||
raise TargetError("The gem5 stats directory {} already "
|
||||
raise TargetStableError("The gem5 stats directory {} already "
|
||||
"exists.".format(self.gem5_out_dir))
|
||||
else:
|
||||
os.mkdir(self.gem5_out_dir)
|
||||
@@ -130,11 +131,11 @@ class Gem5SimulationPlatform(Platform):
|
||||
self.logger.info("Starting the gem5 simulator")
|
||||
|
||||
command_line = "{} --outdir={} {} {}".format(self.gem5args_binary,
|
||||
self.gem5_out_dir,
|
||||
quote(self.gem5_out_dir),
|
||||
self.gem5args_args,
|
||||
self.gem5args_virtio)
|
||||
self.logger.debug("gem5 command line: {}".format(command_line))
|
||||
self.gem5 = subprocess.Popen(command_line.split(),
|
||||
self.gem5 = subprocess.Popen(shlex.split(command_line),
|
||||
stdout=self.stdout_file,
|
||||
stderr=self.stderr_file)
|
||||
|
||||
@@ -154,7 +155,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
e.g. pid, input directory etc
|
||||
"""
|
||||
self.logger("This functionality is not yet implemented")
|
||||
raise TargetError()
|
||||
raise TargetStableError()
|
||||
|
||||
def _intercept_telnet_port(self):
|
||||
"""
|
||||
@@ -162,13 +163,13 @@ class Gem5SimulationPlatform(Platform):
|
||||
"""
|
||||
|
||||
if self.gem5 is None:
|
||||
raise TargetError('The platform has no gem5 simulation! '
|
||||
raise TargetStableError('The platform has no gem5 simulation! '
|
||||
'Something went wrong')
|
||||
while self.gem5_port is None:
|
||||
# Check that gem5 is running!
|
||||
if self.gem5.poll():
|
||||
message = "The gem5 process has crashed with error code {}!\n\tPlease see {} for details."
|
||||
raise TargetError(message.format(self.gem5.poll(), self.stderr_file.name))
|
||||
raise TargetStableError(message.format(self.gem5.poll(), self.stderr_file.name))
|
||||
|
||||
# Open the stderr file
|
||||
with open(self.stderr_filename, 'r') as f:
|
||||
@@ -186,7 +187,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
# Check if the sockets are not disabled
|
||||
m = re.search(r"Sockets disabled, not accepting terminal connections", line)
|
||||
if m:
|
||||
raise TargetError("The sockets have been disabled!"
|
||||
raise TargetStableError("The sockets have been disabled!"
|
||||
"Pass --listener-mode=on to gem5")
|
||||
else:
|
||||
time.sleep(1)
|
||||
@@ -234,6 +235,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
# Call the general update_from_target implementation
|
||||
super(Gem5SimulationPlatform, self).update_from_target(target)
|
||||
|
||||
|
||||
def gem5_capture_screen(self, filepath):
|
||||
file_list = os.listdir(self.gem5_out_dir)
|
||||
screen_caps = []
|
||||
@@ -243,6 +245,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
|
||||
if '{ts}' in filepath:
|
||||
cmd = '{} date -u -Iseconds'
|
||||
# pylint: disable=no-member
|
||||
ts = self.target.execute(cmd.format(self.target.busybox)).strip()
|
||||
filepath = filepath.format(ts=ts)
|
||||
|
||||
@@ -258,6 +261,7 @@ class Gem5SimulationPlatform(Platform):
|
||||
im.save(temp_image, "PNG")
|
||||
shutil.copy(temp_image, filepath)
|
||||
os.remove(temp_image)
|
||||
# pylint: disable=undefined-variable
|
||||
gem5_logger.info("capture_screen: using gem5 screencap")
|
||||
successful_capture = True
|
||||
|
||||
@@ -266,12 +270,14 @@ class Gem5SimulationPlatform(Platform):
|
||||
|
||||
return successful_capture
|
||||
|
||||
# pylint: disable=no-self-use
|
||||
def _deploy_m5(self, target):
|
||||
# m5 is not yet installed so install it
|
||||
host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
target.abi, 'm5')
|
||||
return target.install(host_executable)
|
||||
|
||||
# pylint: disable=no-self-use
|
||||
def _resize_shell(self, target):
|
||||
"""
|
||||
Resize the shell to avoid line wrapping issues.
|
||||
@@ -282,18 +288,16 @@ class Gem5SimulationPlatform(Platform):
|
||||
target.execute('reset', check_exit_code=False)
|
||||
|
||||
# Methods that will be monkey-patched onto the target
|
||||
def _overwritten_reset(self):
|
||||
raise TargetError('Resetting is not allowed on gem5 platforms!')
|
||||
def _overwritten_reset(self): # pylint: disable=unused-argument
|
||||
raise TargetStableError('Resetting is not allowed on gem5 platforms!')
|
||||
|
||||
def _overwritten_reboot(self):
|
||||
raise TargetError('Rebooting is not allowed on gem5 platforms!')
|
||||
def _overwritten_reboot(self): # pylint: disable=unused-argument
|
||||
raise TargetStableError('Rebooting is not allowed on gem5 platforms!')
|
||||
|
||||
def _overwritten_capture_screen(self, filepath):
|
||||
connection_screencapped = self.platform.gem5_capture_screen(filepath)
|
||||
if connection_screencapped == False:
|
||||
if not connection_screencapped:
|
||||
# The connection was not able to capture the screen so use the target
|
||||
# implementation
|
||||
self.logger.debug('{} was not able to screen cap, using the original target implementation'.format(self.platform.__class__.__name__))
|
||||
self.target_impl_capture_screen(filepath)
|
||||
|
||||
|
||||
|
1179
devlib/target.py
1179
devlib/target.py
File diff suppressed because it is too large
Load Diff
@@ -1,35 +0,0 @@
|
||||
# Copyright 2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
class TraceCollector(object):
|
||||
|
||||
def __init__(self, target):
|
||||
self.target = target
|
||||
self.logger = logging.getLogger(self.__class__.__name__)
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def get_trace(self, outfile):
|
||||
pass
|
@@ -12,5 +12,3 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
|
@@ -19,33 +19,43 @@ Utility functions for working with Android devices through adb.
|
||||
|
||||
"""
|
||||
# pylint: disable=E1103
|
||||
import glob
|
||||
import logging
|
||||
import os
|
||||
import pexpect
|
||||
import time
|
||||
import subprocess
|
||||
import logging
|
||||
import re
|
||||
import threading
|
||||
import tempfile
|
||||
import queue
|
||||
import subprocess
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
import tempfile
|
||||
import time
|
||||
import uuid
|
||||
import zipfile
|
||||
|
||||
from devlib.exception import TargetError, HostError, DevlibError
|
||||
from devlib.utils.misc import check_output, which, memoized, ABI_MAP
|
||||
from devlib.utils.misc import escape_single_quotes, escape_double_quotes
|
||||
from devlib import host
|
||||
from collections import defaultdict
|
||||
from io import StringIO
|
||||
from lxml import etree
|
||||
|
||||
try:
|
||||
from shlex import quote
|
||||
except ImportError:
|
||||
from pipes import quote
|
||||
|
||||
from devlib.exception import TargetTransientError, TargetStableError, HostError
|
||||
from devlib.utils.misc import check_output, which, ABI_MAP, redirect_streams, get_subprocess
|
||||
from devlib.connection import ConnectionBase, AdbBackgroundCommand, PopenBackgroundCommand, PopenTransferManager
|
||||
|
||||
|
||||
logger = logging.getLogger('android')
|
||||
|
||||
MAX_ATTEMPTS = 5
|
||||
AM_START_ERROR = re.compile(r"Error: Activity.*")
|
||||
AAPT_BADGING_OUTPUT = re.compile(r"no dump ((file)|(apk)) specified", re.IGNORECASE)
|
||||
|
||||
# See:
|
||||
# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
|
||||
ANDROID_VERSION_MAP = {
|
||||
28: 'P',
|
||||
29: 'Q',
|
||||
28: 'PIE',
|
||||
27: 'OREO_MR1',
|
||||
26: 'OREO',
|
||||
25: 'NOUGAT_MR1',
|
||||
@@ -87,6 +97,7 @@ android_home = None
|
||||
platform_tools = None
|
||||
adb = None
|
||||
aapt = None
|
||||
aapt_version = None
|
||||
fastboot = None
|
||||
|
||||
|
||||
@@ -117,6 +128,7 @@ class AdbDevice(object):
|
||||
self.name = name
|
||||
self.status = status
|
||||
|
||||
# pylint: disable=undefined-variable
|
||||
def __cmp__(self, other):
|
||||
if isinstance(other, AdbDevice):
|
||||
return cmp(self.name, other.name)
|
||||
@@ -134,6 +146,7 @@ class ApkInfo(object):
|
||||
version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
|
||||
name_regex = re.compile(r"name='(?P<name>[^']+)'")
|
||||
permission_regex = re.compile(r"name='(?P<permission>[^']+)'")
|
||||
activity_regex = re.compile(r'\s*A:\s*android:name\(0x\d+\)=".(?P<name>\w+)"')
|
||||
|
||||
def __init__(self, path=None):
|
||||
self.path = path
|
||||
@@ -144,19 +157,16 @@ class ApkInfo(object):
|
||||
self.version_code = None
|
||||
self.native_code = None
|
||||
self.permissions = []
|
||||
self.parse(path)
|
||||
self._apk_path = None
|
||||
self._activities = None
|
||||
self._methods = None
|
||||
if path:
|
||||
self.parse(path)
|
||||
|
||||
# pylint: disable=too-many-branches
|
||||
def parse(self, apk_path):
|
||||
_check_env()
|
||||
command = [aapt, 'dump', 'badging', apk_path]
|
||||
logger.debug(' '.join(command))
|
||||
try:
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||
if sys.version_info[0] == 3:
|
||||
output = output.decode(sys.stdout.encoding, 'replace')
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError('Error parsing APK file {}. `aapt` says:\n{}'
|
||||
.format(apk_path, e.output))
|
||||
output = self._run([aapt, 'dump', 'badging', apk_path])
|
||||
for line in output.split('\n'):
|
||||
if line.startswith('application-label:'):
|
||||
self.label = line.split(':')[1].strip().replace('\'', '')
|
||||
@@ -189,19 +199,190 @@ class ApkInfo(object):
|
||||
else:
|
||||
pass # not interested
|
||||
|
||||
self._apk_path = apk_path
|
||||
self._activities = None
|
||||
self._methods = None
|
||||
|
||||
class AdbConnection(object):
|
||||
@property
|
||||
def activities(self):
|
||||
if self._activities is None:
|
||||
cmd = [aapt, 'dump', 'xmltree', self._apk_path]
|
||||
if aapt_version == 2:
|
||||
cmd += ['--file']
|
||||
cmd += ['AndroidManifest.xml']
|
||||
matched_activities = self.activity_regex.finditer(self._run(cmd))
|
||||
self._activities = [m.group('name') for m in matched_activities]
|
||||
return self._activities
|
||||
|
||||
@property
|
||||
def methods(self):
|
||||
if self._methods is None:
|
||||
# Only try to extract once
|
||||
self._methods = []
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
with zipfile.ZipFile(self._apk_path, 'r') as z:
|
||||
try:
|
||||
extracted = z.extract('classes.dex', tmp_dir)
|
||||
except KeyError:
|
||||
return []
|
||||
dexdump = os.path.join(os.path.dirname(aapt), 'dexdump')
|
||||
command = [dexdump, '-l', 'xml', extracted]
|
||||
dump = self._run(command)
|
||||
|
||||
# Dexdump from build tools v30.0.X does not seem to produce
|
||||
# valid xml from certain APKs so ignore errors and attempt to recover.
|
||||
parser = etree.XMLParser(encoding='utf-8', recover=True)
|
||||
xml_tree = etree.parse(StringIO(dump), parser)
|
||||
|
||||
package = next((i for i in xml_tree.iter('package')
|
||||
if i.attrib['name'] == self.package), None)
|
||||
|
||||
self._methods = [(meth.attrib['name'], klass.attrib['name'])
|
||||
for klass in package.iter('class')
|
||||
for meth in klass.iter('method')] if package else []
|
||||
return self._methods
|
||||
|
||||
def _run(self, command):
|
||||
logger.debug(' '.join(command))
|
||||
try:
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||
if sys.version_info[0] == 3:
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError('Error while running "{}":\n{}'
|
||||
.format(command, e.output))
|
||||
return output
|
||||
|
||||
|
||||
class AdbConnection(ConnectionBase):
|
||||
|
||||
# maintains the count of parallel active connections to a device, so that
|
||||
# adb disconnect is not invoked untill all connections are closed
|
||||
active_connections = defaultdict(int)
|
||||
# Track connected as root status per device
|
||||
_connected_as_root = defaultdict(lambda: None)
|
||||
default_timeout = 10
|
||||
ls_command = 'ls'
|
||||
su_cmd = 'su -c {}'
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.device
|
||||
|
||||
@property
|
||||
def connected_as_root(self):
|
||||
if self._connected_as_root[self.device] is None:
|
||||
result = self.execute('id')
|
||||
self._connected_as_root[self.device] = 'uid=0(' in result
|
||||
return self._connected_as_root[self.device]
|
||||
|
||||
@connected_as_root.setter
|
||||
def connected_as_root(self, state):
|
||||
self._connected_as_root[self.device] = state
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def __init__(self, device=None, timeout=None, platform=None, adb_server=None,
|
||||
adb_as_root=False, connection_attempts=MAX_ATTEMPTS,
|
||||
poll_transfers=False,
|
||||
start_transfer_poll_delay=30,
|
||||
total_transfer_timeout=3600,
|
||||
transfer_poll_period=30,):
|
||||
super().__init__()
|
||||
self.timeout = timeout if timeout is not None else self.default_timeout
|
||||
if device is None:
|
||||
device = adb_get_device(timeout=timeout, adb_server=adb_server)
|
||||
self.device = device
|
||||
self.adb_server = adb_server
|
||||
self.adb_as_root = adb_as_root
|
||||
self.poll_transfers = poll_transfers
|
||||
if poll_transfers:
|
||||
transfer_opts = {'start_transfer_poll_delay': start_transfer_poll_delay,
|
||||
'total_timeout': total_transfer_timeout,
|
||||
'poll_period': transfer_poll_period,
|
||||
}
|
||||
self.transfer_mgr = PopenTransferManager(self, **transfer_opts) if poll_transfers else None
|
||||
if self.adb_as_root:
|
||||
self.adb_root(enable=True)
|
||||
adb_connect(self.device, adb_server=self.adb_server, attempts=connection_attempts)
|
||||
AdbConnection.active_connections[self.device] += 1
|
||||
self._setup_ls()
|
||||
self._setup_su()
|
||||
|
||||
def push(self, sources, dest, timeout=None):
|
||||
return self._push_pull('push', sources, dest, timeout)
|
||||
|
||||
def pull(self, sources, dest, timeout=None):
|
||||
return self._push_pull('pull', sources, dest, timeout)
|
||||
|
||||
def _push_pull(self, action, sources, dest, timeout):
|
||||
paths = sources + [dest]
|
||||
|
||||
# Quote twice to avoid expansion by host shell, then ADB globbing
|
||||
do_quote = lambda x: quote(glob.escape(x))
|
||||
paths = ' '.join(map(do_quote, paths))
|
||||
|
||||
command = "{} {}".format(action, paths)
|
||||
if timeout or not self.poll_transfers:
|
||||
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
else:
|
||||
with self.transfer_mgr.manage(sources, dest, action):
|
||||
bg_cmd = adb_command_background(self.device, command, adb_server=self.adb_server)
|
||||
self.transfer_mgr.set_transfer_and_wait(bg_cmd)
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def execute(self, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, strip_colors=True, will_succeed=False):
|
||||
try:
|
||||
return adb_shell(self.device, command, timeout, check_exit_code,
|
||||
as_root, adb_server=self.adb_server, su_cmd=self.su_cmd)
|
||||
except TargetStableError as e:
|
||||
if will_succeed:
|
||||
raise TargetTransientError(e)
|
||||
else:
|
||||
raise
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
bg_cmd = self._background(command, stdout, stderr, as_root)
|
||||
self._current_bg_cmds.add(bg_cmd)
|
||||
return bg_cmd
|
||||
|
||||
def _background(self, command, stdout, stderr, as_root):
|
||||
adb_shell, pid = adb_background_shell(self, command, stdout, stderr, as_root)
|
||||
bg_cmd = AdbBackgroundCommand(
|
||||
conn=self,
|
||||
adb_popen=adb_shell,
|
||||
pid=pid,
|
||||
as_root=as_root
|
||||
)
|
||||
return bg_cmd
|
||||
|
||||
def _close(self):
|
||||
AdbConnection.active_connections[self.device] -= 1
|
||||
if AdbConnection.active_connections[self.device] <= 0:
|
||||
if self.adb_as_root:
|
||||
self.adb_root(enable=False)
|
||||
adb_disconnect(self.device, self.adb_server)
|
||||
del AdbConnection.active_connections[self.device]
|
||||
|
||||
def cancel_running_command(self):
|
||||
# adbd multiplexes commands so that they don't interfer with each
|
||||
# other, so there is no need to explicitly cancel a running command
|
||||
# before the next one can be issued.
|
||||
pass
|
||||
|
||||
def adb_root(self, enable=True):
|
||||
cmd = 'root' if enable else 'unroot'
|
||||
output = adb_command(self.device, cmd, timeout=30, adb_server=self.adb_server)
|
||||
if 'cannot run as root in production builds' in output:
|
||||
raise TargetStableError(output)
|
||||
AdbConnection._connected_as_root[self.device] = enable
|
||||
|
||||
def wait_for_device(self, timeout=30):
|
||||
adb_command(self.device, 'wait-for-device', timeout, self.adb_server)
|
||||
|
||||
def reboot_bootloader(self, timeout=30):
|
||||
adb_command(self.device, 'reboot-bootloader', timeout, self.adb_server)
|
||||
|
||||
# Again, we need to handle boards where the default output format from ls is
|
||||
# single column *and* boards where the default output is multi-column.
|
||||
# We need to do this purely because the '-1' option causes errors on older
|
||||
@@ -222,63 +403,21 @@ class AdbConnection(object):
|
||||
self.ls_command = 'ls'
|
||||
logger.debug("ls command is set to {}".format(self.ls_command))
|
||||
|
||||
def __init__(self, device=None, timeout=None, platform=None, adb_server=None):
|
||||
self.timeout = timeout if timeout is not None else self.default_timeout
|
||||
if device is None:
|
||||
device = adb_get_device(timeout=timeout, adb_server=adb_server)
|
||||
self.device = device
|
||||
self.adb_server = adb_server
|
||||
adb_connect(self.device)
|
||||
AdbConnection.active_connections[self.device] += 1
|
||||
self._setup_ls()
|
||||
|
||||
def push(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
command = "push '{}' '{}'".format(source, dest)
|
||||
if not os.path.exists(source):
|
||||
raise HostError('No such file "{}"'.format(source))
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
def pull(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
# Pull all files matching a wildcard expression
|
||||
if os.path.isdir(dest) and \
|
||||
('*' in source or '?' in source):
|
||||
command = 'shell {} {}'.format(self.ls_command, source)
|
||||
output = adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
for line in output.splitlines():
|
||||
command = "pull '{}' '{}'".format(line.strip(), dest)
|
||||
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
return
|
||||
command = "pull '{}' '{}'".format(source, dest)
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
def execute(self, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, strip_colors=True):
|
||||
return adb_shell(self.device, command, timeout, check_exit_code,
|
||||
as_root, adb_server=self.adb_server)
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
return adb_background_shell(self.device, command, stdout, stderr, as_root)
|
||||
|
||||
def close(self):
|
||||
AdbConnection.active_connections[self.device] -= 1
|
||||
if AdbConnection.active_connections[self.device] <= 0:
|
||||
adb_disconnect(self.device)
|
||||
del AdbConnection.active_connections[self.device]
|
||||
|
||||
def cancel_running_command(self):
|
||||
# adbd multiplexes commands so that they don't interfer with each
|
||||
# other, so there is no need to explicitly cancel a running command
|
||||
# before the next one can be issued.
|
||||
pass
|
||||
def _setup_su(self):
|
||||
try:
|
||||
# Try the new style of invoking `su`
|
||||
self.execute('ls', timeout=self.timeout, as_root=True,
|
||||
check_exit_code=True)
|
||||
# If failure assume either old style or unrooted. Here we will assume
|
||||
# old style and root status will be verified later.
|
||||
except (TargetStableError, TargetTransientError, TimeoutError):
|
||||
self.su_cmd = 'echo {} | su'
|
||||
logger.debug("su command is set to {}".format(quote(self.su_cmd)))
|
||||
|
||||
|
||||
def fastboot_command(command, timeout=None, device=None):
|
||||
_check_env()
|
||||
target = '-s {}'.format(device) if device else ''
|
||||
target = '-s {}'.format(quote(device)) if device else ''
|
||||
full_command = 'fastboot {} {}'.format(target, command)
|
||||
logger.debug(full_command)
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
@@ -286,7 +425,7 @@ def fastboot_command(command, timeout=None, device=None):
|
||||
|
||||
|
||||
def fastboot_flash_partition(partition, path_to_image):
|
||||
command = 'flash {} {}'.format(partition, path_to_image)
|
||||
command = 'flash {} {}'.format(quote(partition), quote(path_to_image))
|
||||
fastboot_command(command)
|
||||
|
||||
|
||||
@@ -326,7 +465,7 @@ def adb_get_device(timeout=None, adb_server=None):
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS, adb_server=None):
|
||||
_check_env()
|
||||
tries = 0
|
||||
output = None
|
||||
@@ -334,10 +473,17 @@ def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
tries += 1
|
||||
if device:
|
||||
if "." in device: # Connect is required only for ADB-over-IP
|
||||
command = 'adb connect {}'.format(device)
|
||||
# ADB does not automatically remove a network device from it's
|
||||
# devices list when the connection is broken by the remote, so the
|
||||
# adb connection may have gone "stale", resulting in adb blocking
|
||||
# indefinitely when making calls to the device. To avoid this,
|
||||
# always disconnect first.
|
||||
adb_disconnect(device, adb_server)
|
||||
adb_cmd = get_adb_command(None, 'connect', adb_server)
|
||||
command = '{} {}'.format(adb_cmd, quote(device))
|
||||
logger.debug(command)
|
||||
output, _ = check_output(command, shell=True, timeout=timeout)
|
||||
if _ping(device):
|
||||
if _ping(device, adb_server):
|
||||
break
|
||||
time.sleep(10)
|
||||
else: # did not connect to the device
|
||||
@@ -347,52 +493,56 @@ def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
raise HostError(message)
|
||||
|
||||
|
||||
def adb_disconnect(device):
|
||||
def adb_disconnect(device, adb_server=None):
|
||||
_check_env()
|
||||
if not device:
|
||||
return
|
||||
if ":" in device and device in adb_list_devices():
|
||||
command = "adb disconnect " + device
|
||||
if ":" in device and device in adb_list_devices(adb_server):
|
||||
adb_cmd = get_adb_command(None, 'disconnect', adb_server)
|
||||
command = "{} {}".format(adb_cmd, device)
|
||||
logger.debug(command)
|
||||
retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
|
||||
if retval:
|
||||
raise TargetError('"{}" returned {}'.format(command, retval))
|
||||
raise TargetTransientError('"{}" returned {}'.format(command, retval))
|
||||
|
||||
|
||||
def _ping(device):
|
||||
def _ping(device, adb_server=None):
|
||||
_check_env()
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
command = "adb{} shell \"ls /data/local/tmp > /dev/null\"".format(device_string)
|
||||
adb_cmd = get_adb_command(device, 'shell', adb_server)
|
||||
command = "{} {}".format(adb_cmd, quote('ls /data/local/tmp > /dev/null'))
|
||||
logger.debug(command)
|
||||
result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
|
||||
if not result:
|
||||
if not result: # pylint: disable=simplifiable-if-statement
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
# pylint: disable=too-many-locals
|
||||
def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, adb_server=None): # NOQA
|
||||
as_root=False, adb_server=None, su_cmd='su -c {}'): # NOQA
|
||||
_check_env()
|
||||
if as_root:
|
||||
command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
|
||||
device_part = []
|
||||
if adb_server:
|
||||
device_part = ['-H', adb_server]
|
||||
device_part += ['-s', device] if device else []
|
||||
|
||||
# On older combinations of ADB/Android versions, the adb host command always
|
||||
# exits with 0 if it was able to run the command on the target, even if the
|
||||
# command failed (https://code.google.com/p/android/issues/detail?id=3254).
|
||||
# Homogenise this behaviour by running the command then echoing the exit
|
||||
# code.
|
||||
adb_shell_command = '({}); echo \"\n$?\"'.format(command)
|
||||
actual_command = ['adb'] + device_part + ['shell', adb_shell_command]
|
||||
logger.debug('adb {} shell {}'.format(' '.join(device_part), command))
|
||||
# code of the executed command itself.
|
||||
command = r'({}); echo "\n$?"'.format(command)
|
||||
|
||||
parts = ['adb']
|
||||
if adb_server is not None:
|
||||
parts += ['-H', adb_server]
|
||||
if device is not None:
|
||||
parts += ['-s', device]
|
||||
parts += ['shell',
|
||||
command if not as_root else su_cmd.format(quote(command))]
|
||||
|
||||
logger.debug(' '.join(quote(part) for part in parts))
|
||||
try:
|
||||
raw_output, _ = check_output(actual_command, timeout, shell=False, combined_output=True)
|
||||
raw_output, error = check_output(parts, timeout, shell=False)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise TargetError(str(e))
|
||||
raise TargetStableError(str(e))
|
||||
|
||||
if raw_output:
|
||||
try:
|
||||
@@ -410,40 +560,65 @@ def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
if exit_code.isdigit():
|
||||
if int(exit_code):
|
||||
message = ('Got exit code {}\nfrom target command: {}\n'
|
||||
'OUTPUT: {}')
|
||||
raise TargetError(message.format(exit_code, command, output))
|
||||
'OUTPUT: {}\nSTDERR: {}\n')
|
||||
raise TargetStableError(message.format(exit_code, command, output, error))
|
||||
elif re_search:
|
||||
message = 'Could not start activity; got the following:\n{}'
|
||||
raise TargetError(message.format(re_search[0]))
|
||||
raise TargetStableError(message.format(re_search[0]))
|
||||
else: # not all digits
|
||||
if re_search:
|
||||
message = 'Could not start activity; got the following:\n{}'
|
||||
raise TargetError(message.format(re_search[0]))
|
||||
raise TargetStableError(message.format(re_search[0]))
|
||||
else:
|
||||
message = 'adb has returned early; did not get an exit code. '\
|
||||
'Was kill-server invoked?\nOUTPUT:\n-----\n{}\n'\
|
||||
'-----'
|
||||
raise TargetError(message.format(raw_output))
|
||||
'-----\nSTDERR:\n-----\n{}\n-----'
|
||||
raise TargetTransientError(message.format(raw_output, error))
|
||||
|
||||
return output
|
||||
return output + error
|
||||
|
||||
|
||||
def adb_background_shell(device, command,
|
||||
def adb_background_shell(conn, command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
as_root=False):
|
||||
"""Runs the sepcified command in a subprocess, returning the the Popen object."""
|
||||
_check_env()
|
||||
if as_root:
|
||||
command = 'echo \'{}\' | su'.format(escape_single_quotes(command))
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
full_command = 'adb{} shell "{}"'.format(device_string, escape_double_quotes(command))
|
||||
logger.debug(full_command)
|
||||
return subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
|
||||
"""Runs the specified command in a subprocess, returning the the Popen object."""
|
||||
device = conn.device
|
||||
adb_server = conn.adb_server
|
||||
|
||||
_check_env()
|
||||
stdout, stderr, command = redirect_streams(stdout, stderr, command)
|
||||
if as_root:
|
||||
command = 'echo {} | su'.format(quote(command))
|
||||
|
||||
# Attach a unique UUID to the command line so it can be looked for without
|
||||
# any ambiguity with ps
|
||||
uuid_ = uuid.uuid4().hex
|
||||
uuid_var = 'BACKGROUND_COMMAND_UUID={}'.format(uuid_)
|
||||
command = "{} sh -c {}".format(uuid_var, quote(command))
|
||||
|
||||
adb_cmd = get_adb_command(device, 'shell', adb_server)
|
||||
full_command = '{} {}'.format(adb_cmd, quote(command))
|
||||
logger.debug(full_command)
|
||||
p = subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
|
||||
|
||||
# Out of band PID lookup, to avoid conflicting needs with stdout redirection
|
||||
find_pid = '{} ps -A -o pid,args | grep {}'.format(conn.busybox, quote(uuid_var))
|
||||
ps_out = conn.execute(find_pid)
|
||||
pids = [
|
||||
int(line.strip().split(' ', 1)[0])
|
||||
for line in ps_out.splitlines()
|
||||
]
|
||||
# The line we are looking for is the first one, since it was started before
|
||||
# any look up command
|
||||
pid = sorted(pids)[0]
|
||||
return (p, pid)
|
||||
|
||||
def adb_kill_server(timeout=30, adb_server=None):
|
||||
adb_command(None, 'kill-server', timeout, adb_server)
|
||||
|
||||
def adb_list_devices(adb_server=None):
|
||||
output = adb_command(None, 'devices',adb_server=adb_server)
|
||||
output = adb_command(None, 'devices', adb_server=adb_server)
|
||||
devices = []
|
||||
for line in output.splitlines():
|
||||
parts = [p.strip() for p in line.split()]
|
||||
@@ -452,7 +627,7 @@ def adb_list_devices(adb_server=None):
|
||||
return devices
|
||||
|
||||
|
||||
def get_adb_command(device, command, timeout=None,adb_server=None):
|
||||
def get_adb_command(device, command, adb_server=None):
|
||||
_check_env()
|
||||
device_string = ""
|
||||
if adb_server != None:
|
||||
@@ -460,12 +635,22 @@ def get_adb_command(device, command, timeout=None,adb_server=None):
|
||||
device_string += ' -s {}'.format(device) if device else ''
|
||||
return "adb{} {}".format(device_string, command)
|
||||
|
||||
def adb_command(device, command, timeout=None,adb_server=None):
|
||||
full_command = get_adb_command(device, command, timeout, adb_server)
|
||||
|
||||
def adb_command(device, command, timeout=None, adb_server=None):
|
||||
full_command = get_adb_command(device, command, adb_server)
|
||||
logger.debug(full_command)
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
return output
|
||||
|
||||
|
||||
def adb_command_background(device, command, adb_server=None):
|
||||
full_command = get_adb_command(device, command, adb_server)
|
||||
logger.debug(full_command)
|
||||
proc = get_subprocess(full_command, shell=True)
|
||||
cmd = PopenBackgroundCommand(proc)
|
||||
return cmd
|
||||
|
||||
|
||||
def grant_app_permissions(target, package):
|
||||
"""
|
||||
Grant an app all the permissions it may ask for
|
||||
@@ -473,7 +658,7 @@ def grant_app_permissions(target, package):
|
||||
dumpsys = target.execute('dumpsys package {}'.format(package))
|
||||
|
||||
permissions = re.search(
|
||||
'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
|
||||
r'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
|
||||
)
|
||||
if permissions is None:
|
||||
return
|
||||
@@ -482,7 +667,7 @@ def grant_app_permissions(target, package):
|
||||
for permission in permissions:
|
||||
try:
|
||||
target.execute('pm grant {} {}'.format(package, permission))
|
||||
except TargetError:
|
||||
except TargetStableError:
|
||||
logger.debug('Cannot grant {}'.format(permission))
|
||||
|
||||
|
||||
@@ -493,8 +678,10 @@ class _AndroidEnvironment(object):
|
||||
def __init__(self):
|
||||
self.android_home = None
|
||||
self.platform_tools = None
|
||||
self.build_tools = None
|
||||
self.adb = None
|
||||
self.aapt = None
|
||||
self.aapt_version = None
|
||||
self.fastboot = None
|
||||
|
||||
|
||||
@@ -520,28 +707,75 @@ def _initialize_without_android_home(env):
|
||||
_init_common(env)
|
||||
return env
|
||||
|
||||
|
||||
def _init_common(env):
|
||||
_discover_build_tools(env)
|
||||
_discover_aapt(env)
|
||||
|
||||
def _discover_build_tools(env):
|
||||
logger.debug('ANDROID_HOME: {}'.format(env.android_home))
|
||||
build_tools_directory = os.path.join(env.android_home, 'build-tools')
|
||||
if not os.path.isdir(build_tools_directory):
|
||||
msg = '''ANDROID_HOME ({}) does not appear to have valid Android SDK install
|
||||
(cannot find build-tools)'''
|
||||
raise HostError(msg.format(env.android_home))
|
||||
versions = os.listdir(build_tools_directory)
|
||||
for version in reversed(sorted(versions)):
|
||||
aapt_path = os.path.join(build_tools_directory, version, 'aapt')
|
||||
if os.path.isfile(aapt_path):
|
||||
logger.debug('Using aapt for version {}'.format(version))
|
||||
env.aapt = aapt_path
|
||||
break
|
||||
else:
|
||||
raise HostError('aapt not found. Please make sure at least one Android '
|
||||
'platform is installed.')
|
||||
if os.path.isdir(build_tools_directory):
|
||||
env.build_tools = build_tools_directory
|
||||
|
||||
def _check_supported_aapt2(binary):
|
||||
# At time of writing the version argument of aapt2 is not helpful as
|
||||
# the output is only a placeholder that does not distinguish between versions
|
||||
# with and without support for badging. Unfortunately aapt has been
|
||||
# deprecated and fails to parse some valid apks so we will try to favour
|
||||
# aapt2 if possible else will fall back to aapt.
|
||||
# Try to execute the badging command and check if we get an expected error
|
||||
# message as opposed to an unknown command error to determine if we have a
|
||||
# suitable version.
|
||||
cmd = '{} dump badging'.format(binary)
|
||||
result = subprocess.run(cmd.encode('utf-8'), shell=True, stderr=subprocess.PIPE)
|
||||
supported = bool(AAPT_BADGING_OUTPUT.search(result.stderr.decode('utf-8')))
|
||||
msg = 'Found a {} aapt2 binary at: {}'
|
||||
logger.debug(msg.format('supported' if supported else 'unsupported', binary))
|
||||
return supported
|
||||
|
||||
def _discover_aapt(env):
|
||||
if env.build_tools:
|
||||
aapt_path = ''
|
||||
aapt2_path = ''
|
||||
versions = os.listdir(env.build_tools)
|
||||
for version in reversed(sorted(versions)):
|
||||
if not os.path.isfile(aapt2_path):
|
||||
aapt2_path = os.path.join(env.build_tools, version, 'aapt2')
|
||||
if not os.path.isfile(aapt_path):
|
||||
aapt_path = os.path.join(env.build_tools, version, 'aapt')
|
||||
aapt_version = 1
|
||||
# Use latest available version for aapt/appt2 but ensure at least one is valid.
|
||||
if os.path.isfile(aapt2_path) or os.path.isfile(aapt_path):
|
||||
break
|
||||
|
||||
# Use aapt2 only if present and we have a suitable version
|
||||
if aapt2_path and _check_supported_aapt2(aapt2_path):
|
||||
aapt_path = aapt2_path
|
||||
aapt_version = 2
|
||||
|
||||
# Use the aapt version discoverted from build tools.
|
||||
if aapt_path:
|
||||
logger.debug('Using {} for version {}'.format(aapt_path, version))
|
||||
env.aapt = aapt_path
|
||||
env.aapt_version = aapt_version
|
||||
return
|
||||
|
||||
# Try detecting aapt2 and aapt from PATH
|
||||
if not env.aapt:
|
||||
aapt2_path = which('aapt2')
|
||||
if _check_supported_aapt2(aapt2_path):
|
||||
env.aapt = aapt2_path
|
||||
env.aapt_version = 2
|
||||
else:
|
||||
env.aapt = which('aapt')
|
||||
env.aapt_version = 1
|
||||
|
||||
if not env.aapt:
|
||||
raise HostError('aapt/aapt2 not found. Please make sure it is avaliable in PATH'
|
||||
' or at least one Android platform is installed')
|
||||
|
||||
def _check_env():
|
||||
global android_home, platform_tools, adb, aapt # pylint: disable=W0603
|
||||
global android_home, platform_tools, adb, aapt, aapt_version # pylint: disable=W0603
|
||||
if not android_home:
|
||||
android_home = os.getenv('ANDROID_HOME')
|
||||
if android_home:
|
||||
@@ -552,6 +786,7 @@ def _check_env():
|
||||
platform_tools = _env.platform_tools
|
||||
adb = _env.adb
|
||||
aapt = _env.aapt
|
||||
aapt_version = _env.aapt_version
|
||||
|
||||
class LogcatMonitor(object):
|
||||
"""
|
||||
@@ -570,11 +805,14 @@ class LogcatMonitor(object):
|
||||
def logfile(self):
|
||||
return self._logfile
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
def __init__(self, target, regexps=None, logcat_format=None):
|
||||
super(LogcatMonitor, self).__init__()
|
||||
|
||||
self.target = target
|
||||
self._regexps = regexps
|
||||
self._logcat_format = logcat_format
|
||||
self._logcat = None
|
||||
self._logfile = None
|
||||
|
||||
def start(self, outfile=None):
|
||||
"""
|
||||
@@ -586,7 +824,7 @@ class LogcatMonitor(object):
|
||||
if outfile:
|
||||
self._logfile = open(outfile, 'w')
|
||||
else:
|
||||
self._logfile = tempfile.NamedTemporaryFile()
|
||||
self._logfile = tempfile.NamedTemporaryFile(mode='w')
|
||||
|
||||
self.target.clear_logcat()
|
||||
|
||||
@@ -600,16 +838,20 @@ class LogcatMonitor(object):
|
||||
# Logcat on older version of android do not support the -e argument
|
||||
# so fall back to using grep.
|
||||
if self.target.get_sdk_version() > 23:
|
||||
logcat_cmd = '{} -e "{}"'.format(logcat_cmd, regexp)
|
||||
logcat_cmd = '{} -e {}'.format(logcat_cmd, quote(regexp))
|
||||
else:
|
||||
logcat_cmd = '{} | grep "{}"'.format(logcat_cmd, regexp)
|
||||
logcat_cmd = '{} | grep {}'.format(logcat_cmd, quote(regexp))
|
||||
|
||||
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd)
|
||||
if self._logcat_format:
|
||||
logcat_cmd = "{} -v {}".format(logcat_cmd, quote(self._logcat_format))
|
||||
|
||||
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd, self.target.adb_server)
|
||||
|
||||
logger.debug('logcat command ="{}"'.format(logcat_cmd))
|
||||
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile)
|
||||
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile, encoding='utf-8')
|
||||
|
||||
def stop(self):
|
||||
self.flush_log()
|
||||
self._logcat.terminate()
|
||||
self._logfile.close()
|
||||
|
||||
@@ -617,6 +859,12 @@ class LogcatMonitor(object):
|
||||
"""
|
||||
Return the list of lines found by the monitor
|
||||
"""
|
||||
self.flush_log()
|
||||
|
||||
with open(self._logfile.name) as fh:
|
||||
return [line for line in fh]
|
||||
|
||||
def flush_log(self):
|
||||
# Unless we tell pexect to 'expect' something, it won't read from
|
||||
# logcat's buffer or write into our logfile. We'll need to force it to
|
||||
# read any pending logcat output.
|
||||
@@ -647,11 +895,8 @@ class LogcatMonitor(object):
|
||||
# printed anything since pexpect last read from its buffer.
|
||||
break
|
||||
|
||||
with open(self._logfile.name) as fh:
|
||||
return [line for line in fh]
|
||||
|
||||
def clear_log(self):
|
||||
with open(self._logfile.name, 'w') as fh:
|
||||
with open(self._logfile.name, 'w') as _:
|
||||
pass
|
||||
|
||||
def search(self, regexp):
|
||||
@@ -679,7 +924,7 @@ class LogcatMonitor(object):
|
||||
res = [line for line in log if re.match(regexp, line)]
|
||||
|
||||
# Found some matches, return them
|
||||
if len(res) > 0:
|
||||
if res:
|
||||
return res
|
||||
|
||||
# Store the number of lines we've searched already, so we don't have to
|
||||
|
@@ -18,7 +18,7 @@ import logging
|
||||
from devlib.utils.types import numeric
|
||||
|
||||
|
||||
GEM5STATS_FIELD_REGEX = re.compile("^(?P<key>[^- ]\S*) +(?P<value>[^#]+).+$")
|
||||
GEM5STATS_FIELD_REGEX = re.compile(r"^(?P<key>[^- ]\S*) +(?P<value>[^#]+).+$")
|
||||
GEM5STATS_DUMP_HEAD = '---------- Begin Simulation Statistics ----------'
|
||||
GEM5STATS_DUMP_TAIL = '---------- End Simulation Statistics ----------'
|
||||
GEM5STATS_ROI_NUMBER = 8
|
||||
@@ -28,7 +28,7 @@ logger = logging.getLogger('gem5')
|
||||
|
||||
def iter_statistics_dump(stats_file):
|
||||
'''
|
||||
Yields statistics dumps as dicts. The parameter is assumed to be a stream
|
||||
Yields statistics dumps as dicts. The parameter is assumed to be a stream
|
||||
reading from the statistics log file.
|
||||
'''
|
||||
cur_dump = {}
|
||||
@@ -40,14 +40,13 @@ def iter_statistics_dump(stats_file):
|
||||
yield cur_dump
|
||||
cur_dump = {}
|
||||
else:
|
||||
res = GEM5STATS_FIELD_REGEX.match(line)
|
||||
res = GEM5STATS_FIELD_REGEX.match(line)
|
||||
if res:
|
||||
k = res.group("key")
|
||||
vtext = res.group("value")
|
||||
try:
|
||||
v = list(map(numeric, vtext.split()))
|
||||
cur_dump[k] = v[0] if len(v)==1 else set(v)
|
||||
cur_dump[k] = v[0] if len(v) == 1 else set(v)
|
||||
except ValueError:
|
||||
msg = 'Found non-numeric entry in gem5 stats ({}: {})'
|
||||
logger.warning(msg.format(k, vtext))
|
||||
|
||||
|
@@ -19,27 +19,43 @@ Miscellaneous functions that don't fit anywhere else.
|
||||
|
||||
"""
|
||||
from __future__ import division
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import string
|
||||
import threading
|
||||
import signal
|
||||
import subprocess
|
||||
import pkgutil
|
||||
import logging
|
||||
import random
|
||||
import ctypes
|
||||
import threading
|
||||
from operator import itemgetter
|
||||
from contextlib import contextmanager
|
||||
from functools import partial, reduce, wraps
|
||||
from itertools import groupby
|
||||
from functools import partial
|
||||
from operator import itemgetter
|
||||
from weakref import WeakKeyDictionary, WeakSet
|
||||
|
||||
import ctypes
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
import pkgutil
|
||||
import random
|
||||
import re
|
||||
import signal
|
||||
import string
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import types
|
||||
import wrapt
|
||||
import warnings
|
||||
|
||||
|
||||
try:
|
||||
from contextlib import ExitStack
|
||||
except AttributeError:
|
||||
from contextlib2 import ExitStack
|
||||
|
||||
try:
|
||||
from shlex import quote
|
||||
except ImportError:
|
||||
from pipes import quote
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
# pylint: disable=redefined-builtin
|
||||
from devlib.exception import HostError, TimeoutError
|
||||
from functools import reduce
|
||||
|
||||
|
||||
# ABI --> architectures list
|
||||
@@ -127,9 +143,6 @@ def get_cpu_name(implementer, part, variant):
|
||||
|
||||
|
||||
def preexec_function():
|
||||
# Ignore the SIGINT signal by setting the handler to the standard
|
||||
# signal handler SIG_IGN.
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
# Change process group in case we have to kill the subprocess and all of
|
||||
# its children later.
|
||||
# TODO: this is Unix-specific; would be good to find an OS-agnostic way
|
||||
@@ -140,13 +153,25 @@ def preexec_function():
|
||||
check_output_logger = logging.getLogger('check_output')
|
||||
# Popen is not thread safe. If two threads attempt to call it at the same time,
|
||||
# one may lock up. See https://bugs.python.org/issue12739.
|
||||
check_output_lock = threading.Lock()
|
||||
check_output_lock = threading.RLock()
|
||||
|
||||
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None,
|
||||
combined_output=False, **kwargs):
|
||||
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
|
||||
the subprocess if it does not return within the specified time."""
|
||||
def get_subprocess(command, **kwargs):
|
||||
if 'stdout' in kwargs:
|
||||
raise ValueError('stdout argument not allowed, it will be overridden.')
|
||||
with check_output_lock:
|
||||
process = subprocess.Popen(command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function,
|
||||
**kwargs)
|
||||
return process
|
||||
|
||||
|
||||
def check_subprocess_output(process, timeout=None, ignore=None, inputtext=None):
|
||||
output = None
|
||||
error = None
|
||||
# pylint: disable=too-many-branches
|
||||
if ignore is None:
|
||||
ignore = []
|
||||
@@ -155,49 +180,35 @@ def check_output(command, timeout=None, ignore=None, inputtext=None,
|
||||
elif not isinstance(ignore, list) and ignore != 'all':
|
||||
message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
|
||||
raise ValueError(message.format(ignore))
|
||||
if 'stdout' in kwargs:
|
||||
raise ValueError('stdout argument not allowed, it will be overridden.')
|
||||
|
||||
def callback(pid):
|
||||
try:
|
||||
check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
|
||||
os.killpg(pid, signal.SIGKILL)
|
||||
except OSError:
|
||||
pass # process may have already terminated.
|
||||
|
||||
with check_output_lock:
|
||||
stderr = subprocess.STDOUT if combined_output else subprocess.PIPE
|
||||
process = subprocess.Popen(command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=stderr,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function,
|
||||
**kwargs)
|
||||
|
||||
if timeout:
|
||||
timer = threading.Timer(timeout, callback, [process.pid, ])
|
||||
timer.start()
|
||||
|
||||
try:
|
||||
output, error = process.communicate(inputtext)
|
||||
if sys.version_info[0] == 3:
|
||||
# Currently errors=replace is needed as 0x8c throws an error
|
||||
output = output.decode(sys.stdout.encoding, "replace")
|
||||
if error:
|
||||
error = error.decode(sys.stderr.encoding, "replace")
|
||||
finally:
|
||||
if timeout:
|
||||
timer.cancel()
|
||||
output, error = process.communicate(inputtext, timeout=timeout)
|
||||
except subprocess.TimeoutExpired as e:
|
||||
timeout_expired = e
|
||||
else:
|
||||
timeout_expired = None
|
||||
|
||||
# Currently errors=replace is needed as 0x8c throws an error
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', "replace") if output else ''
|
||||
error = error.decode(sys.stderr.encoding or 'utf-8', "replace") if error else ''
|
||||
|
||||
if timeout_expired:
|
||||
raise TimeoutError(process.args, output='\n'.join([output, error]))
|
||||
|
||||
retcode = process.poll()
|
||||
if retcode:
|
||||
if retcode == -9: # killed, assume due to timeout callback
|
||||
raise TimeoutError(command, output='\n'.join([output or '', error or '']))
|
||||
elif ignore != 'all' and retcode not in ignore:
|
||||
raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output or '', error or '']))
|
||||
if retcode and ignore != 'all' and retcode not in ignore:
|
||||
raise subprocess.CalledProcessError(retcode, process.args, output='\n'.join([output, error]))
|
||||
|
||||
return output, error
|
||||
|
||||
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
|
||||
the subprocess if it does not return within the specified time."""
|
||||
process = get_subprocess(command, **kwargs)
|
||||
return check_subprocess_output(process, timeout=timeout, ignore=ignore, inputtext=inputtext)
|
||||
|
||||
|
||||
def walk_modules(path):
|
||||
"""
|
||||
Given package name, return a list of all modules (including submodules, etc)
|
||||
@@ -235,6 +246,32 @@ def walk_modules(path):
|
||||
mods.append(submod)
|
||||
return mods
|
||||
|
||||
def redirect_streams(stdout, stderr, command):
|
||||
"""
|
||||
Update a command to redirect a given stream to /dev/null if it's
|
||||
``subprocess.DEVNULL``.
|
||||
|
||||
:return: A tuple (stdout, stderr, command) with stream set to ``subprocess.PIPE``
|
||||
if the `stream` parameter was set to ``subprocess.DEVNULL``.
|
||||
"""
|
||||
def redirect(stream, redirection):
|
||||
if stream == subprocess.DEVNULL:
|
||||
suffix = '{}/dev/null'.format(redirection)
|
||||
elif stream == subprocess.STDOUT:
|
||||
suffix = '{}&1'.format(redirection)
|
||||
# Indicate that there is nothing to monitor for stderr anymore
|
||||
# since it's merged into stdout
|
||||
stream = subprocess.DEVNULL
|
||||
else:
|
||||
suffix = ''
|
||||
|
||||
return (stream, suffix)
|
||||
|
||||
stdout, suffix1 = redirect(stdout, '>')
|
||||
stderr, suffix2 = redirect(stderr, '2>')
|
||||
|
||||
command = 'sh -c {} {} {}'.format(quote(command), suffix1, suffix2)
|
||||
return (stdout, stderr, command)
|
||||
|
||||
def ensure_directory_exists(dirpath):
|
||||
"""A filter for directory paths to ensure they exist."""
|
||||
@@ -415,25 +452,51 @@ def convert_new_lines(text):
|
||||
""" Convert new lines to a common format. """
|
||||
return text.replace('\r\n', '\n').replace('\r', '\n')
|
||||
|
||||
def sanitize_cmd_template(cmd):
|
||||
msg = (
|
||||
'''Quoted placeholder should not be used, as it will result in quoting the text twice. {} should be used instead of '{}' or "{}" in the template: '''
|
||||
)
|
||||
for unwanted in ('"{}"', "'{}'"):
|
||||
if unwanted in cmd:
|
||||
warnings.warn(msg + cmd, stacklevel=2)
|
||||
cmd = cmd.replace(unwanted, '{}')
|
||||
|
||||
return cmd
|
||||
|
||||
def escape_quotes(text):
|
||||
"""Escape quotes, and escaped quotes, in the specified text."""
|
||||
"""
|
||||
Escape quotes, and escaped quotes, in the specified text.
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
|
||||
|
||||
|
||||
def escape_single_quotes(text):
|
||||
"""Escape single quotes, and escaped single quotes, in the specified text."""
|
||||
"""
|
||||
Escape single quotes, and escaped single quotes, in the specified text.
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
|
||||
|
||||
|
||||
def escape_double_quotes(text):
|
||||
"""Escape double quotes, and escaped double quotes, in the specified text."""
|
||||
"""
|
||||
Escape double quotes, and escaped double quotes, in the specified text.
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
|
||||
|
||||
|
||||
def escape_spaces(text):
|
||||
"""Escape spaces in the specified text"""
|
||||
return text.replace(' ', '\ ')
|
||||
"""
|
||||
Escape spaces in the specified text
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return text.replace(' ', '\\ ')
|
||||
|
||||
|
||||
def getch(count=1):
|
||||
@@ -523,6 +586,12 @@ def get_random_string(length):
|
||||
|
||||
class LoadSyntaxError(Exception):
|
||||
|
||||
@property
|
||||
def message(self):
|
||||
if self.args:
|
||||
return self.args[0]
|
||||
return str(self)
|
||||
|
||||
def __init__(self, message, filepath, lineno):
|
||||
super(LoadSyntaxError, self).__init__(message)
|
||||
self.filepath = filepath
|
||||
@@ -535,6 +604,7 @@ class LoadSyntaxError(Exception):
|
||||
|
||||
RAND_MOD_NAME_LEN = 30
|
||||
BAD_CHARS = string.punctuation + string.whitespace
|
||||
# pylint: disable=no-member
|
||||
if sys.version_info[0] == 3:
|
||||
TRANS_TABLE = str.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
else:
|
||||
@@ -639,13 +709,21 @@ def __get_memo_id(obj):
|
||||
|
||||
|
||||
@wrapt.decorator
|
||||
def memoized(wrapped, instance, args, kwargs):
|
||||
"""A decorator for memoizing functions and methods."""
|
||||
def memoized(wrapped, instance, args, kwargs): # pylint: disable=unused-argument
|
||||
"""
|
||||
A decorator for memoizing functions and methods.
|
||||
|
||||
.. warning:: this may not detect changes to mutable types. As long as the
|
||||
memoized function was used with an object as an argument
|
||||
before, the cached result will be returned, even if the
|
||||
structure of the object (e.g. a list) has changed in the mean time.
|
||||
|
||||
"""
|
||||
func_id = repr(wrapped)
|
||||
|
||||
def memoize_wrapper(*args, **kwargs):
|
||||
id_string = func_id + ','.join([__get_memo_id(a) for a in args])
|
||||
id_string += ','.join('{}={}'.format(k, v)
|
||||
id_string += ','.join('{}={}'.format(k, __get_memo_id(v))
|
||||
for k, v in kwargs.items())
|
||||
if id_string not in __memo_cache:
|
||||
__memo_cache[id_string] = wrapped(*args, **kwargs)
|
||||
@@ -653,3 +731,243 @@ def memoized(wrapped, instance, args, kwargs):
|
||||
|
||||
return memoize_wrapper(*args, **kwargs)
|
||||
|
||||
@contextmanager
|
||||
def batch_contextmanager(f, kwargs_list):
|
||||
"""
|
||||
Return a context manager that will call the ``f`` callable with the keyword
|
||||
arguments dict in the given list, in one go.
|
||||
|
||||
:param f: Callable expected to return a context manager.
|
||||
|
||||
:param kwargs_list: list of kwargs dictionaries to be used to call ``f``.
|
||||
:type kwargs_list: list(dict)
|
||||
"""
|
||||
with ExitStack() as stack:
|
||||
for kwargs in kwargs_list:
|
||||
stack.enter_context(f(**kwargs))
|
||||
yield
|
||||
|
||||
|
||||
@contextmanager
|
||||
def nullcontext(enter_result=None):
|
||||
"""
|
||||
Backport of Python 3.7 ``contextlib.nullcontext``
|
||||
|
||||
This context manager does nothing, so it can be used as a default
|
||||
placeholder for code that needs to select at runtime what context manager
|
||||
to use.
|
||||
|
||||
:param enter_result: Object that will be bound to the target of the with
|
||||
statement, or `None` if nothing is specified.
|
||||
:type enter_result: object
|
||||
"""
|
||||
yield enter_result
|
||||
|
||||
|
||||
class tls_property:
|
||||
"""
|
||||
Use it like `property` decorator, but the result will be memoized per
|
||||
thread. When the owning thread dies, the values for that thread will be
|
||||
destroyed.
|
||||
|
||||
In order to get the values, it's necessary to call the object
|
||||
given by the property. This is necessary in order to be able to add methods
|
||||
to that object, like :meth:`_BoundTLSProperty.get_all_values`.
|
||||
|
||||
Values can be set and deleted as well, which will be a thread-local set.
|
||||
"""
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.factory.__name__
|
||||
|
||||
def __init__(self, factory):
|
||||
self.factory = factory
|
||||
# Lock accesses to shared WeakKeyDictionary and WeakSet
|
||||
self.lock = threading.RLock()
|
||||
|
||||
def __get__(self, instance, owner=None):
|
||||
return _BoundTLSProperty(self, instance, owner)
|
||||
|
||||
def _get_value(self, instance, owner):
|
||||
tls, values = self._get_tls(instance)
|
||||
try:
|
||||
return tls.value
|
||||
except AttributeError:
|
||||
# Bind the method to `instance`
|
||||
f = self.factory.__get__(instance, owner)
|
||||
obj = f()
|
||||
tls.value = obj
|
||||
# Since that's a WeakSet, values will be removed automatically once
|
||||
# the threading.local variable that holds them is destroyed
|
||||
with self.lock:
|
||||
values.add(obj)
|
||||
return obj
|
||||
|
||||
def _get_all_values(self, instance, owner):
|
||||
with self.lock:
|
||||
# Grab a reference to all the objects at the time of the call by
|
||||
# using a regular set
|
||||
tls, values = self._get_tls(instance=instance)
|
||||
return set(values)
|
||||
|
||||
def __set__(self, instance, value):
|
||||
tls, values = self._get_tls(instance)
|
||||
tls.value = value
|
||||
with self.lock:
|
||||
values.add(value)
|
||||
|
||||
def __delete__(self, instance):
|
||||
tls, values = self._get_tls(instance)
|
||||
with self.lock:
|
||||
values.discard(tls.value)
|
||||
del tls.value
|
||||
|
||||
def _get_tls(self, instance):
|
||||
dct = instance.__dict__
|
||||
name = self.name
|
||||
try:
|
||||
# Using instance.__dict__[self.name] is safe as
|
||||
# getattr(instance, name) will return the property instead, as
|
||||
# the property is a descriptor
|
||||
tls = dct[name]
|
||||
except KeyError:
|
||||
with self.lock:
|
||||
# Double check after taking the lock to avoid a race
|
||||
if name not in dct:
|
||||
tls = (threading.local(), WeakSet())
|
||||
dct[name] = tls
|
||||
|
||||
return tls
|
||||
|
||||
@property
|
||||
def basic_property(self):
|
||||
"""
|
||||
Return a basic property that can be used to access the TLS value
|
||||
without having to call it first.
|
||||
|
||||
The drawback is that it's not possible to do anything over than
|
||||
getting/setting/deleting.
|
||||
"""
|
||||
def getter(instance, owner=None):
|
||||
prop = self.__get__(instance, owner)
|
||||
return prop()
|
||||
|
||||
return property(getter, self.__set__, self.__delete__)
|
||||
|
||||
class _BoundTLSProperty:
|
||||
"""
|
||||
Simple proxy object to allow either calling it to get the TLS value, or get
|
||||
some other informations by calling methods.
|
||||
"""
|
||||
def __init__(self, tls_property, instance, owner):
|
||||
self.tls_property = tls_property
|
||||
self.instance = instance
|
||||
self.owner = owner
|
||||
|
||||
def __call__(self):
|
||||
return self.tls_property._get_value(
|
||||
instance=self.instance,
|
||||
owner=self.owner,
|
||||
)
|
||||
|
||||
def get_all_values(self):
|
||||
"""
|
||||
Returns all the thread-local values currently in use in the process for
|
||||
that property for that instance.
|
||||
"""
|
||||
return self.tls_property._get_all_values(
|
||||
instance=self.instance,
|
||||
owner=self.owner,
|
||||
)
|
||||
|
||||
|
||||
class InitCheckpointMeta(type):
|
||||
"""
|
||||
Metaclass providing an ``initialized`` and ``is_in_use`` boolean attributes
|
||||
on instances.
|
||||
|
||||
``initialized`` is set to ``True`` once the ``__init__`` constructor has
|
||||
returned. It will deal cleanly with nested calls to ``super().__init__``.
|
||||
|
||||
``is_in_use`` is set to ``True`` when an instance method is being called.
|
||||
This allows to detect reentrance.
|
||||
"""
|
||||
def __new__(metacls, name, bases, dct, **kwargs):
|
||||
cls = super().__new__(metacls, name, bases, dct, **kwargs)
|
||||
init_f = cls.__init__
|
||||
|
||||
@wraps(init_f)
|
||||
def init_wrapper(self, *args, **kwargs):
|
||||
self.initialized = False
|
||||
self.is_in_use = False
|
||||
|
||||
# Track the nesting of super()__init__ to set initialized=True only
|
||||
# when the outer level is finished
|
||||
try:
|
||||
stack = self._init_stack
|
||||
except AttributeError:
|
||||
stack = []
|
||||
self._init_stack = stack
|
||||
|
||||
stack.append(init_f)
|
||||
try:
|
||||
x = init_f(self, *args, **kwargs)
|
||||
finally:
|
||||
stack.pop()
|
||||
|
||||
if not stack:
|
||||
self.initialized = True
|
||||
del self._init_stack
|
||||
|
||||
return x
|
||||
|
||||
cls.__init__ = init_wrapper
|
||||
|
||||
# Set the is_in_use attribute to allow external code to detect if the
|
||||
# methods are about to be re-entered.
|
||||
def make_wrapper(f):
|
||||
if f is None:
|
||||
return None
|
||||
|
||||
@wraps(f)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
f_ = f.__get__(self, self.__class__)
|
||||
initial_state = self.is_in_use
|
||||
try:
|
||||
self.is_in_use = True
|
||||
return f_(*args, **kwargs)
|
||||
finally:
|
||||
self.is_in_use = initial_state
|
||||
|
||||
return wrapper
|
||||
|
||||
# This will not decorate methods defined in base classes, but we cannot
|
||||
# use inspect.getmembers() as it uses __get__ to bind the attributes to
|
||||
# the class, making staticmethod indistinguishible from instance
|
||||
# methods.
|
||||
for name, attr in cls.__dict__.items():
|
||||
# Only wrap the methods (exposed as functions), not things like
|
||||
# classmethod or staticmethod
|
||||
if (
|
||||
name not in ('__init__', '__new__') and
|
||||
isinstance(attr, types.FunctionType)
|
||||
):
|
||||
setattr(cls, name, make_wrapper(attr))
|
||||
elif isinstance(attr, property):
|
||||
prop = property(
|
||||
fget=make_wrapper(attr.fget),
|
||||
fset=make_wrapper(attr.fset),
|
||||
fdel=make_wrapper(attr.fdel),
|
||||
doc=attr.__doc__,
|
||||
)
|
||||
setattr(cls, name, prop)
|
||||
|
||||
return cls
|
||||
|
||||
|
||||
class InitCheckpoint(metaclass=InitCheckpointMeta):
|
||||
"""
|
||||
Inherit from this class to set the :class:`InitCheckpointMeta` metaclass.
|
||||
"""
|
||||
pass
|
||||
|
@@ -28,18 +28,14 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import getopt
|
||||
import subprocess
|
||||
import logging
|
||||
import signal
|
||||
import serial
|
||||
import time
|
||||
import math
|
||||
import sys
|
||||
|
||||
logger = logging.getLogger('aep-parser')
|
||||
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
class AepParser(object):
|
||||
prepared = False
|
||||
|
||||
@@ -94,7 +90,7 @@ class AepParser(object):
|
||||
continue
|
||||
|
||||
if parent not in virtual:
|
||||
virtual[parent] = { supply : index }
|
||||
virtual[parent] = {supply : index}
|
||||
|
||||
virtual[parent][supply] = index
|
||||
|
||||
@@ -102,7 +98,7 @@ class AepParser(object):
|
||||
# child
|
||||
for supply in list(virtual.keys()):
|
||||
if len(virtual[supply]) == 1:
|
||||
del virtual[supply];
|
||||
del virtual[supply]
|
||||
|
||||
for supply in list(virtual.keys()):
|
||||
# Add label, hide and duplicate columns for virtual domains
|
||||
@@ -121,7 +117,7 @@ class AepParser(object):
|
||||
|
||||
label[0] = array[0]
|
||||
unit[0] = "(S)"
|
||||
for i in range(1,len(array)):
|
||||
for i in range(1, len(array)):
|
||||
label[i] = array[i][:-3]
|
||||
unit[i] = array[i][-3:]
|
||||
|
||||
@@ -138,7 +134,7 @@ class AepParser(object):
|
||||
# By default we assume that there is no child
|
||||
duplicate = [0] * len(label)
|
||||
|
||||
for i in range(len(label)):
|
||||
for i in range(len(label)): # pylint: disable=consider-using-enumerate
|
||||
# We only care about time and Watt
|
||||
if label[i] == 'time':
|
||||
hide[i] = 0
|
||||
@@ -167,7 +163,7 @@ class AepParser(object):
|
||||
@staticmethod
|
||||
def parse_text(array, hide):
|
||||
data = [0]*len(array)
|
||||
for i in range(len(array)):
|
||||
for i in range(len(array)): # pylint: disable=consider-using-enumerate
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
@@ -193,18 +189,18 @@ class AepParser(object):
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def delta_nrj(array, delta, min, max, hide):
|
||||
def delta_nrj(array, delta, minimu, maximum, hide):
|
||||
# Compute the energy consumed in this time slice and add it
|
||||
# delta[0] is used to save the last time stamp
|
||||
|
||||
if (delta[0] < 0):
|
||||
if delta[0] < 0:
|
||||
delta[0] = array[0]
|
||||
|
||||
time = array[0] - delta[0]
|
||||
if (time <= 0):
|
||||
if time <= 0:
|
||||
return delta
|
||||
|
||||
for i in range(len(array)):
|
||||
for i in range(len(array)): # pylint: disable=consider-using-enumerate
|
||||
if hide[i]:
|
||||
continue
|
||||
|
||||
@@ -213,10 +209,10 @@ class AepParser(object):
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
if (data < min[i]):
|
||||
min[i] = data
|
||||
if (data > max[i]):
|
||||
max[i] = data
|
||||
if data < minimu[i]:
|
||||
minimu[i] = data
|
||||
if data > maximum[i]:
|
||||
maximum[i] = data
|
||||
delta[i] += time * data
|
||||
|
||||
# save last time stamp
|
||||
@@ -225,11 +221,11 @@ class AepParser(object):
|
||||
return delta
|
||||
|
||||
def output_label(self, label, hide):
|
||||
self.fo.write(label[0]+"(uS)")
|
||||
self.fo.write(label[0] + "(uS)")
|
||||
for i in range(1, len(label)):
|
||||
if hide[i]:
|
||||
continue
|
||||
self.fo.write(" "+label[i]+"(uW)")
|
||||
self.fo.write(" " + label[i] + "(uW)")
|
||||
|
||||
self.fo.write("\n")
|
||||
|
||||
@@ -248,34 +244,34 @@ class AepParser(object):
|
||||
|
||||
self.fo.write("\n")
|
||||
|
||||
def prepare(self, infile, outfile, summaryfile):
|
||||
|
||||
# pylint: disable-redefined-outer-name,
|
||||
def prepare(self, input_file, outfile, summaryfile):
|
||||
try:
|
||||
self.fi = open(infile, "r")
|
||||
self.fi = open(input_file, "r")
|
||||
except IOError:
|
||||
logger.warn('Unable to open input file {}'.format(infile))
|
||||
logger.warn('Usage: parse_arp.py -i <inputfile> [-o <outputfile>]')
|
||||
logger.warning('Unable to open input file {}'.format(input_file))
|
||||
logger.warning('Usage: parse_arp.py -i <inputfile> [-o <outputfile>]')
|
||||
sys.exit(2)
|
||||
|
||||
self.parse = True
|
||||
if len(outfile) > 0:
|
||||
if outfile:
|
||||
try:
|
||||
self.fo = open(outfile, "w")
|
||||
except IOError:
|
||||
logger.warn('Unable to create {}'.format(outfile))
|
||||
logger.warning('Unable to create {}'.format(outfile))
|
||||
self.parse = False
|
||||
else:
|
||||
self.parse = False
|
||||
self.parse = False
|
||||
|
||||
self.summary = True
|
||||
if len(summaryfile) > 0:
|
||||
if summaryfile:
|
||||
try:
|
||||
self.fs = open(summaryfile, "w")
|
||||
except IOError:
|
||||
logger.warn('Unable to create {}'.format(summaryfile))
|
||||
logger.warning('Unable to create {}'.format(summaryfile))
|
||||
self.fs = sys.stdout
|
||||
else:
|
||||
self.fs = sys.stdout
|
||||
self.fs = sys.stdout
|
||||
|
||||
self.prepared = True
|
||||
|
||||
@@ -291,7 +287,8 @@ class AepParser(object):
|
||||
|
||||
self.prepared = False
|
||||
|
||||
def parse_aep(self, start=0, lenght=-1):
|
||||
# pylint: disable=too-many-branches,too-many-statements,redefined-outer-name,too-many-locals
|
||||
def parse_aep(self, start=0, length=-1):
|
||||
# Parse aep data and calculate the energy consumed
|
||||
begin = 0
|
||||
|
||||
@@ -302,7 +299,7 @@ class AepParser(object):
|
||||
lines = self.fi.readlines()
|
||||
|
||||
for myline in lines:
|
||||
array = myline.split()
|
||||
array = myline.split()
|
||||
|
||||
if "#" in myline:
|
||||
# update power topology
|
||||
@@ -331,8 +328,8 @@ class AepParser(object):
|
||||
|
||||
# Init arrays
|
||||
nrj = [0]*len(label)
|
||||
min = [100000000]*len(label)
|
||||
max = [0]*len(label)
|
||||
minimum = [100000000]*len(label)
|
||||
maximum = [0]*len(label)
|
||||
offset = [0]*len(label)
|
||||
|
||||
continue
|
||||
@@ -342,21 +339,21 @@ class AepParser(object):
|
||||
|
||||
# get 1st time stamp
|
||||
if begin <= 0:
|
||||
being = data[0]
|
||||
begin = data[0]
|
||||
|
||||
# skip data before start
|
||||
if (data[0]-begin) < start:
|
||||
continue
|
||||
|
||||
# stop after lenght
|
||||
if lenght >= 0 and (data[0]-begin) > (start + lenght):
|
||||
# stop after length
|
||||
if length >= 0 and (data[0]-begin) > (start + length):
|
||||
continue
|
||||
|
||||
# add virtual domains
|
||||
data = self.add_virtual_data(data, virtual)
|
||||
|
||||
# extract power figures
|
||||
self.delta_nrj(data, nrj, min, max, hide)
|
||||
self.delta_nrj(data, nrj, minimum, maximum, hide)
|
||||
|
||||
# write data into new file
|
||||
if self.parse:
|
||||
@@ -365,7 +362,6 @@ class AepParser(object):
|
||||
# if there is no data just return
|
||||
if label_line or len(nrj) == 1:
|
||||
raise ValueError('No data found in the data file. Please check the Arm Energy Probe')
|
||||
return
|
||||
|
||||
# display energy consumption of each channel and total energy consumption
|
||||
total = 0
|
||||
@@ -377,27 +373,33 @@ class AepParser(object):
|
||||
nrj[i] -= offset[i] * nrj[0]
|
||||
|
||||
total_nrj = nrj[i]/1000000000000.0
|
||||
duration = (max[0]-min[0])/1000000.0
|
||||
duration = (maximum[0]-minimum[0])/1000000.0
|
||||
channel_name = label[i]
|
||||
average_power = total_nrj/duration
|
||||
|
||||
self.fs.write("Total nrj: %8.3f J for %s -- duration %8.3f sec -- min %8.3f W -- max %8.3f W\n" % (nrj[i]/1000000000000.0, label[i], (max[0]-min[0])/1000000.0, min[i]/1000000.0, max[i]/1000000.0))
|
||||
total = nrj[i]/1000000000000.0
|
||||
duration = (maximum[0]-minimum[0])/1000000.0
|
||||
min_power = minimum[i]/1000000.0
|
||||
max_power = maximum[i]/1000000.0
|
||||
output = "Total nrj: %8.3f J for %s -- duration %8.3f sec -- min %8.3f W -- max %8.3f W\n"
|
||||
self.fs.write(output.format(total, label[i], duration, min_power, max_power))
|
||||
|
||||
# store each AEP channel info except Platform in the results table
|
||||
results_table[channel_name] = total_nrj, average_power
|
||||
|
||||
if (min[i] < offset[i]):
|
||||
self.fs.write ("!!! Min below offset\n")
|
||||
if minimum[i] < offset[i]:
|
||||
self.fs.write("!!! Min below offset\n")
|
||||
|
||||
if duplicate[i]:
|
||||
continue
|
||||
|
||||
total += nrj[i]
|
||||
|
||||
self.fs.write ("Total nrj: %8.3f J for %s -- duration %8.3f sec\n" % (total/1000000000000.0, "Platform ", (max[0]-min[0])/1000000.0))
|
||||
output = "Total nrj: %8.3f J for Platform -- duration %8.3f sec\n"
|
||||
self.fs.write(output.format(total/1000000000000.0, (maximum[0]-minimum[0])/1000000.0))
|
||||
|
||||
total_nrj = total/1000000000000.0
|
||||
duration = (max[0]-min[0])/1000000.0
|
||||
duration = (maximum[0]-minimum[0])/1000000.0
|
||||
average_power = total_nrj/duration
|
||||
|
||||
# store AEP Platform channel info in the results table
|
||||
@@ -405,11 +407,12 @@ class AepParser(object):
|
||||
|
||||
return results_table
|
||||
|
||||
# pylint: disable=too-many-branches,no-self-use,too-many-locals
|
||||
def topology_from_config(self, topofile):
|
||||
try:
|
||||
ft = open(topofile, "r")
|
||||
except IOError:
|
||||
logger.warn('Unable to open config file {}'.format(topofile))
|
||||
logger.warning('Unable to open config file {}'.format(topofile))
|
||||
return
|
||||
lines = ft.readlines()
|
||||
|
||||
@@ -451,10 +454,11 @@ class AepParser(object):
|
||||
topo[items[0]] = info
|
||||
|
||||
# Increase index
|
||||
index +=1
|
||||
index += 1
|
||||
|
||||
|
||||
# Create an entry for each virtual parent
|
||||
# pylint: disable=consider-iterating-dictionary
|
||||
for supply in topo.keys():
|
||||
# Parent is in the topology
|
||||
parent = topo[supply]['parent']
|
||||
@@ -462,23 +466,25 @@ class AepParser(object):
|
||||
continue
|
||||
|
||||
if parent not in virtual:
|
||||
virtual[parent] = { supply : topo[supply]['index'] }
|
||||
virtual[parent] = {supply : topo[supply]['index']}
|
||||
|
||||
virtual[parent][supply] = topo[supply]['index']
|
||||
|
||||
|
||||
# Remove parent with 1 child as they don't give more information than their
|
||||
# child
|
||||
# pylint: disable=consider-iterating-dictionary
|
||||
for supply in list(virtual.keys()):
|
||||
if len(virtual[supply]) == 1:
|
||||
del virtual[supply];
|
||||
del virtual[supply]
|
||||
|
||||
topo_list = ['']*(1+len(topo)+len(virtual))
|
||||
topo_list[0] = 'time'
|
||||
# pylint: disable=consider-iterating-dictionary
|
||||
for chnl in topo.keys():
|
||||
topo_list[topo[chnl]['index']] = chnl
|
||||
for chnl in virtual.keys():
|
||||
index +=1
|
||||
index += 1
|
||||
topo_list[index] = chnl
|
||||
|
||||
ft.close()
|
||||
@@ -490,6 +496,7 @@ class AepParser(object):
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def handleSigTERM(signum, frame):
|
||||
sys.exit(2)
|
||||
|
||||
@@ -501,11 +508,11 @@ if __name__ == '__main__':
|
||||
ch.setLevel(logging.DEBUG)
|
||||
logger.addHandler(ch)
|
||||
|
||||
infile = ""
|
||||
outfile = ""
|
||||
in_file = ""
|
||||
out_file = ""
|
||||
figurefile = ""
|
||||
start = 0
|
||||
lenght = -1
|
||||
length = -1
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "i:vo:s:l:t:")
|
||||
@@ -515,22 +522,22 @@ if __name__ == '__main__':
|
||||
|
||||
for o, a in opts:
|
||||
if o == "-i":
|
||||
infile = a
|
||||
in_file = a
|
||||
if o == "-v":
|
||||
logger.setLevel(logging.DEBUG)
|
||||
if o == "-o":
|
||||
parse = True
|
||||
outfile = a
|
||||
out_file = a
|
||||
if o == "-s":
|
||||
start = int(float(a)*1000000)
|
||||
if o == "-l":
|
||||
lenght = int(float(a)*1000000)
|
||||
length = int(float(a)*1000000)
|
||||
if o == "-t":
|
||||
topofile = a
|
||||
topfile = a
|
||||
parser = AepParser()
|
||||
print(parser.topology_from_config(topofile))
|
||||
print(parser.topology_from_config(topfile))
|
||||
exit(0)
|
||||
|
||||
parser = AepParser()
|
||||
parser.prepare(infile, outfile, figurefile)
|
||||
parser.parse_aep(start, lenght)
|
||||
parser.prepare(in_file, out_file, figurefile)
|
||||
parser.parse_aep(start, length)
|
||||
|
@@ -15,15 +15,15 @@
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from collections import namedtuple, OrderedDict
|
||||
from distutils.version import LooseVersion
|
||||
from collections import namedtuple
|
||||
from pipes import quote
|
||||
|
||||
# pylint: disable=redefined-builtin
|
||||
from devlib.exception import WorkerThreadError, TargetNotRespondingError, TimeoutError
|
||||
from devlib.utils.csvutil import csvwriter
|
||||
|
||||
@@ -49,12 +49,12 @@ class FrameCollector(threading.Thread):
|
||||
self.refresh_period = None
|
||||
self.drop_threshold = None
|
||||
self.unresponsive_count = 0
|
||||
self.last_ready_time = None
|
||||
self.last_ready_time = 0
|
||||
self.exc = None
|
||||
self.header = None
|
||||
|
||||
def run(self):
|
||||
logger.debug('Surface flinger frame data collection started.')
|
||||
logger.debug('Frame data collection started.')
|
||||
try:
|
||||
self.stop_signal.clear()
|
||||
fd, self.temp_file = tempfile.mkstemp()
|
||||
@@ -71,7 +71,7 @@ class FrameCollector(threading.Thread):
|
||||
except Exception as e: # pylint: disable=W0703
|
||||
logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
logger.debug('Surface flinger frame data collection stopped.')
|
||||
logger.debug('Frame data collection stopped.')
|
||||
|
||||
def stop(self):
|
||||
self.stop_signal.set()
|
||||
@@ -133,46 +133,58 @@ class SurfaceFlingerFrameCollector(FrameCollector):
|
||||
def collect_frames(self, wfh):
|
||||
for activity in self.list():
|
||||
if activity == self.view:
|
||||
wfh.write(self.get_latencies(activity))
|
||||
wfh.write(self.get_latencies(activity).encode('utf-8'))
|
||||
|
||||
def clear(self):
|
||||
self.target.execute('dumpsys SurfaceFlinger --latency-clear ')
|
||||
|
||||
def get_latencies(self, activity):
|
||||
cmd = 'dumpsys SurfaceFlinger --latency "{}"'
|
||||
return self.target.execute(cmd.format(activity))
|
||||
cmd = 'dumpsys SurfaceFlinger --latency {}'
|
||||
return self.target.execute(cmd.format(quote(activity)))
|
||||
|
||||
def list(self):
|
||||
text = self.target.execute('dumpsys SurfaceFlinger --list')
|
||||
return text.replace('\r\n', '\n').replace('\r', '\n').split('\n')
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
found = False
|
||||
text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
|
||||
for line in text.split('\n'):
|
||||
line = line.strip()
|
||||
if line:
|
||||
self._process_trace_line(line)
|
||||
if not line:
|
||||
continue
|
||||
if 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
|
||||
self.unresponsive_count += 1
|
||||
continue
|
||||
parts = line.split()
|
||||
# We only want numerical data, ignore textual data.
|
||||
try:
|
||||
parts = list(map(int, parts))
|
||||
except ValueError:
|
||||
continue
|
||||
found = True
|
||||
self._process_trace_parts(parts)
|
||||
if not found:
|
||||
logger.warning('Could not find expected SurfaceFlinger output.')
|
||||
|
||||
def _process_trace_line(self, line):
|
||||
parts = line.split()
|
||||
def _process_trace_parts(self, parts):
|
||||
if len(parts) == 3:
|
||||
frame = SurfaceFlingerFrame(*list(map(int, parts)))
|
||||
frame = SurfaceFlingerFrame(*parts)
|
||||
if not frame.frame_ready_time:
|
||||
return # "null" frame
|
||||
if frame.frame_ready_time <= self.last_ready_time:
|
||||
return # duplicate frame
|
||||
if (frame.frame_ready_time - frame.desired_present_time) > self.drop_threshold:
|
||||
logger.debug('Dropping bogus frame {}.'.format(line))
|
||||
logger.debug('Dropping bogus frame {}.'.format(' '.join(map(str, parts))))
|
||||
return # bogus data
|
||||
self.last_ready_time = frame.frame_ready_time
|
||||
self.frames.append(frame)
|
||||
elif len(parts) == 1:
|
||||
self.refresh_period = int(parts[0])
|
||||
self.refresh_period = parts[0]
|
||||
self.drop_threshold = self.refresh_period * 1000
|
||||
elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
|
||||
self.unresponsive_count += 1
|
||||
else:
|
||||
logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
|
||||
msg = 'Unexpected SurfaceFlinger dump output: {}'.format(' '.join(map(str, parts)))
|
||||
logger.warning(msg)
|
||||
|
||||
|
||||
def read_gfxinfo_columns(target):
|
||||
@@ -190,12 +202,16 @@ class GfxinfoFrameCollector(FrameCollector):
|
||||
def __init__(self, target, period, package, header=None):
|
||||
super(GfxinfoFrameCollector, self).__init__(target, period)
|
||||
self.package = package
|
||||
self.header = None
|
||||
self.header = None
|
||||
self._init_header(header)
|
||||
|
||||
def collect_frames(self, wfh):
|
||||
cmd = 'dumpsys gfxinfo {} framestats'
|
||||
wfh.write(self.target.execute(cmd.format(self.package)))
|
||||
result = self.target.execute(cmd.format(self.package))
|
||||
if sys.version_info[0] == 3:
|
||||
wfh.write(result.encode('utf-8'))
|
||||
else:
|
||||
wfh.write(result)
|
||||
|
||||
def clear(self):
|
||||
pass
|
||||
@@ -261,7 +277,7 @@ def gfxinfo_get_last_dump(filepath):
|
||||
|
||||
ix = buf.find(' **\n')
|
||||
if ix >= 0:
|
||||
buf = next(fh_iter) + buf
|
||||
buf = next(fh_iter) + buf
|
||||
ix = buf.find('** Graphics')
|
||||
if ix < 0:
|
||||
msg = '"{}" appears to be corrupted'
|
||||
|
@@ -20,6 +20,7 @@ from logging import Logger
|
||||
|
||||
import serial
|
||||
|
||||
# pylint: disable=import-error,wrong-import-position,ungrouped-imports,wrong-import-order
|
||||
import pexpect
|
||||
from distutils.version import StrictVersion as V
|
||||
if V(pexpect.__version__) < V('4.0.0'):
|
||||
@@ -48,6 +49,7 @@ def pulse_dtr(conn, state=True, duration=0.1):
|
||||
conn.setDTR(not state)
|
||||
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
def get_connection(timeout, init_dtr=None, logcls=SerialLogger,
|
||||
logfile=None, *args, **kwargs):
|
||||
if init_dtr is not None:
|
||||
@@ -89,6 +91,7 @@ def write_characters(conn, line, delay=0.05):
|
||||
conn.sendline('')
|
||||
|
||||
|
||||
# pylint: disable=keyword-arg-before-vararg
|
||||
@contextmanager
|
||||
def open_serial_connection(timeout, get_conn=False, init_dtr=None,
|
||||
logcls=SerialLogger, *args, **kwargs):
|
||||
@@ -111,11 +114,13 @@ def open_serial_connection(timeout, get_conn=False, init_dtr=None,
|
||||
"""
|
||||
target, conn = get_connection(timeout, init_dtr=init_dtr,
|
||||
logcls=logcls, *args, **kwargs)
|
||||
|
||||
if get_conn:
|
||||
yield target, conn
|
||||
target_and_conn = (target, conn)
|
||||
else:
|
||||
yield target
|
||||
|
||||
target.close() # Closes the file descriptor used by the conn.
|
||||
del conn
|
||||
target_and_conn = target
|
||||
|
||||
try:
|
||||
yield target_and_conn
|
||||
finally:
|
||||
target.close() # Closes the file descriptor used by the conn.
|
||||
|
1068
devlib/utils/ssh.py
1068
devlib/utils/ssh.py
File diff suppressed because it is too large
Load Diff
@@ -153,11 +153,11 @@ if sys.version_info[0] == 3:
|
||||
if isinstance(value, regex_type):
|
||||
if isinstance(value.pattern, bytes):
|
||||
return value
|
||||
return re.compile(value.pattern.encode(sys.stdout.encoding),
|
||||
return re.compile(value.pattern.encode(sys.stdout.encoding or 'utf-8'),
|
||||
value.flags & ~re.UNICODE)
|
||||
else:
|
||||
if isinstance(value, str):
|
||||
value = value.encode(sys.stdout.encoding)
|
||||
value = value.encode(sys.stdout.encoding or 'utf-8')
|
||||
return re.compile(value)
|
||||
else:
|
||||
def regex(value):
|
||||
|
@@ -113,4 +113,3 @@ class UbootMenu(object):
|
||||
except TIMEOUT:
|
||||
pass
|
||||
self.conn.buffer = ''
|
||||
|
||||
|
@@ -237,5 +237,3 @@ class UefiMenu(object):
|
||||
self.options = {}
|
||||
self.prompt = None
|
||||
self.empty_buffer()
|
||||
|
||||
|
||||
|
@@ -15,16 +15,34 @@
|
||||
|
||||
import os
|
||||
import sys
|
||||
from collections import namedtuple
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
|
||||
VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])
|
||||
|
||||
version = VersionTuple(1, 3, 2, '')
|
||||
|
||||
|
||||
def get_devlib_version():
|
||||
version_string = '{}.{}.{}'.format(
|
||||
version.major, version.minor, version.revision)
|
||||
if version.dev:
|
||||
version_string += '.{}'.format(version.dev)
|
||||
return version_string
|
||||
|
||||
|
||||
def get_commit():
|
||||
p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__),
|
||||
stdout=PIPE, stderr=PIPE)
|
||||
try:
|
||||
p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__),
|
||||
stdout=PIPE, stderr=PIPE)
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
std, _ = p.communicate()
|
||||
p.wait()
|
||||
if p.returncode:
|
||||
return None
|
||||
if sys.version_info[0] == 3:
|
||||
return std[:8].decode(sys.stdout.encoding, 'replace')
|
||||
if sys.version_info[0] == 3 and isinstance(std, bytes):
|
||||
return std[:8].decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
else:
|
||||
return std[:8]
|
||||
|
153
doc/collectors.rst
Normal file
153
doc/collectors.rst
Normal file
@@ -0,0 +1,153 @@
|
||||
.. _collector:
|
||||
|
||||
Collectors
|
||||
==========
|
||||
|
||||
The ``Collector`` API provide a consistent way of collecting arbitrary data from
|
||||
a target. Data is collected via an instance of a class derived from
|
||||
:class:`CollectorBase`.
|
||||
|
||||
|
||||
Example
|
||||
-------
|
||||
|
||||
The following example shows how to use a collector to read the logcat output
|
||||
from an Android target.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# import and instantiate the Target and the collector
|
||||
# (note: this assumes exactly one android target connected
|
||||
# to the host machine).
|
||||
In [1]: from devlib import AndroidTarget, LogcatCollector
|
||||
|
||||
In [2]: t = AndroidTarget()
|
||||
|
||||
# Set up the collector on the Target.
|
||||
|
||||
In [3]: collector = LogcatCollector(t)
|
||||
|
||||
# Configure the output file path for the collector to use.
|
||||
In [4]: collector.set_output('adb_log.txt')
|
||||
|
||||
# Reset the Collector to perform any required configuration or preparation.
|
||||
In [5]: collector.reset()
|
||||
|
||||
# Start Collecting
|
||||
In [6]: collector.start()
|
||||
|
||||
# Wait for some output to be generated
|
||||
In [7]: sleep(10)
|
||||
|
||||
# Stop Collecting
|
||||
In [8]: collector.stop()
|
||||
|
||||
# Retrieve the collected data
|
||||
In [9]: output = collector.get_data()
|
||||
|
||||
# Display the returned ``CollectorOutput`` Object.
|
||||
In [10]: output
|
||||
Out[10]: [<adb_log.txt (file)>]
|
||||
|
||||
In [11]: log_file = output[0]
|
||||
|
||||
# Get the path kind of the returned CollectorOutputEntry.
|
||||
In [12]: log_file.path_kind
|
||||
Out[12]: 'file'
|
||||
|
||||
# Get the path of the returned CollectorOutputEntry.
|
||||
In [13]: log_file.path
|
||||
Out[13]: 'adb_log.txt'
|
||||
|
||||
# Find the full path to the log file.
|
||||
In [14]: os.path.join(os.getcwd(), log_file)
|
||||
Out[14]: '/tmp/adb_log.txt'
|
||||
|
||||
|
||||
API
|
||||
---
|
||||
.. collector:
|
||||
|
||||
.. module:: devlib.collector
|
||||
|
||||
|
||||
CollectorBase
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
.. class:: CollectorBase(target, \*\*kwargs)
|
||||
|
||||
A ``CollectorBase`` is the base class and API that should be
|
||||
implemented to allow collecting various data from a target e.g. traces,
|
||||
logs etc.
|
||||
|
||||
.. method:: Collector.setup(\*args, \*\*kwargs)
|
||||
|
||||
This will set up the collector on the target. Parameters this method takes
|
||||
are particular to subclasses (see documentation for specific collectors
|
||||
below). What actions are performed by this method are also
|
||||
collector-specific. Usually these will be things like installing
|
||||
executables, starting services, deploying assets, etc. Typically, this method
|
||||
needs to be invoked at most once per reboot of the target (unless
|
||||
``teardown()`` has been called), but see documentation for the collector
|
||||
you're interested in.
|
||||
|
||||
.. method:: CollectorBase.reset()
|
||||
|
||||
This can be used to configure a collector for collection. This must be invoked
|
||||
before ``start()`` is called to begin collection.
|
||||
|
||||
.. method:: CollectorBase.start()
|
||||
|
||||
Starts collecting from the target.
|
||||
|
||||
.. method:: CollectorBase.stop()
|
||||
|
||||
Stops collecting from target. Must be called after
|
||||
:func:`start()`.
|
||||
|
||||
|
||||
.. method:: CollectorBase.set_output(output_path)
|
||||
|
||||
Configure the output path for the particular collector. This will be either
|
||||
a directory or file path which will be used when storing the data. Please see
|
||||
the individual Collector documentation for more information.
|
||||
|
||||
|
||||
.. method:: CollectorBase.get_data()
|
||||
|
||||
The collected data will be returned via the previously specified output_path.
|
||||
This method will return a ``CollectorOutput`` object which is a subclassed
|
||||
list object containing individual ``CollectorOutputEntry`` objects with details
|
||||
about the individual output entry.
|
||||
|
||||
|
||||
CollectorOutputEntry
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This object is designed to allow for the output of a collector to be processed
|
||||
generically. The object will behave as a regular string containing the path to
|
||||
underlying output path and can be used directly in ``os.path`` operations.
|
||||
|
||||
.. attribute:: CollectorOutputEntry.path
|
||||
|
||||
The file path for the corresponding output item.
|
||||
|
||||
.. attribute:: CollectorOutputEntry.path_kind
|
||||
|
||||
The type of output that is specified in the ``path`` attribute. Current valid
|
||||
kinds are: ``file`` and ``directory``.
|
||||
|
||||
.. method:: CollectorOutputEntry.__init__(path, path_kind)
|
||||
|
||||
Initialises a ``CollectorOutputEntry`` object with the desired file path and
|
||||
kind of file path specified.
|
||||
|
||||
|
||||
.. collectors:
|
||||
|
||||
Available Collectors
|
||||
---------------------
|
||||
|
||||
This section lists collectors that are currently part of devlib.
|
||||
|
||||
.. todo:: Add collectors
|
@@ -31,6 +31,9 @@ import shlex
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.graphviz',
|
||||
'sphinx.ext.mathjax',
|
||||
'sphinx.ext.todo',
|
||||
'sphinx.ext.viewcode',
|
||||
]
|
||||
|
||||
@@ -58,9 +61,9 @@ author = u'ARM Limited'
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '0.1'
|
||||
version = '1.0.0'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '0.1'
|
||||
release = '1.0.0'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
@@ -104,7 +107,7 @@ pygments_style = 'sphinx'
|
||||
#keep_warnings = False
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = False
|
||||
todo_include_todos = True
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
@@ -3,16 +3,17 @@ Connection
|
||||
|
||||
A :class:`Connection` abstracts an actual physical connection to a device. The
|
||||
first connection is created when :func:`Target.connect` method is called. If a
|
||||
:class:`Target` is used in a multi-threaded environment, it will maintain a
|
||||
connection for each thread in which it is invoked. This allows the same target
|
||||
object to be used in parallel in multiple threads.
|
||||
:class:`~devlib.target.Target` is used in a multi-threaded environment, it will
|
||||
maintain a connection for each thread in which it is invoked. This allows
|
||||
the same target object to be used in parallel in multiple threads.
|
||||
|
||||
:class:`Connection`\ s will be automatically created and managed by
|
||||
:class:`Target`\ s, so there is usually no reason to create one manually.
|
||||
Instead, configuration for a :class:`Connection` is passed as
|
||||
`connection_settings` parameter when creating a :class:`Target`. The connection
|
||||
to be used target is also specified on instantiation by `conn_cls` parameter,
|
||||
though all concrete :class:`Target` implementations will set an appropriate
|
||||
:class:`~devlib.target.Target`\ s, so there is usually no reason to create one
|
||||
manually. Instead, configuration for a :class:`Connection` is passed as
|
||||
`connection_settings` parameter when creating a
|
||||
:class:`~devlib.target.Target`. The connection to be used target is also
|
||||
specified on instantiation by `conn_cls` parameter, though all concrete
|
||||
:class:`~devlib.target.Target` implementations will set an appropriate
|
||||
default, so there is typically no need to specify this explicitly.
|
||||
|
||||
:class:`Connection` classes are not a part of an inheritance hierarchy, i.e.
|
||||
@@ -20,27 +21,27 @@ they do not derive from a common base. Instead, a :class:`Connection` is any
|
||||
class that implements the following methods.
|
||||
|
||||
|
||||
.. method:: push(self, source, dest, timeout=None)
|
||||
.. method:: push(self, sources, dest, timeout=None)
|
||||
|
||||
Transfer a file from the host machine to the connected device.
|
||||
Transfer a list of files from the host machine to the connected device.
|
||||
|
||||
:param source: path of to the file on the host
|
||||
:param dest: path of to the file on the connected device.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
:param sources: list of paths on the host
|
||||
:param dest: path to the file or folder on the connected device.
|
||||
:param timeout: timeout (in seconds) for the transfer of each file; if the
|
||||
transfer does not complete within this period, an exception will be
|
||||
raised.
|
||||
|
||||
.. method:: pull(self, source, dest, timeout=None)
|
||||
.. method:: pull(self, sources, dest, timeout=None)
|
||||
|
||||
Transfer a file, or files matching a glob pattern, from the connected device
|
||||
to the host machine.
|
||||
Transfer a list of files from the connected device to the host machine.
|
||||
|
||||
:param source: path of to the file on the connected device. If ``dest`` is a
|
||||
directory, may be a glob pattern.
|
||||
:param dest: path of to the file on the host
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
:param sources: list of paths on the connected device.
|
||||
:param dest: path to the file or folder on the host
|
||||
:param timeout: timeout (in seconds) for the transfer for each file; if the
|
||||
transfer does not complete within this period, an exception will be
|
||||
raised.
|
||||
|
||||
.. method:: execute(self, command, timeout=None, check_exit_code=False, as_root=False)
|
||||
.. method:: execute(self, command, timeout=None, check_exit_code=False, as_root=False, strip_colors=True, will_succeed=False)
|
||||
|
||||
Execute the specified command on the connected device and return its output.
|
||||
|
||||
@@ -53,6 +54,13 @@ class that implements the following methods.
|
||||
raised if it is not ``0``.
|
||||
:param as_root: The command will be executed as root. This will fail on
|
||||
unrooted connected devices.
|
||||
:param strip_colors: The command output will have colour encodings and
|
||||
most ANSI escape sequences stripped out before returning.
|
||||
:param will_succeed: The command is assumed to always succeed, unless there is
|
||||
an issue in the environment like the loss of network connectivity. That
|
||||
will make the method always raise an instance of a subclass of
|
||||
:class:`DevlibTransientError` when the command fails, instead of a
|
||||
:class:`DevlibStableError`.
|
||||
|
||||
.. method:: background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False)
|
||||
|
||||
@@ -69,7 +77,7 @@ class that implements the following methods.
|
||||
|
||||
.. note:: This **will block the connection** until the command completes.
|
||||
|
||||
.. note:: The above methods are directly wrapped by :class:`Target` methods,
|
||||
.. note:: The above methods are directly wrapped by :class:`~devlib.target.Target` methods,
|
||||
however note that some of the defaults are different.
|
||||
|
||||
.. method:: cancel_running_command(self)
|
||||
@@ -93,7 +101,12 @@ class that implements the following methods.
|
||||
Connection Types
|
||||
----------------
|
||||
|
||||
.. class:: AdbConnection(device=None, timeout=None)
|
||||
|
||||
.. module:: devlib.utils.android
|
||||
|
||||
.. class:: AdbConnection(device=None, timeout=None, adb_server=None, adb_as_root=False, connection_attempts=MAX_ATTEMPTS,\
|
||||
poll_transfers=False, start_transfer_poll_delay=30, total_transfer_timeout=3600,\
|
||||
transfer_poll_period=30)
|
||||
|
||||
A connection to an android device via ``adb`` (Android Debug Bridge).
|
||||
``adb`` is part of the Android SDK (though stand-alone versions are also
|
||||
@@ -106,10 +119,37 @@ Connection Types
|
||||
:param timeout: Connection timeout in seconds. If a connection to the device
|
||||
is not established within this period, :class:`HostError`
|
||||
is raised.
|
||||
:param adb_server: Allows specifying the address of the adb server to use.
|
||||
:param adb_as_root: Specify whether the adb server should be restarted in root mode.
|
||||
:param connection_attempts: Specify how many connection attempts, 10 seconds
|
||||
apart, should be attempted to connect to the device.
|
||||
Defaults to 5.
|
||||
:param poll_transfers: Specify whether file transfers should be polled. Polling
|
||||
monitors the progress of file transfers and periodically
|
||||
checks whether they have stalled, attempting to cancel
|
||||
the transfers prematurely if so.
|
||||
:param start_transfer_poll_delay: If transfers are polled, specify the length of
|
||||
time after a transfer has started before polling
|
||||
should start.
|
||||
:param total_transfer_timeout: If transfers are polled, specify the total amount of time
|
||||
to elapse before the transfer is cancelled, regardless
|
||||
of its activity.
|
||||
:param transfer_poll_period: If transfers are polled, specify the period at which
|
||||
the transfers are sampled for activity. Too small values
|
||||
may cause the destination size to appear the same over
|
||||
one or more sample periods, causing improper transfer
|
||||
cancellation.
|
||||
|
||||
|
||||
.. class:: SshConnection(host, username, password=None, keyfile=None, port=None,\
|
||||
timeout=None, password_prompt=None)
|
||||
|
||||
.. module:: devlib.utils.ssh
|
||||
|
||||
.. class:: SshConnection(host, username, password=None, keyfile=None, port=22,\
|
||||
timeout=None, platform=None, \
|
||||
sudo_cmd="sudo -- sh -c {}", strict_host_check=True, \
|
||||
use_scp=False, poll_transfers=False, \
|
||||
start_transfer_poll_delay=30, total_transfer_timeout=3600,\
|
||||
transfer_poll_period=30)
|
||||
|
||||
A connection to a device on the network over SSH.
|
||||
|
||||
@@ -117,6 +157,9 @@ Connection Types
|
||||
:param username: username for SSH login
|
||||
:param password: password for the SSH connection
|
||||
|
||||
.. note:: To connect to a system without a password this
|
||||
parameter should be set to an empty string otherwise
|
||||
ssh key authentication will be attempted.
|
||||
.. note:: In order to use password-based authentication,
|
||||
``sshpass`` utility must be installed on the
|
||||
system.
|
||||
@@ -131,10 +174,26 @@ Connection Types
|
||||
:param timeout: Timeout for the connection in seconds. If a connection
|
||||
cannot be established within this time, an error will be
|
||||
raised.
|
||||
:param password_prompt: A string with the password prompt used by
|
||||
``sshpass``. Set this if your version of ``sshpass``
|
||||
uses something other than ``"[sudo] password"``.
|
||||
|
||||
:param platform: Specify the platform to be used. The generic :class:`~devlib.platform.Platform`
|
||||
class is used by default.
|
||||
:param sudo_cmd: Specify the format of the command used to grant sudo access.
|
||||
:param strict_host_check: Specify the ssh connection parameter ``StrictHostKeyChecking``,
|
||||
:param use_scp: Use SCP for file transfers, defaults to SFTP.
|
||||
:param poll_transfers: Specify whether file transfers should be polled. Polling
|
||||
monitors the progress of file transfers and periodically
|
||||
checks whether they have stalled, attempting to cancel
|
||||
the transfers prematurely if so.
|
||||
:param start_transfer_poll_delay: If transfers are polled, specify the length of
|
||||
time after a transfer has started before polling
|
||||
should start.
|
||||
:param total_transfer_timeout: If transfers are polled, specify the total amount of time
|
||||
to elapse before the transfer is cancelled, regardless
|
||||
of its activity.
|
||||
:param transfer_poll_period: If transfers are polled, specify the period at which
|
||||
the transfers are sampled for activity. Too small values
|
||||
may cause the destination size to appear the same over
|
||||
one or more sample periods, causing improper transfer
|
||||
cancellation.
|
||||
|
||||
.. class:: TelnetConnection(host, username, password=None, port=None,\
|
||||
timeout=None, password_prompt=None,\
|
||||
@@ -167,6 +226,7 @@ Connection Types
|
||||
connection to reduce the possibility of clashes).
|
||||
This parameter is ignored for SSH connections.
|
||||
|
||||
.. module:: devlib.host
|
||||
|
||||
.. class:: LocalConnection(keep_password=True, unrooted=False, password=None)
|
||||
|
||||
@@ -182,6 +242,9 @@ Connection Types
|
||||
prompting for it.
|
||||
|
||||
|
||||
.. module:: devlib.utils.ssh
|
||||
:noindex:
|
||||
|
||||
.. class:: Gem5Connection(platform, host=None, username=None, password=None,\
|
||||
timeout=None, password_prompt=None,\
|
||||
original_prompt=None)
|
||||
@@ -190,19 +253,18 @@ Connection Types
|
||||
|
||||
.. note:: Some of the following input parameters are optional and will be ignored during
|
||||
initialisation. They were kept to keep the analogy with a :class:`TelnetConnection`
|
||||
(i.e. ``host``, `username``, ``password``, ``port``,
|
||||
(i.e. ``host``, ``username``, ``password``, ``port``,
|
||||
``password_prompt`` and ``original_prompt``)
|
||||
|
||||
|
||||
:param host: Host on which the gem5 simulation is running
|
||||
|
||||
.. note:: Even thought the input parameter for the ``host``
|
||||
will be ignored, the gem5 simulation needs to on
|
||||
the same host as the user as the user is
|
||||
currently on, so if the host given as input
|
||||
parameter is not the same as the actual host, a
|
||||
``TargetError`` will be raised to prevent
|
||||
confusion.
|
||||
.. note:: Even though the input parameter for the ``host``
|
||||
will be ignored, the gem5 simulation needs to be
|
||||
on the same host the user is currently on, so if
|
||||
the host given as input parameter is not the
|
||||
same as the actual host, a :class:`TargetStableError`
|
||||
will be raised to prevent confusion.
|
||||
|
||||
:param username: Username in the simulated system
|
||||
:param password: No password required in gem5 so does not need to be set
|
||||
@@ -227,14 +289,14 @@ The only methods discussed below are those that will be overwritten by the
|
||||
|
||||
A connection to a gem5 simulation that emulates a Linux system.
|
||||
|
||||
.. method:: _login_to_device(self)
|
||||
.. method:: _login_to_device(self)
|
||||
|
||||
Login to the gem5 simulated system.
|
||||
Login to the gem5 simulated system.
|
||||
|
||||
.. class:: AndroidGem5Connection
|
||||
|
||||
A connection to a gem5 simulation that emulates an Android system.
|
||||
|
||||
.. method:: _wait_for_boot(self)
|
||||
.. method:: _wait_for_boot(self)
|
||||
|
||||
Wait for the gem5 simulated system to have booted and finished the booting animation.
|
||||
Wait for the gem5 simulated system to have booted and finished the booting animation.
|
||||
|
@@ -1,7 +1,6 @@
|
||||
Derived Measurements
|
||||
=====================
|
||||
|
||||
|
||||
The ``DerivedMeasurements`` API provides a consistent way of performing post
|
||||
processing on a provided :class:`MeasurementCsv` file.
|
||||
|
||||
@@ -35,6 +34,8 @@ API
|
||||
Derived Measurements
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. module:: devlib.derived
|
||||
|
||||
.. class:: DerivedMeasurements
|
||||
|
||||
The ``DerivedMeasurements`` class provides an API for post-processing
|
||||
@@ -102,17 +103,20 @@ Available Derived Measurements
|
||||
Energy
|
||||
~~~~~~
|
||||
|
||||
.. module:: devlib.derived.energy
|
||||
|
||||
.. class:: DerivedEnergyMeasurements
|
||||
|
||||
The ``DerivedEnergyMeasurements`` class is used to calculate average power and
|
||||
cumulative energy for each site if the required data is present.
|
||||
The ``DerivedEnergyMeasurements`` class is used to calculate average power
|
||||
and cumulative energy for each site if the required data is present.
|
||||
|
||||
The calculation of cumulative energy can occur in 3 ways. If a
|
||||
``site`` contains ``energy`` results, the first and last measurements are extracted
|
||||
and the delta calculated. If not, a ``timestamp`` channel will be used to calculate
|
||||
the energy from the power channel, failing back to using the sample rate attribute
|
||||
of the :class:`MeasurementCsv` file if timestamps are not available. If neither
|
||||
timestamps or a sample rate are available then an error will be raised.
|
||||
The calculation of cumulative energy can occur in 3 ways. If a ``site``
|
||||
contains ``energy`` results, the first and last measurements are extracted
|
||||
and the delta calculated. If not, a ``timestamp`` channel will be used to
|
||||
calculate the energy from the power channel, failing back to using the sample
|
||||
rate attribute of the :class:`MeasurementCsv` file if timestamps are not
|
||||
available. If neither timestamps or a sample rate are available then an error
|
||||
will be raised.
|
||||
|
||||
|
||||
.. method:: DerivedEnergyMeasurements.process(measurement_csv)
|
||||
@@ -128,6 +132,8 @@ Energy
|
||||
FPS / Rendering
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. module:: devlib.derived.fps
|
||||
|
||||
.. class:: DerivedGfxInfoStats(drop_threshold=5, suffix='-fps', filename=None, outdir=None)
|
||||
|
||||
Produces FPS (frames-per-second) and other derived statistics from
|
||||
|
BIN
doc/images/instrumentation/baylibre_acme/bottleneck.png
Normal file
BIN
doc/images/instrumentation/baylibre_acme/bottleneck.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 46 KiB |
BIN
doc/images/instrumentation/baylibre_acme/buffer.png
Normal file
BIN
doc/images/instrumentation/baylibre_acme/buffer.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 140 KiB |
BIN
doc/images/instrumentation/baylibre_acme/cape.png
Normal file
BIN
doc/images/instrumentation/baylibre_acme/cape.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 1.1 MiB |
BIN
doc/images/instrumentation/baylibre_acme/ina226_circuit.png
Normal file
BIN
doc/images/instrumentation/baylibre_acme/ina226_circuit.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 70 KiB |
BIN
doc/images/instrumentation/baylibre_acme/ina226_functional.png
Normal file
BIN
doc/images/instrumentation/baylibre_acme/ina226_functional.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 42 KiB |
BIN
doc/images/instrumentation/baylibre_acme/int_time.png
Normal file
BIN
doc/images/instrumentation/baylibre_acme/int_time.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 132 KiB |
@@ -3,6 +3,8 @@
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
.. module:: devlib
|
||||
|
||||
Welcome to devlib documentation
|
||||
===============================
|
||||
|
||||
@@ -19,6 +21,7 @@ Contents:
|
||||
target
|
||||
modules
|
||||
instrumentation
|
||||
collectors
|
||||
derived_measurements
|
||||
platform
|
||||
connection
|
||||
|
@@ -1,11 +1,13 @@
|
||||
.. _instrumentation:
|
||||
|
||||
Instrumentation
|
||||
===============
|
||||
|
||||
The ``Instrument`` API provide a consistent way of collecting measurements from
|
||||
a target. Measurements are collected via an instance of a class derived from
|
||||
:class:`Instrument`. An ``Instrument`` allows collection of measurement from one
|
||||
or more channels. An ``Instrument`` may support ``INSTANTANEOUS`` or
|
||||
``CONTINUOUS`` collection, or both.
|
||||
:class:`~devlib.instrument.Instrument`. An ``Instrument`` allows collection of
|
||||
measurement from one or more channels. An ``Instrument`` may support
|
||||
``INSTANTANEOUS`` or ``CONTINUOUS`` collection, or both.
|
||||
|
||||
Example
|
||||
-------
|
||||
@@ -13,7 +15,7 @@ Example
|
||||
The following example shows how to use an instrument to read temperature from an
|
||||
Android target.
|
||||
|
||||
.. code-block:: ipython
|
||||
.. code-block:: python
|
||||
|
||||
# import and instantiate the Target and the instrument
|
||||
# (note: this assumes exactly one android target connected
|
||||
@@ -48,10 +50,12 @@ Android target.
|
||||
API
|
||||
---
|
||||
|
||||
.. module:: devlib.instrument
|
||||
|
||||
Instrument
|
||||
~~~~~~~~~~
|
||||
|
||||
.. class:: Instrument(target, **kwargs)
|
||||
.. class:: Instrument(target, \*\*kwargs)
|
||||
|
||||
An ``Instrument`` allows collection of measurement from one or more
|
||||
channels. An ``Instrument`` may support ``INSTANTANEOUS`` or ``CONTINUOUS``
|
||||
@@ -88,7 +92,7 @@ Instrument
|
||||
Returns channels for a particular ``measure`` type. A ``measure`` can be
|
||||
either a string (e.g. ``"power"``) or a :class:`MeasurmentType` instance.
|
||||
|
||||
.. method:: Instrument.setup(*args, **kwargs)
|
||||
.. method:: Instrument.setup(\*args, \*\*kwargs)
|
||||
|
||||
This will set up the instrument on the target. Parameters this method takes
|
||||
are particular to subclasses (see documentation for specific instruments
|
||||
@@ -115,19 +119,21 @@ Instrument
|
||||
If none of ``sites``, ``kinds`` or ``channels`` are provided then all
|
||||
available channels are enabled.
|
||||
|
||||
.. method:: Instrument.take_measurment()
|
||||
.. method:: Instrument.take_measurement()
|
||||
|
||||
Take a single measurement from ``active_channels``. Returns a list of
|
||||
:class:`Measurement` objects (one for each active channel).
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``INSTANTANEOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.start()
|
||||
|
||||
Starts collecting measurements from ``active_channels``.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.stop()
|
||||
@@ -135,7 +141,8 @@ Instrument
|
||||
Stops collecting measurements from ``active_channels``. Must be called after
|
||||
:func:`start()`.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_data(outfile)
|
||||
@@ -146,9 +153,9 @@ Instrument
|
||||
``<site>_<kind>`` (see :class:`InstrumentChannel`). The order of the columns
|
||||
will be the same as the order of channels in ``Instrument.active_channels``.
|
||||
|
||||
If reporting timestamps, one channel must have a ``site`` named ``"timestamp"``
|
||||
and a ``kind`` of a :class:`MeasurmentType` of an appropriate time unit which will
|
||||
be used, if appropriate, during any post processing.
|
||||
If reporting timestamps, one channel must have a ``site`` named
|
||||
``"timestamp"`` and a ``kind`` of a :class:`MeasurmentType` of an appropriate
|
||||
time unit which will be used, if appropriate, during any post processing.
|
||||
|
||||
.. note:: Currently supported time units are seconds, milliseconds and
|
||||
microseconds, other units can also be used if an appropriate
|
||||
@@ -158,31 +165,44 @@ Instrument
|
||||
that can be used to stream :class:`Measurement`\ s lists (similar to what is
|
||||
returned by ``take_measurement()``.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_raw()
|
||||
|
||||
Returns a list of paths to files containing raw output from the underlying
|
||||
source(s) that is used to produce the data CSV. If now raw output is
|
||||
source(s) that is used to produce the data CSV. If no raw output is
|
||||
generated or saved, an empty list will be returned. The format of the
|
||||
contents of the raw files is entirely source-dependent.
|
||||
|
||||
.. note:: This method is not guaranteed to return valid filepaths after the
|
||||
:meth:`teardown` method has been invoked as the raw files may have
|
||||
been deleted. Please ensure that copies are created manually
|
||||
prior to calling :meth:`teardown` if the files are to be retained.
|
||||
|
||||
.. method:: Instrument.teardown()
|
||||
|
||||
Performs any required clean up of the instrument. This usually includes
|
||||
removing temporary and raw files (if ``keep_raw`` is set to ``False`` on relevant
|
||||
instruments), stopping services etc.
|
||||
|
||||
.. attribute:: Instrument.sample_rate_hz
|
||||
|
||||
Sample rate of the instrument in Hz. Assumed to be the same for all channels.
|
||||
|
||||
.. note:: This attribute is only provided by :class:`Instrument`\ s that
|
||||
.. note:: This attribute is only provided by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
Instrument Channel
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. class:: InstrumentChannel(name, site, measurement_type, **attrs)
|
||||
.. class:: InstrumentChannel(name, site, measurement_type, \*\*attrs)
|
||||
|
||||
An :class:`InstrumentChannel` describes a single type of measurement that may
|
||||
be collected by an :class:`Instrument`. A channel is primarily defined by a
|
||||
``site`` and a ``measurement_type``.
|
||||
be collected by an :class:`~devlib.instrument.Instrument`. A channel is
|
||||
primarily defined by a ``site`` and a ``measurement_type``.
|
||||
|
||||
A ``site`` indicates where on the target a measurement is collected from
|
||||
(e.g. a voltage rail or location of a sensor).
|
||||
@@ -228,9 +248,9 @@ Measurement Types
|
||||
|
||||
In order to make instruments easier to use, and to make it easier to swap them
|
||||
out when necessary (e.g. change method of collecting power), a number of
|
||||
standard measurement types are defined. This way, for example, power will always
|
||||
be reported as "power" in Watts, and never as "pwr" in milliWatts. Currently
|
||||
defined measurement types are
|
||||
standard measurement types are defined. This way, for example, power will
|
||||
always be reported as "power" in Watts, and never as "pwr" in milliWatts.
|
||||
Currently defined measurement types are
|
||||
|
||||
|
||||
+-------------+-------------+---------------+
|
||||
@@ -269,4 +289,648 @@ Available Instruments
|
||||
|
||||
This section lists instruments that are currently part of devlib.
|
||||
|
||||
TODO
|
||||
.. todo:: Add other instruments
|
||||
|
||||
|
||||
Baylibre ACME BeagleBone Black Cape
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. _official project page: http://baylibre.com/acme/
|
||||
.. _image built for using the ACME: https://gitlab.com/baylibre-acme/ACME-Software-Release/blob/master/README.md
|
||||
.. _libiio (the Linux IIO interface): https://github.com/analogdevicesinc/libiio
|
||||
.. _Linux Industrial I/O Subsystem: https://wiki.analog.com/software/linux/docs/iio/iio
|
||||
.. _Texas Instruments INA226: http://www.ti.com/lit/ds/symlink/ina226.pdf
|
||||
|
||||
From the `official project page`_:
|
||||
|
||||
[The Baylibre Another Cute Measurement Equipment (ACME)] is an extension for
|
||||
the BeagleBone Black (the ACME Cape), designed to provide multi-channel power
|
||||
and temperature measurements capabilities to the BeagleBone Black (BBB). It
|
||||
comes with power and temperature probes integrating a power switch (the ACME
|
||||
Probes), turning it into an advanced all-in-one power/temperature measurement
|
||||
solution.
|
||||
|
||||
The ACME initiative is completely open source, from HW to SW drivers and
|
||||
applications.
|
||||
|
||||
|
||||
The Infrastructure
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Retrieving measurement from the ACME through devlib requires:
|
||||
|
||||
- a BBB running the `image built for using the ACME`_ (micro SD card required);
|
||||
|
||||
- an ACME cape on top of the BBB;
|
||||
|
||||
- at least one ACME probe [#acme_probe_variants]_ connected to the ACME cape;
|
||||
|
||||
- a BBB-host interface (typically USB or Ethernet) [#acme_name_conflicts]_;
|
||||
|
||||
- a host (the one running devlib) with `libiio (the Linux IIO interface)`_
|
||||
installed, and a Python environment able to find the libiio Python wrapper
|
||||
*i.e.* able to ``import iio`` as communications between the BBB and the
|
||||
host rely on the `Linux Industrial I/O Subsystem`_ (IIO).
|
||||
|
||||
The ACME probes are built on top of the `Texas Instruments INA226`_ and the
|
||||
data acquisition chain is as follows:
|
||||
|
||||
.. graphviz::
|
||||
|
||||
digraph target {
|
||||
rankdir = LR
|
||||
bgcolor = transparent
|
||||
|
||||
subgraph cluster_target {
|
||||
|
||||
subgraph cluster_BBB {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = "BeagleBone Black";
|
||||
|
||||
drivers -> "IIO Daemon" [dir = both]
|
||||
}
|
||||
|
||||
subgraph cluster_INA226 {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = INA226;
|
||||
|
||||
ADC -> Processing
|
||||
Processing -> Registers
|
||||
}
|
||||
|
||||
subgraph cluster_inputs {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = Inputs;
|
||||
|
||||
"Bus Voltage" -> ADC;
|
||||
"Shunt Voltage" -> ADC;
|
||||
}
|
||||
|
||||
Registers -> drivers [dir = both, label = I2C];
|
||||
}
|
||||
|
||||
subgraph cluster_IIO {
|
||||
style = none
|
||||
"IIO Daemon" -> "IIO Interface" [dir = both, label = "Eth./USB"]
|
||||
}
|
||||
}
|
||||
|
||||
For reference, the software stack on the host is roughly given by:
|
||||
|
||||
.. graphviz::
|
||||
|
||||
digraph host {
|
||||
rankdir = LR
|
||||
bgcolor = transparent
|
||||
|
||||
subgraph cluster_host {
|
||||
|
||||
subgraph cluster_backend {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = Backend;
|
||||
|
||||
"IIO Daemon" -> "C API" [dir = both]
|
||||
}
|
||||
|
||||
subgraph cluster_Python {
|
||||
node [style = filled, color = white];
|
||||
style = filled;
|
||||
color = lightgrey;
|
||||
label = Python;
|
||||
|
||||
"C API" -> "iio Wrapper" [dir = both]
|
||||
"iio Wrapper" -> devlib [dir = both]
|
||||
devlib -> "User" [dir = both]
|
||||
}
|
||||
}
|
||||
|
||||
subgraph cluster_IIO {
|
||||
style = none
|
||||
"IIO Interface" -> "IIO Daemon" [dir = both, label = "Eth./USB"]
|
||||
}
|
||||
}
|
||||
|
||||
Ethernet was the only IIO Interface used and tested during the development of
|
||||
this instrument. However,
|
||||
`USB seems to be supported <https://gitlab.com/baylibre-acme/ACME/issues/2>`_.
|
||||
The IIO library also provides "Local" and "XML" connections but these are to be
|
||||
used when the IIO devices are directly connected to the host *i.e.* in our
|
||||
case, if we were to run Python and devlib on the BBB. These are also untested.
|
||||
|
||||
Measuring Power
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
In IIO terminology, the ACME cape is an *IIO context* and ACME probes are *IIO
|
||||
devices* with *IIO channels*. An input *IIO channel* (the ACME has no *output
|
||||
IIO channel*) is a stream of samples and an ACME cape can be connected to up to
|
||||
8 probes *i.e.* have 8 *IIO devices*. The probes are discovered at startup by
|
||||
the IIO drivers on the BBB and are indexed according to the order in which they
|
||||
are connected to the ACME cape (with respect to the "Probe *X*" connectors on
|
||||
the cape).
|
||||
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/cape.png
|
||||
:width: 50%
|
||||
:alt: ACME Cape
|
||||
:align: center
|
||||
|
||||
ACME Cape on top of a BBB: Notice the numbered probe connectors (
|
||||
`source <https://baylibre.com/wp-content/uploads/2015/11/20150916_BayLibre_ACME_RevB-010-1030x599.png>`_)
|
||||
|
||||
|
||||
Please note that the numbers on the PCB do not represent the index of a probe
|
||||
in IIO; on top of being 1-based (as opposed to IIO device indexing being
|
||||
0-based), skipped connectors do not result in skipped indices *e.g.* if three
|
||||
probes are connected to the cape at ``Probe 1``, ``Probe 3`` and ``Probe 7``,
|
||||
IIO (and therefore the entire software stack, including devlib) will still
|
||||
refer to them as devices ``0``, ``1`` and ``2``, respectively. Furthermore,
|
||||
probe "hot swapping" does not seem to be supported.
|
||||
|
||||
INA226: The probing spearhead
|
||||
"""""""""""""""""""""""""""""
|
||||
|
||||
An ACME probe has 5 *IIO channels*, 4 of which being "IIO wrappers" around what
|
||||
the INA226 outputs (through its I2C registers): the bus voltage, the shunt
|
||||
voltage, the shunt current and the load power. The last channel gives the
|
||||
timestamps and is probably added further down the pipeline. A typical circuit
|
||||
configuration for the INA226 (useful when shunt-based ACME probes are used as
|
||||
their PCB does not contain the full circuit unlike the USB and jack variants)
|
||||
is given by its datasheet:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/ina226_circuit.png
|
||||
:width: 90%
|
||||
:alt: Typical circuit configuration, INA226
|
||||
:align: center
|
||||
|
||||
Typical Circuit Configuration (source: `Texas Instruments INA226`_)
|
||||
|
||||
|
||||
The analog-to-digital converter (ADC)
|
||||
'''''''''''''''''''''''''''''''''''''
|
||||
|
||||
The digital time-discrete sampled signal of the analog time-continuous input
|
||||
voltage signal is obtained through an analog-to-digital converter (ADC). To
|
||||
measure the "instantaneous input voltage", the ADC "charges up or down" a
|
||||
capacitor before measuring its charge.
|
||||
|
||||
The *integration time* is the time spent by the ADC acquiring the input signal
|
||||
in its capacitor. The longer this time is, the more resilient the sampling
|
||||
process is to unwanted noise. The drawback is that, if the integration time is
|
||||
increased then the sampling rate decreases. This effect can be somewhat
|
||||
compared to a *low-pass filter*.
|
||||
|
||||
As the INA226 alternatively connects its ADC to the bus voltage and shunt
|
||||
voltage (see previous figure), samples are retrieved at a frequency of
|
||||
|
||||
.. math::
|
||||
\frac{1}{T_{bus} + T_{shunt}}
|
||||
|
||||
where :math:`T_X` is the integration time for the :math:`X` voltage.
|
||||
|
||||
As described below (:meth:`BaylibreAcmeInstrument.reset
|
||||
<devlib.instrument.baylibre_acme.BaylibreAcmeInstrument.reset>`), the
|
||||
integration times for the bus and shunt voltage can be set separately which
|
||||
allows a tradeoff of accuracy between signals. This is particularly useful as
|
||||
the shunt voltage returned by the INA226 has a higher resolution than the bus
|
||||
voltage (2.5 μV and 1.25 mV LSB, respectively) and therefore would benefit more
|
||||
from a longer integration time.
|
||||
|
||||
As an illustration, consider the following sampled sine wave and notice how
|
||||
increasing the integration time (of the bus voltage in this case) "smoothes"
|
||||
out the signal:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/int_time.png
|
||||
:alt: Illustration of the impact of the integration time
|
||||
:align: center
|
||||
|
||||
Increasing the integration time increases the resilience to noise
|
||||
|
||||
|
||||
Internal signal processing
|
||||
''''''''''''''''''''''''''
|
||||
|
||||
The INA226 is able to accumulate samples acquired by its ADC and output to the
|
||||
ACME board (technically, to its I2C registers) the average value of :math:`N`
|
||||
samples. This is called *oversampling*. While the integration time somewhat
|
||||
behaves as an analog low-pass filter, the oversampling feature is a digital
|
||||
low-pass filter by definition. The former should be set to reduce sampling
|
||||
noise (*i.e.* noise on a single sample coming from the sampling process) while
|
||||
the latter should be used to filter out high-frequency noise present in the
|
||||
input signal and control the sampling frequency.
|
||||
|
||||
Therefore, samples are available at the output of the INA226 at a frequency
|
||||
|
||||
.. math::
|
||||
\frac{1}{N(T_{bus} + T_{shunt})}
|
||||
|
||||
and oversampling ratio provides a way to control the output sampling frequency
|
||||
(*i.e.* to limit the required output bandwidth) while making sure the signal
|
||||
fidelity is as desired.
|
||||
|
||||
|
||||
The 4 IIO channels coming from the INA226 can be grouped according to their
|
||||
respective origins: the bus and shunt voltages are measured (and, potentially
|
||||
filtered) while the shunt current and load power are computed. Indeed, the
|
||||
INA226 contains on-board fixed-point arithmetic units to compute the trivial
|
||||
expressions:
|
||||
|
||||
.. math::
|
||||
|
||||
I_{shunt} = \frac{V_{shunt}}{R_{shunt}}
|
||||
,\ \
|
||||
P_{load} = V_{load}\ I_{load}
|
||||
\approx V_{bus} \ I_{shunt}
|
||||
|
||||
A functional block diagram of this is also given by the datasheet:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/ina226_functional.png
|
||||
:width: 60%
|
||||
:alt: Functional block diagram, INA226
|
||||
:align: center
|
||||
|
||||
Acquisition and Processing: Functional Block Diagram
|
||||
(source: `Texas Instruments INA226`_)
|
||||
|
||||
In the end, there are therefore 3 channels (bus voltage, shunt voltage and
|
||||
timestamps) that are necessary to figure out the load power consumption, while
|
||||
the others are being provided for convenience *e.g.* in case the rest of the
|
||||
hardware does not have the computing power to make the computation.
|
||||
|
||||
|
||||
Sampling Frequency Issues
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
It looks like the INA226-ACME-BBB setup has a bottleneck preventing the
|
||||
sampling frequency from going higher than ~1.4 kHz (the maximal theoretical sampling
|
||||
frequency is ~3.6 kHz). We know that this issue is not internal to the ADC
|
||||
itself (inside of the INA226) because modifying the integration time affects
|
||||
the output signal even when the sampling frequency is capped (as shown above)
|
||||
but it may come from anywhere after that.
|
||||
|
||||
Because of this, there is no point in using a (theoretical) sampling frequency
|
||||
that is larger than 1.4 kHz. But it is important to note that the ACME will
|
||||
still report the theoretical sampling rate (probably computed with the formula
|
||||
given above) through :attr:`BaylibreAcmeInstrument.sample_rate_hz` and
|
||||
:attr:`IIOINA226Instrument.sample_rate_hz` even if it differs from the actual
|
||||
sampling rate.
|
||||
|
||||
Note that, even though this is obvious for the theoretical sampling rate, the
|
||||
specific values of the bus and shunt integration times do not seem to have an
|
||||
influence on the measured sampling rate; only their sum matters. This further
|
||||
points toward a data-processing bottleneck rather than a hardware bug in the
|
||||
acquisition device.
|
||||
|
||||
The following chart compares the evolution of the measured sampling rate with
|
||||
the expected one as we modify it through :math:`T_{shunt}`, :math:`T_{bus}` and
|
||||
:math:`N`:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/bottleneck.png
|
||||
:alt: Sampling frequency does not go higher than 1.4 kHz
|
||||
:align: center
|
||||
|
||||
Theoretical vs measured sampling rates
|
||||
|
||||
|
||||
Furthermore, because the transactions are done through a buffer (see next
|
||||
section), if the sampling frequency is too low, the connection may time-out
|
||||
before the buffer is full and ready to be sent. This may be fixed in an
|
||||
upcoming release.
|
||||
|
||||
Buffer-based transactions
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
Samples made available by the INA226 are retrieved by the BBB and stored in a
|
||||
buffer which is sent back to the host once it is full (see
|
||||
``buffer_samples_count`` in :meth:`BaylibreAcmeInstrument.setup
|
||||
<devlib.instrument.baylibre_acme.BaylibreAcmeInstrument.setup>` for setting its
|
||||
size). Therefore, the larger the buffer is, the longer it takes to be
|
||||
transmitted back but the less often it has to be transmitted. To illustrate
|
||||
this, consider the following graphs showing the time difference between
|
||||
successive samples in a retrieved signal when the size of the buffer changes:
|
||||
|
||||
.. figure:: images/instrumentation/baylibre_acme/buffer.png
|
||||
:alt: Buffer size impact on the sampled signal
|
||||
:align: center
|
||||
|
||||
Impact of the buffer size on the sampling regularity
|
||||
|
||||
devlib API
|
||||
^^^^^^^^^^
|
||||
|
||||
ACME Cape + BBB (IIO Context)
|
||||
"""""""""""""""""""""""""""""
|
||||
|
||||
devlib provides wrapper classes for all the IIO connections to an IIO context
|
||||
given by `libiio (the Linux IIO interface)`_ however only the network-based one
|
||||
has been tested. For the other classes, please refer to the official IIO
|
||||
documentation for the meaning of their constructor parameters.
|
||||
|
||||
.. module:: devlib.instrument.baylibre_acme
|
||||
|
||||
.. class:: BaylibreAcmeInstrument(target=None, iio_context=None, use_base_iio_context=False, probe_names=None)
|
||||
|
||||
Base class wrapper for the ACME instrument which itself is a wrapper for the
|
||||
IIO context base class. This class wraps around the passed ``iio_context``;
|
||||
if ``use_base_iio_context`` is ``True``, ``iio_context`` is first passed to
|
||||
the :class:`iio.Context` base class (see its documentation for how this
|
||||
parameter is then used), else ``iio_context`` is expected to be a valid
|
||||
instance of :class:`iio.Context`.
|
||||
|
||||
``probe_names`` is expected to be a string or list of strings; if passed,
|
||||
the probes in the instance are named according to it in the order in which
|
||||
they are discovered (see previous comment about probe discovery and
|
||||
:attr:`BaylibreAcmeInstrument.probes`). There should be as many
|
||||
``probe_names`` as there are probes connected to the ACME. By default, the
|
||||
probes keep their IIO names.
|
||||
|
||||
To ensure that the setup is reliable, ``devlib`` requires minimal versions
|
||||
for ``iio``, the IIO drivers and the ACME BBB SD image.
|
||||
|
||||
.. class:: BaylibreAcmeNetworkInstrument(target=None, hostname=None, probe_names=None)
|
||||
|
||||
Child class of :class:`BaylibreAcmeInstrument` for Ethernet-based IIO
|
||||
communication. The ``hostname`` should be the IP address or network name of
|
||||
the BBB. If it is ``None``, the ``IIOD_REMOTE`` environment variable will be
|
||||
used as the hostname. If that environment variable is empty, the server will
|
||||
be discovered using ZeroConf. If that environment variable is not set, a
|
||||
local context is created.
|
||||
|
||||
.. class:: BaylibreAcmeXMLInstrument(target=None, xmlfile=None, probe_names=None)
|
||||
|
||||
Child class of :class:`BaylibreAcmeInstrument` using the XML backend of the
|
||||
IIO library and building an IIO context from the provided ``xmlfile`` (a
|
||||
string giving the path to the file is expected).
|
||||
|
||||
.. class:: BaylibreAcmeLocalInstrument(target=None, probe_names=None)
|
||||
|
||||
Child class of :class:`BaylibreAcmeInstrument` using the Local IIO backend.
|
||||
|
||||
.. attribute:: BaylibreAcmeInstrument.mode
|
||||
|
||||
The collection mode for the ACME is ``CONTINUOUS``.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.setup(shunt_resistor, integration_time_bus, integration_time_shunt, oversampling_ratio, buffer_samples_count=None, buffer_is_circular=False, absolute_timestamps=False, high_resolution=True)
|
||||
|
||||
The ``shunt_resistor`` (:math:`R_{shunt}` [:math:`\mu\Omega`]),
|
||||
``integration_time_bus`` (:math:`T_{bus}` [s]), ``integration_time_shunt``
|
||||
(:math:`T_{shunt}` [s]) and ``oversampling_ratio`` (:math:`N`) are copied
|
||||
into on-board registers inside of the INA226 to be used as described above.
|
||||
Please note that there exists a limited set of accepted values for these
|
||||
parameters; for the integration times, refer to
|
||||
``IIOINA226Instrument.INTEGRATION_TIMES_AVAILABLE`` and for the
|
||||
``oversampling_ratio``, refer to
|
||||
``IIOINA226Instrument.OVERSAMPLING_RATIOS_AVAILABLE``. If all probes share
|
||||
the same value for these attributes, this class provides
|
||||
:attr:`BaylibreAcmeInstrument.OVERSAMPLING_RATIOS_AVAILABLE` and
|
||||
:attr:`BaylibreAcmeInstrument.INTEGRATION_TIMES_AVAILABLE`.
|
||||
|
||||
The ``buffer_samples_count`` is the size of the IIO buffer expressed **in
|
||||
samples**; this is independent of the number of active channels! By default,
|
||||
if ``buffer_samples_count`` is not passed, the IIO buffer of size
|
||||
:attr:`IIOINA226Instrument.sample_rate_hz` is created meaning that a buffer
|
||||
transfer happens roughly every second.
|
||||
|
||||
If ``absolute_timestamps`` is ``False``, the first sample from the
|
||||
``timestamps`` channel is subtracted from all the following samples of this
|
||||
channel, effectively making its signal start at 0.
|
||||
|
||||
``high_resolution`` is used to enable a mode where power and current are
|
||||
computed offline on the host machine running ``devlib``: even if the user
|
||||
asks for power or current channels, they are not enabled in hardware
|
||||
(INA226) and instead the necessary voltage signal(s) are enabled to allow
|
||||
the computation of the desired signals using the FPU of the host (which is
|
||||
very likely to be much more accurate than the fixed-point 16-bit unit of the
|
||||
INA226).
|
||||
|
||||
A circular buffer can be used by setting ``buffer_is_circular`` to ``True``
|
||||
(directly passed to :class:`iio.Buffer`).
|
||||
|
||||
Each one of the arguments of this method can either be a single value which
|
||||
will be used for all probes or a list of values giving the corresponding
|
||||
setting for each probe (in the order of ``probe_names`` passed to the
|
||||
constructor) with the exception of ``absolute_timestamps`` (as all signals
|
||||
are resampled onto a common time signal) which, if passed as an array, will
|
||||
be ``True`` only if all of its elements are ``True``.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.reset(sites=None, kinds=None, channels=None)
|
||||
|
||||
:meth:`BaylibreAcmeInstrument.setup` should **always** be called before
|
||||
calling this method so that the hardware is correctly configured. Once this
|
||||
method has been called, :meth:`BaylibreAcmeInstrument.setup` can only be
|
||||
called again once :meth:`BaylibreAcmeInstrument.teardown` has been called.
|
||||
|
||||
This method inherits from :meth:`Instrument.reset`; call
|
||||
:meth:`list_channels` for a list of available channels from a given
|
||||
instance.
|
||||
|
||||
Please note that the size of the transaction buffer is proportional to the
|
||||
number of active channels (for a fixed ``buffer_samples_count``). Therefore,
|
||||
limiting the number of active channels makes it possible to limit the required
|
||||
bandwidth. ``high_resolution`` in :meth:`BaylibreAcmeInstrument.setup`
|
||||
limits the number of active channels to the minimum required.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.start()
|
||||
|
||||
:meth:`BaylibreAcmeInstrument.reset` should **always** be called before
|
||||
calling this method so that the right channels are active,
|
||||
:meth:`BaylibreAcmeInstrument.stop` should **always** be called after
|
||||
calling this method and no other method of the object should be called
|
||||
in-between.
|
||||
|
||||
This method starts the sampling process of the active channels. The samples
|
||||
are stored but are not available until :meth:`BaylibreAcmeInstrument.stop`
|
||||
has been called.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.stop()
|
||||
|
||||
:meth:`BaylibreAcmeInstrument.start` should **always** be called before
|
||||
calling this method so that samples are being captured.
|
||||
|
||||
This method stops the sampling process of the active channels and retrieves
|
||||
and pre-processes the samples. Once this function has been called, the
|
||||
samples are made available through :meth:`BaylibreAcmeInstrument.get_data`.
|
||||
Note that it is safe to call :meth:`BaylibreAcmeInstrument.start` after this
|
||||
method returns but this will discard the data previously acquired.
|
||||
|
||||
When this method returns, it is guaranteed that the content of at least one
|
||||
IIO buffer will have been captured.
|
||||
|
||||
If different sampling frequencies were used for the different probes, the
|
||||
signals are resampled to share the time signal with the highest sampling
|
||||
frequency.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.teardown()
|
||||
|
||||
This method can be called at any point (unless otherwise specified *e.g.*
|
||||
:meth:`BaylibreAcmeInstrument.start`) to deactivate any active probe once
|
||||
:meth:`BaylibreAcmeInstrument.reset` has been called. This method does not
|
||||
affect already captured samples.
|
||||
|
||||
The following graph gives a summary of the allowed calling sequence(s) where
|
||||
each edge means "can be called directly after":
|
||||
|
||||
.. graphviz::
|
||||
|
||||
digraph acme_calls {
|
||||
rankdir = LR
|
||||
bgcolor = transparent
|
||||
|
||||
__init__ -> setup -> reset -> start -> stop -> teardown
|
||||
|
||||
teardown:sw -> setup [style=dashed]
|
||||
teardown -> reset [style=dashed]
|
||||
|
||||
stop -> reset [style=dashed]
|
||||
stop:nw -> start [style=dashed]
|
||||
|
||||
reset -> teardown [style=dashed]
|
||||
}
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.get_data(outfile=None)
|
||||
|
||||
Inherited from :meth:`Instrument.get_data`. If ``outfile`` is ``None``
|
||||
(default), the samples are returned as a `pandas.DataFrame` with the
|
||||
channels as columns. Else, it behaves like the parent class, returning a
|
||||
``MeasurementCsv``.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.add_channel()
|
||||
|
||||
Should not be used as new channels are discovered through the IIO context.
|
||||
|
||||
.. method:: BaylibreAcmeInstrument.list_channels()
|
||||
|
||||
Inherited from :meth:`Instrument.list_channels`.
|
||||
|
||||
.. attribute:: BaylibreAcmeInstrument.sample_rate_hz
|
||||
.. attribute:: BaylibreAcmeInstrument.OVERSAMPLING_RATIOS_AVAILABLE
|
||||
.. attribute:: BaylibreAcmeInstrument.INTEGRATION_TIMES_AVAILABLE
|
||||
|
||||
These attributes return the corresponding attributes of the probes if they
|
||||
all share the same value (and are therefore provided to avoid reading from a
|
||||
single probe and expecting the others to share this value). They should be
|
||||
used whenever the assumption that all probes share the same value for the
|
||||
accessed attribute is made. For this reason, an exception is raised if it is
|
||||
not the case.
|
||||
|
||||
If probes are active (*i.e.* :meth:`BaylibreAcmeInstrument.reset` has been
|
||||
called), only these are read for the value of the attribute (as others have
|
||||
been tagged to be ignored). If not, all probes are used.
|
||||
|
||||
.. attribute:: BaylibreAcmeInstrument.probes
|
||||
|
||||
Dictionary of :class:`IIOINA226Instrument` instances representing the probes
|
||||
connected to the ACME. If provided to the constructor, the keys are the
|
||||
``probe_names`` that were passed.
|
||||
|
||||
ACME Probes (IIO Devices)
|
||||
"""""""""""""""""""""""""
|
||||
|
||||
The following class is not supposed to be instantiated by the user code: the
|
||||
API is provided as the ACME probes can be accessed through the
|
||||
:attr:`BaylibreAcmeInstrument.probes` attribute.
|
||||
|
||||
.. class:: IIOINA226Instrument(iio_device)
|
||||
|
||||
This class is a wrapper for the :class:`iio.Device` class and takes a valid
|
||||
instance as ``iio_device``. It is not supposed to be instantiated by the
|
||||
user and its partial documentation is provided for read-access only.
|
||||
|
||||
.. attribute:: IIOINA226Instrument.shunt_resistor
|
||||
.. attribute:: IIOINA226Instrument.sample_rate_hz
|
||||
.. attribute:: IIOINA226Instrument.oversampling_ratio
|
||||
.. attribute:: IIOINA226Instrument.integration_time_shunt
|
||||
.. attribute:: IIOINA226Instrument.integration_time_bus
|
||||
.. attribute:: IIOINA226Instrument.OVERSAMPLING_RATIOS_AVAILABLE
|
||||
.. attribute:: IIOINA226Instrument.INTEGRATION_TIMES_AVAILABLE
|
||||
|
||||
These attributes are provided *for reference* and should not be assigned to
|
||||
but can be used to make the user code more readable, if needed. Please note
|
||||
that, as reading these attributes reads the underlying value from the
|
||||
hardware, they should not be read when the ACME is active *i.e* when
|
||||
:meth:`BaylibreAcmeInstrument.setup` has been called without calling
|
||||
:meth:`BaylibreAcmeInstrument.teardown`.
|
||||
|
||||
|
||||
Examples
|
||||
""""""""
|
||||
|
||||
The following example shows a basic use of an ACME at IP address
|
||||
``ACME_IP_ADDR`` with 2 probes connected, capturing all the channels during
|
||||
(roughly) 10 seconds at a sampling rate of 613 Hz and outputting the
|
||||
measurements to the CSV file ``acme.csv``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import time
|
||||
import devlib
|
||||
|
||||
acme = devlib.BaylibreAcmeNetworkInstrument(hostname=ACME_IP_ADDR,
|
||||
probe_names=['battery', 'usb'])
|
||||
|
||||
int_times = acme.INTEGRATION_TIMES_AVAILABLE
|
||||
ratios = acme.OVERSAMPLING_RATIOS_AVAILABLE
|
||||
|
||||
acme.setup(shunt_resistor=20000,
|
||||
integration_time_bus=int_times[1],
|
||||
integration_time_shunt=int_times[1],
|
||||
oversampling_ratio=ratios[1])
|
||||
|
||||
acme.reset()
|
||||
acme.start()
|
||||
time.sleep(10)
|
||||
acme.stop()
|
||||
acme.get_data('acme.csv')
|
||||
acme.teardown()
|
||||
|
||||
It is common to have different resistances for different probe shunt resistors.
|
||||
Furthermore, we may want to have different sampling frequencies for different
|
||||
probes (*e.g.* if it is known that the USB voltage changes rather slowly).
|
||||
Finally, it is possible to set the integration times for the bus and shunt
|
||||
voltages of a same probe to different values. The following call to
|
||||
:meth:`BaylibreAcmeInstrument.setup` illustrates these:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
acme.setup(shunt_resistor=[20000, 10000],
|
||||
integration_time_bus=[int_times[2], int_times[3]],
|
||||
integration_time_shunt=[int_times[3], int_times[4]],
|
||||
oversampling_ratio=[ratios[0], ratios[1]])
|
||||
|
||||
    for n, p in acme.probes.items():
|
||||
print('{}:'.format(n))
|
||||
print(' T_bus = {} s'.format(p.integration_time_bus))
|
||||
print(' T_shn = {} s'.format(p.integration_time_shunt))
|
||||
print(' N = {}'.format(p.oversampling_ratio))
|
||||
print(' freq = {} Hz'.format(p.sample_rate_hz))
|
||||
|
||||
# Output:
|
||||
#
|
||||
# battery:
|
||||
# T_bus = 0.000332 s
|
||||
# T_shn = 0.000588 s
|
||||
# N = 1
|
||||
# freq = 1087 Hz
|
||||
# usb:
|
||||
# T_bus = 0.000588 s
|
||||
# T_shn = 0.0011 s
|
||||
# N = 4
|
||||
# freq = 148 Hz
|
||||
|
||||
Please keep in mind that calling ``acme.get_data('acme.csv')`` after capturing
|
||||
samples with this setup will output signals with the same sampling frequency
|
||||
(the highest one among the sampling frequencies) as the signals are resampled
|
||||
to output a single time signal.
|
||||
|
||||
.. rubric:: Footnotes
|
||||
|
||||
.. [#acme_probe_variants] There exist different variants of the ACME probe (USB, Jack, shunt resistor) but they all use the same probing hardware (the TI INA226) and don't differ from the point of view of the software stack (at any level, including devlib, the highest one)
|
||||
|
||||
.. [#acme_name_conflicts] Be careful that in cases where multiple ACME boards are being used, it may be required to manually handle name conflicts
|
||||
|
@@ -1,11 +1,13 @@
|
||||
.. module:: devlib.module
|
||||
|
||||
.. _modules:
|
||||
|
||||
Modules
|
||||
=======
|
||||
|
||||
Modules add additional functionality to the core :class:`Target` interface.
|
||||
Usually, it is support for specific subsystems on the target. Modules are
|
||||
instantiated as attributes of the :class:`Target` instance.
|
||||
Modules add additional functionality to the core :class:`~devlib.target.Target`
|
||||
interface. Usually, it is support for specific subsystems on the target. Modules
|
||||
are instantiated as attributes of the :class:`~devlib.target.Target` instance.
|
||||
|
||||
hotplug
|
||||
-------
|
||||
@@ -28,6 +30,8 @@ interface to this subsystem
|
||||
# Make sure all cpus are online
|
||||
target.hotplug.online_all()
|
||||
|
||||
.. module:: devlib.module.cpufreq
|
||||
|
||||
cpufreq
|
||||
-------
|
||||
|
||||
@@ -132,6 +136,9 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
``1`` or ``"cpu1"``).
|
||||
:param frequency: Frequency to set.
|
||||
|
||||
|
||||
.. module:: devlib.module.cupidle
|
||||
|
||||
cpuidle
|
||||
-------
|
||||
|
||||
@@ -167,11 +174,15 @@ cpuidle
|
||||
You can also call ``enable()`` or ``disable()`` on :class:`CpuidleState` objects
|
||||
returned by get_state(s).
|
||||
|
||||
.. module:: devlib.module.cgroups
|
||||
|
||||
cgroups
|
||||
-------
|
||||
|
||||
TODO
|
||||
|
||||
.. module:: devlib.module.hwmon
|
||||
|
||||
hwmon
|
||||
-----
|
||||
|
||||
@@ -187,8 +198,8 @@ Modules implement discrete, optional pieces of functionality ("optional" in the
|
||||
sense that the functionality may or may not be present on the target device, or
|
||||
that it may or may not be necessary for a particular application).
|
||||
|
||||
Every module (ultimately) derives from :class:`Module` class. A module must
|
||||
define the following class attributes:
|
||||
Every module (ultimately) derives from :class:`devlib.module.Module` class. A
|
||||
module must define the following class attributes:
|
||||
|
||||
:name: A unique name for the module. This cannot clash with any of the existing
|
||||
names and must be a valid Python identifier, but is otherwise free-form.
|
||||
@@ -204,21 +215,26 @@ define the following class attributes:
|
||||
which case the module's ``name`` will be treated as its
|
||||
``kind`` as well.
|
||||
|
||||
:stage: This defines when the module will be installed into a :class:`Target`.
|
||||
Currently, the following values are allowed:
|
||||
:stage: This defines when the module will be installed into a
|
||||
:class:`~devlib.target.Target`. Currently, the following values are
|
||||
allowed:
|
||||
|
||||
:connected: The module is installed after a connection to the target has
|
||||
been established. This is the default.
|
||||
:early: The module will be installed when a :class:`Target` is first
|
||||
created. This should be used for modules that do not rely on a
|
||||
live connection to the target.
|
||||
:early: The module will be installed when a
|
||||
:class:`~devlib.target.Target` is first created. This should be
|
||||
used for modules that do not rely on a live connection to the
|
||||
target.
|
||||
:setup: The module will be installed after initial setup of the device
|
||||
has been performed. This allows the module to utilize assets
|
||||
deployed during the setup stage for example 'Busybox'.
|
||||
|
||||
Additionally, a module must implement a static (or class) method :func:`probe`:
|
||||
|
||||
.. method:: Module.probe(target)
|
||||
|
||||
This method takes a :class:`Target` instance and returns ``True`` if this
|
||||
module is supported by that target, or ``False`` otherwise.
|
||||
This method takes a :class:`~devlib.target.Target` instance and returns
|
||||
``True`` if this module is supported by that target, or ``False`` otherwise.
|
||||
|
||||
.. note:: If the module ``stage`` is ``"early"``, this method cannot assume
|
||||
that a connection has been established (i.e. it can only access
|
||||
@@ -228,9 +244,9 @@ Installation and invocation
|
||||
***************************
|
||||
|
||||
The default installation method will create an instance of a module (the
|
||||
:class:`Target` instance being the sole argument) and assign it to the target
|
||||
instance attribute named after the module's ``kind`` (or ``name`` if ``kind`` is
|
||||
``None``).
|
||||
:class:`~devlib.target.Target` instance being the sole argument) and assign it
|
||||
to the target instance attribute named after the module's ``kind`` (or
|
||||
``name`` if ``kind`` is ``None``).
|
||||
|
||||
It is possible to change the installation procedure for a module by overriding
|
||||
the default :func:`install` method. The method must have the following
|
||||
@@ -319,7 +335,7 @@ FlashModule
|
||||
|
||||
"flash"
|
||||
|
||||
.. method:: __call__(image_bundle=None, images=None, boot_config=None)
|
||||
.. method:: __call__(image_bundle=None, images=None, boot_config=None, connect=True)
|
||||
|
||||
Must be implemented by derived classes.
|
||||
|
||||
@@ -335,15 +351,17 @@ FlashModule
|
||||
:param boot_config: Some platforms require specifying boot arguments at the
|
||||
time of flashing the images, rather than during each
|
||||
reboot. For other platforms, this will be ignored.
|
||||
:connect: Specify whether to try and connect to the target after flashing.
|
||||
|
||||
|
||||
Module Registration
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Modules are specified on :class:`Target` or :class:`Platform` creation by name.
|
||||
In order to find the class associated with the name, the module needs to be
|
||||
registered with ``devlib``. This is accomplished by passing the module class
|
||||
into :func:`register_module` method once it is defined.
|
||||
Modules are specified on :class:`~devlib.target.Target` or
|
||||
:class:`~devlib.platform.Platform` creation by name. In order to find the class
|
||||
associated with the name, the module needs to be registered with ``devlib``.
|
||||
This is accomplished by passing the module class into :func:`register_module`
|
||||
method once it is defined.
|
||||
|
||||
.. note:: If you're writing a module to be included as part of ``devlib`` code
|
||||
base, you can place the file with the module class under
|
||||
|
160
doc/overview.rst
160
doc/overview.rst
@@ -1,24 +1,26 @@
|
||||
Overview
|
||||
========
|
||||
|
||||
A :class:`Target` instance serves as the main interface to the target device.
|
||||
There currently three target interfaces:
|
||||
A :class:`~devlib.target.Target` instance serves as the main interface to the target device.
|
||||
There are currently four target interfaces:
|
||||
|
||||
- :class:`LinuxTarget` for interacting with Linux devices over SSH.
|
||||
- :class:`AndroidTarget` for interacting with Android devices over adb.
|
||||
- :class:`LocalLinuxTarget`: for interacting with the local Linux host.
|
||||
- :class:`~devlib.target.LinuxTarget` for interacting with Linux devices over SSH.
|
||||
- :class:`~devlib.target.AndroidTarget` for interacting with Android devices over adb.
|
||||
- :class:`~devlib.target.ChromeOsTarget`: for interacting with ChromeOS devices
|
||||
over SSH, and their Android containers over adb.
|
||||
- :class:`~devlib.target.LocalLinuxTarget`: for interacting with the local Linux host.
|
||||
|
||||
They all work in more-or-less the same way, with the major difference being in
|
||||
how connection settings are specified; though there may also be a few APIs
|
||||
specific to a particular target type (e.g. :class:`AndroidTarget` exposes
|
||||
methods for working with logcat).
|
||||
specific to a particular target type (e.g. :class:`~devlib.target.AndroidTarget`
|
||||
exposes methods for working with logcat).
|
||||
|
||||
|
||||
Acquiring a Target
|
||||
------------------
|
||||
|
||||
To create an interface to your device, you just need to instantiate one of the
|
||||
:class:`Target` derivatives listed above, and pass it the right
|
||||
:class:`~devlib.target.Target` derivatives listed above, and pass it the right
|
||||
``connection_settings``. Code snippet below gives a typical example of
|
||||
instantiating each of the three target types.
|
||||
|
||||
@@ -37,6 +39,7 @@ instantiating each of the three target types.
|
||||
'password': 'sekrit',
|
||||
# or
|
||||
'keyfile': '/home/me/.ssh/id_rsa'})
|
||||
# ChromeOsTarget connection is performed in the same way as LinuxTarget
|
||||
|
||||
# For an Android target, you will need to pass the device name as reported
|
||||
# by "adb devices". If there is only one device visible to adb, you can omit
|
||||
@@ -44,21 +47,22 @@ instantiating each of the three target types.
|
||||
t3 = AndroidTarget(connection_settings={'device': '0123456789abcde'})
|
||||
|
||||
Instantiating a target may take a second or two as the remote device will be
|
||||
queried to initialize :class:`Target`'s internal state. If you would like to
|
||||
create a :class:`Target` instance but not immediately connect to the remote
|
||||
device, you can pass ``connect=False`` parameter. If you do that, you would have
|
||||
to then explicitly call ``t.connect()`` before you can interact with the device.
|
||||
queried to initialize :class:`~devlib.target.Target`'s internal state. If you
|
||||
would like to create a :class:`~devlib.target.Target` instance but not
|
||||
immediately connect to the remote device, you can pass ``connect=False``
|
||||
parameter. If you do that, you would have to then explicitly call
|
||||
``t.connect()`` before you can interact with the device.
|
||||
|
||||
There are a few additional parameters you can pass in instantiation besides
|
||||
``connection_settings``, but they are usually unnecessary. Please see
|
||||
:class:`Target` API documentation for more details.
|
||||
:class:`~devlib.target.Target` API documentation for more details.
|
||||
|
||||
Target Interface
|
||||
----------------
|
||||
|
||||
This is a quick overview of the basic interface to the device. See
|
||||
:class:`Target` API documentation for the full list of supported methods and
|
||||
more detailed documentation.
|
||||
:class:`~devlib.target.Target` API documentation for the full list of supported
|
||||
methods and more detailed documentation.
|
||||
|
||||
One-time Setup
|
||||
~~~~~~~~~~~~~~
|
||||
@@ -79,8 +83,14 @@ safe side, it's a good idea to call this once at the beginning of your scripts.
|
||||
Command Execution
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
There are several ways to execute a command on the target. In each case, a
|
||||
:class:`TargetError` will be raised if something goes wrong. In each case, it is
|
||||
There are several ways to execute a command on the target. In each case, an
|
||||
instance of a subclass of :class:`TargetError` will be raised if something goes
|
||||
wrong. When a transient error is encountered such as the loss of the network
|
||||
connectivity, it will raise a :class:`TargetTransientError`. When the command
|
||||
fails, it will raise a :class:`TargetStableError` unless the
|
||||
``will_succeed=True`` parameter is specified, in which case a
|
||||
:class:`TargetTransientError` will be raised since it is assumed that the
|
||||
command cannot fail unless there is an environment issue. In each case, it is
|
||||
also possible to specify ``as_root=True`` if the specified command should be
|
||||
executed as root.
|
||||
|
||||
@@ -158,15 +168,16 @@ Process Control
|
||||
# PsEntry records.
|
||||
entries = t.ps()
|
||||
# e.g. print virtual memory sizes of all running sshd processes:
|
||||
print ', '.join(str(e.vsize) for e in entries if e.name == 'sshd')
|
||||
print(', '.join(str(e.vsize) for e in entries if e.name == 'sshd'))
|
||||
|
||||
|
||||
More...
|
||||
~~~~~~~
|
||||
|
||||
As mentioned previously, the above is not intended to be exhaustive
|
||||
documentation of the :class:`Target` interface. Please refer to the API
|
||||
documentation for the full list of attributes and methods and their parameters.
|
||||
documentation of the :class:`~devlib.target.Target` interface. Please refer to
|
||||
the API documentation for the full list of attributes and methods and their
|
||||
parameters.
|
||||
|
||||
Super User Privileges
|
||||
---------------------
|
||||
@@ -213,13 +224,75 @@ executables_directory
|
||||
t.push('/local/path/to/assets.tar.gz', t.get_workpath('assets.tar.gz'))
|
||||
|
||||
|
||||
Exceptions Handling
|
||||
-------------------
|
||||
|
||||
Devlib custom exceptions all derive from :class:`DevlibError`. Some exceptions
|
||||
are further categorized into :class:`DevlibTransientError` and
|
||||
:class:`DevlibStableError`. Transient errors are raised when there is an issue
|
||||
in the environment that can happen randomly such as the loss of network
|
||||
connectivity. Even a properly configured environment can be subject to such
|
||||
transient errors. Stable errors are related to either programming errors or
|
||||
configuration issues in the broad sense. This distinction allows quicker
|
||||
analysis of failures, since most transient errors can be ignored unless they
|
||||
happen at an alarming rate. :class:`DevlibTransientError` usually propagates up
|
||||
to the caller of devlib APIs, since it means that an operation could not
|
||||
complete. Retrying it or bailing out is therefore a responsibility of the caller.
|
||||
|
||||
The hierarchy is as follows:
|
||||
|
||||
.. module:: devlib.exception
|
||||
|
||||
- :class:`DevlibError`
|
||||
|
||||
- :class:`WorkerThreadError`
|
||||
- :class:`HostError`
|
||||
- :class:`TargetError`
|
||||
|
||||
- :class:`TargetStableError`
|
||||
- :class:`TargetTransientError`
|
||||
- :class:`TargetNotRespondingError`
|
||||
|
||||
- :class:`DevlibStableError`
|
||||
|
||||
- :class:`TargetStableError`
|
||||
|
||||
- :class:`DevlibTransientError`
|
||||
|
||||
- :class:`TimeoutError`
|
||||
- :class:`TargetTransientError`
|
||||
- :class:`TargetNotRespondingError`
|
||||
|
||||
|
||||
Extending devlib
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
New devlib code is likely to face the decision of raising a transient or stable
|
||||
error. When it is unclear which one should be used, it can generally be assumed
|
||||
that the system is properly configured and therefore, the error is linked to an
|
||||
environment transient failure. If a function is somehow probing a property of a
|
||||
system in the broad meaning, it can use a stable error as a way to signal a
|
||||
non-expected value of that property even if it can also face transient errors.
|
||||
An example are the various ``execute()`` methods where the command can generally
|
||||
not be assumed to be supposed to succeed by devlib. Their failure does not
|
||||
usually come from an environment random issue, but for example a permission
|
||||
error. The user can use such expected failure to probe the system. Another
|
||||
example is boot completion detection on Android: boot failure cannot be
|
||||
distinguished from a timeout which is too small. A non-transient exception is
|
||||
still raised, since assuming the timeout comes from a network failure would
|
||||
either make the function useless, or force the calling code to handle a
|
||||
transient exception under normal operation. The calling code would potentially
|
||||
wrongly catch transient exceptions raised by other functions as well and attach
|
||||
a wrong meaning to them.
|
||||
|
||||
|
||||
Modules
|
||||
-------
|
||||
|
||||
Additional functionality is exposed via modules. Modules are initialized as
|
||||
attributes of a target instance. By default, ``hotplug``, ``cpufreq``,
|
||||
``cpuidle``, ``cgroups`` and ``hwmon`` will attempt to load on target; additional
|
||||
modules may be specified when creating a :class:`Target` instance.
|
||||
modules may be specified when creating a :class:`~devlib.target.Target` instance.
|
||||
|
||||
A module will probe the target for support before attempting to load. So if the
|
||||
underlying platform does not support particular functionality (e.g. the kernel
|
||||
@@ -238,12 +311,22 @@ has been successfully installed on a target, you can use ``has()`` method, e.g.
|
||||
|
||||
Please see the modules documentation for more detail.
|
||||
|
||||
Instruments and Collectors
|
||||
--------------------------
|
||||
|
||||
Measurement and Trace
|
||||
---------------------
|
||||
You can retrieve multiple types of data from a target. There are two categories
|
||||
of classes that allow for this:
|
||||
|
||||
You can collect traces (currently, just ftrace) using
|
||||
:class:`TraceCollector`\ s. For example
|
||||
|
||||
- An :class:`Instrument` which may be used to collect measurements (such as power) from
|
||||
targets that support it. Please see the
|
||||
:ref:`instruments documentation <Instrumentation>` for more details.
|
||||
|
||||
- A :class:`Collector` may be used to collect arbitrary data from a ``Target`` varying
|
||||
from screenshots to trace data. Please see the
|
||||
:ref:`collectors documentation <collector>` for more details.
|
||||
|
||||
An example workflow using :class:`FTraceCollector` is as follows:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@@ -254,29 +337,22 @@ You can collected traces (currently, just ftrace) using
|
||||
# the buffer size to be used.
|
||||
trace = FtraceCollector(t, events=['power*'], buffer_size=40000)
|
||||
|
||||
# clear ftrace buffer
|
||||
trace.reset()
|
||||
|
||||
# start trace collection
|
||||
trace.start()
|
||||
|
||||
# Perform the operations you want to trace here...
|
||||
import time; time.sleep(5)
|
||||
|
||||
# stop trace collection
|
||||
trace.stop()
|
||||
# As a context manager, clear ftrace buffer using trace.reset(),
|
||||
# start trace collection using trace.start(), then stop it using
|
||||
# trace.stop(). Using a context manager brings the guarantee that
|
||||
# tracing will stop even if an exception occurs, including
|
||||
# KeyboardInterrupt (ctr-C) and SystemExit (sys.exit)
|
||||
with trace:
|
||||
# Perform the operations you want to trace here...
|
||||
import time; time.sleep(5)
|
||||
|
||||
# extract the trace file from the target into a local file
|
||||
trace.get_trace('/tmp/trace.bin')
|
||||
trace.get_data('/tmp/trace.bin')
|
||||
|
||||
# View trace file using Kernelshark (must be installed on the host).
|
||||
trace.view('/tmp/trace.bin')
|
||||
|
||||
# Convert binary trace into text format. This would normally be done
|
||||
# automatically during get_trace(), unless autoreport is set to False during
|
||||
# automatically during get_data(), unless autoreport is set to False during
|
||||
# instantiation of the trace collector.
|
||||
trace.report('/tmp/trace.bin', '/tmp/trace.txt')
|
||||
|
||||
In a similar way, :class:`Instrument` instances may be used to collect
|
||||
measurements (such as power) from targets that support it. Please see
|
||||
instruments documentation for more details.
|
||||
|
@@ -1,14 +1,17 @@
|
||||
.. module:: devlib.platform
|
||||
|
||||
.. _platform:
|
||||
|
||||
Platform
|
||||
========
|
||||
|
||||
:class:`Platform`\ s describe the system underlying the OS. They encapsulate
|
||||
hardware- and firmware-specific details. In most cases, the generic
|
||||
:class:`Platform` class, which gets used if a platform is not explicitly
|
||||
specified on :class:`Target` creation, will be sufficient. It will automatically
|
||||
query as much platform information (such as CPU topology, hardware model, etc) if
|
||||
it was not specified explicitly by the user.
|
||||
:class:`~devlib.platform.Platform`\ s describe the system underlying the OS.
|
||||
They encapsulate hardware- and firmware-specific details. In most cases, the
|
||||
generic :class:`~devlib.platform.Platform` class, which gets used if a
|
||||
platform is not explicitly specified on :class:`~devlib.target.Target`
|
||||
creation, will be sufficient. It will automatically query as much platform
|
||||
information (such as CPU topology, hardware model, etc) if it was not specified
|
||||
explicitly by the user.
|
||||
|
||||
|
||||
.. class:: Platform(name=None, core_names=None, core_clusters=None,\
|
||||
@@ -31,6 +34,7 @@ it was not specified explicitly by the user.
|
||||
platform (e.g. for handling flashing, rebooting, etc). These
|
||||
would be added to the Target's modules. (See :ref:`modules`\ ).
|
||||
|
||||
.. module:: devlib.platform.arm
|
||||
|
||||
Versatile Express
|
||||
-----------------
|
||||
@@ -38,8 +42,8 @@ Versatile Express
|
||||
The generic platform may be extended to support hardware- or
|
||||
infrastructure-specific functionality. Platforms exist for ARM
|
||||
VersatileExpress-based :class:`Juno` and :class:`TC2` development boards. In
|
||||
addition to the standard :class:`Platform` parameters above, these platforms
|
||||
support additional configuration:
|
||||
addition to the standard :class:`~devlib.platform.Platform` parameters above,
|
||||
these platforms support additional configuration:
|
||||
|
||||
|
||||
.. class:: VersatileExpressPlatform
|
||||
@@ -116,43 +120,53 @@ support additional configuration:
|
||||
Gem5 Simulation Platform
|
||||
------------------------
|
||||
|
||||
By initialising a Gem5SimulationPlatform, devlib will start a gem5 simulation (based upon the
|
||||
arguments the user provided) and then connect to it using :class:`Gem5Connection`.
|
||||
Using the methods discussed above, some methods of the :class:`Target` will be altered
|
||||
slightly to better suit gem5.
|
||||
By initialising a Gem5SimulationPlatform, devlib will start a gem5 simulation
|
||||
(based upon the arguments the user provided) and then connect to it using
|
||||
:class:`~devlib.utils.ssh.Gem5Connection`. Using the methods discussed above,
|
||||
some methods of the :class:`~devlib.target.Target` will be altered slightly to
|
||||
better suit gem5.
|
||||
|
||||
.. module:: devlib.platform.gem5
|
||||
|
||||
.. class:: Gem5SimulationPlatform(name, host_output_dir, gem5_bin, gem5_args, gem5_virtio, gem5_telnet_port=None)
|
||||
|
||||
During initialisation the gem5 simulation will be kicked off (based upon the arguments
|
||||
provided by the user) and the telnet port used by the gem5 simulation will be intercepted
|
||||
and stored for use by the :class:`Gem5Connection`.
|
||||
During initialisation the gem5 simulation will be kicked off (based upon the
|
||||
arguments provided by the user) and the telnet port used by the gem5
|
||||
simulation will be intercepted and stored for use by the
|
||||
:class:`~devlib.utils.ssh.Gem5Connection`.
|
||||
|
||||
:param name: Platform name
|
||||
|
||||
:param host_output_dir: Path on the host where the gem5 outputs will be placed (e.g. stats file)
|
||||
:param host_output_dir: Path on the host where the gem5 outputs will be
|
||||
placed (e.g. stats file)
|
||||
|
||||
:param gem5_bin: gem5 binary
|
||||
|
||||
:param gem5_args: Arguments to be passed onto gem5 such as config file etc.
|
||||
|
||||
:param gem5_virtio: Arguments to be passed onto gem5 in terms of the virtIO device used
|
||||
to transfer files between the host and the gem5 simulated system.
|
||||
:param gem5_virtio: Arguments to be passed onto gem5 in terms of the virtIO
|
||||
device used to transfer files between the host and the gem5 simulated
|
||||
system.
|
||||
|
||||
:param gem5_telnet_port: Not yet in use as it would be used in future implementations
|
||||
of devlib in which the user could use the platform to pick
|
||||
up an existing and running simulation.
|
||||
:param gem5_telnet_port: Not yet in use as it would be used in future
|
||||
implementations of devlib in which the user could
|
||||
use the platform to pick up an existing and running
|
||||
simulation.
|
||||
|
||||
|
||||
.. method:: Gem5SimulationPlatform.init_target_connection([target])
|
||||
|
||||
Based upon the OS defined in the :class:`Target`, the type of :class:`Gem5Connection`
|
||||
will be set (:class:`AndroidGem5Connection` or :class:`LinuxGem5Connection`).
|
||||
Based upon the OS defined in the :class:`~devlib.target.Target`, the type of
|
||||
:class:`~devlib.utils.ssh.Gem5Connection` will be set
|
||||
(:class:`~devlib.utils.ssh.AndroidGem5Connection` or
|
||||
:class:`~devlib.utils.ssh.LinuxGem5Connection`).
|
||||
|
||||
.. method:: Gem5SimulationPlatform.update_from_target([target])
|
||||
|
||||
This method provides specific setup procedures for a gem5 simulation. First of all, the m5
|
||||
binary will be installed on the guest (if it is not present). Secondly, three methods
|
||||
in the :class:`Target` will be monkey-patched:
|
||||
This method provides specific setup procedures for a gem5 simulation. First
|
||||
of all, the m5 binary will be installed on the guest (if it is not present).
|
||||
Secondly, three methods in the :class:`~devlib.target.Target` will be
|
||||
monkey-patched:
|
||||
|
||||
- **reboot**: this is not supported in gem5
|
||||
- **reset**: this is not supported in gem5
|
||||
@@ -160,7 +174,7 @@ slightly to better suit gem5.
|
||||
monkey-patched method will first try to
|
||||
transfer the existing screencaps.
|
||||
In case that does not work, it will fall back
|
||||
to the original :class:`Target` implementation
|
||||
to the original :class:`~devlib.target.Target` implementation
|
||||
of :func:`capture_screen`.
|
||||
|
||||
Finally, it will call the parent implementation of :func:`update_from_target`.
|
||||
|
319
doc/target.rst
319
doc/target.rst
@@ -1,57 +1,62 @@
|
||||
.. module:: devlib.target
|
||||
|
||||
Target
|
||||
======
|
||||
|
||||
|
||||
.. class:: Target(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=None)
|
||||
|
||||
:class:`Target` is the primary interface to the remote device. All interactions
|
||||
with the device are performed via a :class:`Target` instance, either
|
||||
directly, or via its modules or a wrapper interface (such as an
|
||||
:class:`Instrument`).
|
||||
:class:`~devlib.target.Target` is the primary interface to the remote
|
||||
device. All interactions with the device are performed via a
|
||||
:class:`~devlib.target.Target` instance, either directly, or via its
|
||||
modules or a wrapper interface (such as an
|
||||
:class:`~devlib.instrument.Instrument`).
|
||||
|
||||
:param connection_settings: A ``dict`` that specifies how to connect to the remote
|
||||
device. Its contents depend on the specific :class:`Target` type (see
|
||||
:param connection_settings: A ``dict`` that specifies how to connect to the
|
||||
remote device. Its contents depend on the specific
|
||||
:class:`~devlib.target.Target` type (see
|
||||
:ref:`connection-types`\ ).
|
||||
|
||||
:param platform: A :class:`Target` defines interactions at Operating System level. A
|
||||
:class:`Platform` describes the underlying hardware (such as CPUs
|
||||
available). If a :class:`Platform` instance is not specified on
|
||||
:class:`Target` creation, one will be created automatically and it will
|
||||
dynamically probe the device to discover as much about the underlying
|
||||
hardware as it can. See also :ref:`platform`\ .
|
||||
:param platform: A :class:`~devlib.target.Target` defines interactions at
|
||||
Operating System level. A :class:`~devlib.platform.Platform` describes
|
||||
the underlying hardware (such as CPUs available). If a
|
||||
:class:`~devlib.platform.Platform` instance is not specified on
|
||||
:class:`~devlib.target.Target` creation, one will be created
|
||||
automatically and it will dynamically probe the device to discover
|
||||
as much about the underlying hardware as it can. See also
|
||||
:ref:`platform`\ .
|
||||
|
||||
:param working_directory: This is primary location for on-target file system
|
||||
interactions performed by ``devlib``. This location *must* be readable and
|
||||
writable directly (i.e. without sudo) by the connection's user account.
|
||||
It may or may not allow execution. This location will be created,
|
||||
if necessary, during ``setup()``.
|
||||
interactions performed by ``devlib``. This location *must* be readable
|
||||
and writable directly (i.e. without sudo) by the connection's user
|
||||
account. It may or may not allow execution. This location will be
|
||||
created, if necessary, during :meth:`setup()`.
|
||||
|
||||
If not explicitly specified, this will be set to a default value
|
||||
depending on the type of :class:`Target`
|
||||
depending on the type of :class:`~devlib.target.Target`
|
||||
|
||||
:param executables_directory: This is the location to which ``devlib`` will
|
||||
install executable binaries (either during ``setup()`` or via an
|
||||
explicit ``install()`` call). This location *must* support execution
|
||||
install executable binaries (either during :meth:`setup()` or via an
|
||||
explicit :meth:`install()` call). This location *must* support execution
|
||||
(obviously). It should also be possible to write to this location,
|
||||
possibly with elevated privileges (i.e. on a rooted Linux target, it
|
||||
should be possible to write here with sudo, but not necessarily directly
|
||||
by the connection's account). This location will be created,
|
||||
if necessary, during ``setup()``.
|
||||
by the connection's account). This location will be created, if
|
||||
necessary, during :meth:`setup()`.
|
||||
|
||||
This location does *not* need to be same as the system's executables
|
||||
location. In fact, to prevent devlib from overwriting system's defaults,
|
||||
it is better if this is a separate location, if possible.
|
||||
|
||||
If not explicitly specified, this will be set to a default value
|
||||
depending on the type of :class:`Target`
|
||||
depending on the type of :class:`~devlib.target.Target`
|
||||
|
||||
:param connect: Specifies whether a connection should be established to the
|
||||
target. If this is set to ``False``, then ``connect()`` must be
|
||||
explicitly called later on before the :class:`Target` instance can be
|
||||
used.
|
||||
target. If this is set to ``False``, then :meth:`connect()` must be
|
||||
explicitly called later on before the :class:`~devlib.target.Target`
|
||||
instance can be used.
|
||||
|
||||
:param modules: a list of additional modules to be installed. Some modules will
|
||||
try to install by default (if supported by the underlying target).
|
||||
:param modules: a list of additional modules to be installed. Some modules
|
||||
will try to install by default (if supported by the underlying target).
|
||||
Current default modules are ``hotplug``, ``cpufreq``, ``cpuidle``,
|
||||
``cgroups``, and ``hwmon`` (See :ref:`modules`\ ).
|
||||
|
||||
@@ -59,40 +64,40 @@ Target
|
||||
|
||||
:param load_default_modules: If set to ``False``, default modules listed
|
||||
above will *not* attempt to load. This may be used to either speed up
|
||||
target instantiation (probing for initializing modules takes a bit of time)
|
||||
or if there is an issue with one of the modules on a particular device
|
||||
(the rest of the modules will then have to be explicitly specified in
|
||||
the ``modules``).
|
||||
target instantiation (probing for initializing modules takes a bit of
|
||||
time) or if there is an issue with one of the modules on a particular
|
||||
device (the rest of the modules will then have to be explicitly
|
||||
specified in the ``modules``).
|
||||
|
||||
:param shell_prompt: This is a regular expression that matches the shell
|
||||
prompted on the target. This may be used by some modules that establish
|
||||
auxiliary connections to a target over UART.
|
||||
|
||||
:param conn_cls: This is the type of connection that will be used to communicate
|
||||
with the device.
|
||||
:param conn_cls: This is the type of connection that will be used to
|
||||
communicate with the device.
|
||||
|
||||
.. attribute:: Target.core_names
|
||||
|
||||
This is a list containing names of CPU cores on the target, in the order in
|
||||
which they are indexed by the kernel. This is obtained via the underlying
|
||||
:class:`Platform`.
|
||||
:class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.core_clusters
|
||||
|
||||
Some devices feature heterogeneous core configurations (such as ARM
|
||||
big.LITTLE). This is a list that maps CPUs onto underlying clusters.
|
||||
(Usually, but not always, clusters correspond to groups of CPUs with the same
|
||||
name). This is obtained via the underlying :class:`Platform`.
|
||||
name). This is obtained via the underlying :class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.big_core
|
||||
|
||||
This is the name of the cores that are the "big"s in an ARM big.LITTLE
|
||||
configuration. This is obtained via the underlying :class:`Platform`.
|
||||
configuration. This is obtained via the underlying :class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.little_core
|
||||
|
||||
This is the name of the cores that are the "little"s in an ARM big.LITTLE
|
||||
configuration. This is obtained via the underlying :class:`Platform`.
|
||||
configuration. This is obtained via the underlying :class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.is_connected
|
||||
|
||||
@@ -120,6 +125,27 @@ Target
|
||||
This is a dict that contains a mapping of OS version elements to their
|
||||
values. This mapping is OS-specific.
|
||||
|
||||
.. attribute:: Target.hostname
|
||||
|
||||
A string containing the hostname of the target.
|
||||
|
||||
.. attribute:: Target.hostid
|
||||
|
||||
A numerical id used to represent the identity of the target.
|
||||
|
||||
.. note:: Currently on 64-bit PowerPC devices this id will always be 0. This is
|
||||
due to the included busybox binary being linked with musl.
|
||||
|
||||
.. attribute:: Target.system_id
|
||||
|
||||
A unique identifier for the system running on the target. This identifier is
|
||||
intended to be unique for the combination of hardware, kernel, and file
|
||||
system.
|
||||
|
||||
.. attribute:: Target.model
|
||||
|
||||
The model name/number of the target device.
|
||||
|
||||
.. attribute:: Target.cpuinfo
|
||||
|
||||
This is a :class:`Cpuinfo` instance which contains parsed contents of
|
||||
@@ -142,11 +168,11 @@ Target
|
||||
|
||||
The underlying connection object. This will be ``None`` if an active
|
||||
connection does not exist (e.g. if ``connect=False`` as passed on
|
||||
initialization and ``connect()`` has not been called).
|
||||
initialization and :meth:`connect()` has not been called).
|
||||
|
||||
.. note:: a :class:`Target` will automatically create a connection per
|
||||
thread. This will always be set to the connection for the current
|
||||
thread.
|
||||
.. note:: a :class:`~devlib.target.Target` will automatically create a
|
||||
connection per thread. This will always be set to the connection
|
||||
for the current thread.
|
||||
|
||||
.. method:: Target.connect([timeout])
|
||||
|
||||
@@ -166,19 +192,20 @@ Target
|
||||
being executed.
|
||||
|
||||
This should *not* be used to establish an initial connection; use
|
||||
``connect()`` instead.
|
||||
:meth:`connect()` instead.
|
||||
|
||||
.. note:: :class:`Target` will automatically create a connection per
|
||||
thread, so you don't normally need to use this explicitly in
|
||||
.. note:: :class:`~devlib.target.Target` will automatically create a connection
|
||||
per thread, so you don't normally need to use this explicitly in
|
||||
threaded code. This is generally useful if you want to perform a
|
||||
blocking operation (e.g. using ``background()``) while at the same
|
||||
blocking operation (e.g. using :class:`background()`) while at the same
|
||||
time doing something else in the same host-side thread.
|
||||
|
||||
.. method:: Target.setup([executables])
|
||||
|
||||
This will perform an initial one-time set up of a device for devlib
|
||||
interaction. This involves deployment of tools relied on the :class:`Target`,
|
||||
creation of working locations on the device, etc.
|
||||
interaction. This involves deployment of tools relied on the
|
||||
:class:`~devlib.target.Target`, creation of working locations on the device,
|
||||
etc.
|
||||
|
||||
Usually, it is enough to call this method once per new device, as its effects
|
||||
will persist across reboots. However, it is safe to call this method multiple
|
||||
@@ -202,27 +229,46 @@ Target
|
||||
operations during reboot process to detect if the reboot has failed and
|
||||
the device has hung.
|
||||
|
||||
.. method:: Target.push(source, dest [,as_root , timeout])
|
||||
.. method:: Target.push(source, dest [,as_root , timeout, globbing])
|
||||
|
||||
Transfer a file from the host machine to the target device.
|
||||
|
||||
:param source: path of to the file on the host
|
||||
:param dest: path of to the file on the target
|
||||
If transfer polling is supported (ADB connections and SSH connections),
|
||||
``poll_transfers`` is set in the connection, and a timeout is not specified,
|
||||
the push will be polled for activity. Inactive transfers will be
|
||||
cancelled. (See :ref:`connection-types` for more information on polling).
|
||||
|
||||
:param source: path on the host
|
||||
:param dest: path on the target
|
||||
:param as_root: whether root is required. Defaults to false.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
not complete within this period, an exception will be raised. Leave unset
|
||||
to utilise transfer polling if enabled.
|
||||
:param globbing: If ``True``, the ``source`` is interpreted as a globbing
|
||||
pattern instead of being take as-is. If the pattern has multiple
|
||||
matches, ``dest`` must be a folder (or will be created as such if it
|
||||
does not exists yet).
|
||||
|
||||
.. method:: Target.pull(source, dest [, as_root, timeout])
|
||||
.. method:: Target.pull(source, dest [, as_root, timeout, globbing])
|
||||
|
||||
Transfer a file from the target device to the host machine.
|
||||
|
||||
:param source: path of to the file on the target
|
||||
:param dest: path of to the file on the host
|
||||
If transfer polling is supported (ADB connections and SSH connections),
|
||||
``poll_transfers`` is set in the connection, and a timeout is not specified,
|
||||
the pull will be polled for activity. Inactive transfers will be
|
||||
cancelled. (See :ref:`connection-types` for more information on polling).
|
||||
|
||||
:param source: path on the target
|
||||
:param dest: path on the host
|
||||
:param as_root: whether root is required. Defaults to false.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
:param globbing: If ``True``, the ``source`` is interpreted as a globbing
|
||||
pattern instead of being take as-is. If the pattern has multiple
|
||||
matches, ``dest`` must be a folder (or will be created as such if it
|
||||
does not exists yet).
|
||||
|
||||
.. method:: Target.execute(command [, timeout [, check_exit_code [, as_root]]])
|
||||
.. method:: Target.execute(command [, timeout [, check_exit_code [, as_root [, strip_colors [, will_succeed [, force_locale]]]]]])
|
||||
|
||||
Execute the specified command on the target device and return its output.
|
||||
|
||||
@@ -235,8 +281,18 @@ Target
|
||||
raised if it is not ``0``.
|
||||
:param as_root: The command will be executed as root. This will fail on
|
||||
unrooted targets.
|
||||
:param strip_colours: The command output will have colour encodings and
|
||||
most ANSI escape sequences striped out before returning.
|
||||
:param will_succeed: The command is assumed to always succeed, unless there is
|
||||
an issue in the environment like the loss of network connectivity. That
|
||||
will make the method always raise an instance of a subclass of
|
||||
:class:`DevlibTransientError` when the command fails, instead of a
|
||||
:class:`DevlibStableError`.
|
||||
:param force_locale: Prepend ``LC_ALL=<force_locale>`` in front of the
|
||||
command to get predictable output that can be more safely parsed.
|
||||
If ``None``, no locale is prepended.
|
||||
|
||||
.. method:: Target.background(command [, stdout [, stderr [, as_root]]])
|
||||
.. method:: Target.background(command [, stdout [, stderr [, as_root, [, force_locale [, timeout]]])
|
||||
|
||||
Execute the command on the target, invoking it via subprocess on the host.
|
||||
This will return :class:`subprocess.Popen` instance for the command.
|
||||
@@ -248,6 +304,12 @@ Target
|
||||
this may be used to redirect it to an alternative file handle.
|
||||
:param as_root: The command will be executed as root. This will fail on
|
||||
unrooted targets.
|
||||
:param force_locale: Prepend ``LC_ALL=<force_locale>`` in front of the
|
||||
command to get predictable output that can be more safely parsed.
|
||||
If ``None``, no locale is prepended.
|
||||
:param timeout: Timeout (in seconds) for the execution of the command. When
|
||||
the timeout expires, :meth:`BackgroundCommand.cancel` is executed to
|
||||
terminate the command.
|
||||
|
||||
.. note:: This **will block the connection** until the command completes.
|
||||
|
||||
@@ -261,31 +323,31 @@ Target
|
||||
a string.
|
||||
:param in_directory: execute the binary in the specified directory. This must
|
||||
be an absolute path.
|
||||
:param on_cpus: taskset the binary to these CPUs. This may be a single ``int`` (in which
|
||||
case, it will be interpreted as the mask), a list of ``ints``, in which
|
||||
case this will be interpreted as the list of cpus, or string, which
|
||||
will be interpreted as a comma-separated list of cpu ranges, e.g.
|
||||
``"0,4-7"``.
|
||||
:param on_cpus: taskset the binary to these CPUs. This may be a single
|
||||
``int`` (in which case, it will be interpreted as the mask), a list of
|
||||
``ints``, in which case this will be interpreted as the list of cpus,
|
||||
or string, which will be interpreted as a comma-separated list of cpu
|
||||
ranges, e.g. ``"0,4-7"``.
|
||||
:param as_root: Specify whether the command should be run as root
|
||||
:param timeout: If this is specified and invocation does not terminate within this number
|
||||
of seconds, an exception will be raised.
|
||||
|
||||
.. method:: Target.background_invoke(binary [, args [, in_directory [, on_cpus [, as_root ]]]])
|
||||
|
||||
Execute the specified binary on target (must already be installed) as a background
|
||||
task, under the specified conditions and return the :class:`subprocess.Popen`
|
||||
instance for the command.
|
||||
Execute the specified binary on target (must already be installed) as a
|
||||
background task, under the specified conditions and return the
|
||||
:class:`subprocess.Popen` instance for the command.
|
||||
|
||||
:param binary: binary to execute. Must be present and executable on the device.
|
||||
:param args: arguments to be passed to the binary. The can be either a list or
|
||||
a string.
|
||||
:param in_directory: execute the binary in the specified directory. This must
|
||||
be an absolute path.
|
||||
:param on_cpus: taskset the binary to these CPUs. This may be a single ``int`` (in which
|
||||
case, it will be interpreted as the mask), a list of ``ints``, in which
|
||||
case this will be interpreted as the list of cpus, or string, which
|
||||
will be interpreted as a comma-separated list of cpu ranges, e.g.
|
||||
``"0,4-7"``.
|
||||
:param on_cpus: taskset the binary to these CPUs. This may be a single
|
||||
``int`` (in which case, it will be interpreted as the mask), a list of
|
||||
``ints``, in which case this will be interpreted as the list of cpus,
|
||||
or string, which will be interpreted as a comma-separated list of cpu
|
||||
ranges, e.g. ``"0,4-7"``.
|
||||
:param as_root: Specify whether the command should be run as root
|
||||
|
||||
.. method:: Target.kick_off(command [, as_root])
|
||||
@@ -329,7 +391,19 @@ Target
|
||||
some sysfs entries silently failing to set the written value without
|
||||
returning an error code.
|
||||
|
||||
.. method:: Target.read_tree_values(path, depth=1, dictcls=dict):
|
||||
.. method:: Target.revertable_write_value(path, value [, verify])
|
||||
|
||||
Same as :meth:`Target.write_value`, but as a context manager that will write
|
||||
back the previous value on exit.
|
||||
|
||||
.. method:: Target.batch_revertable_write_value(kwargs_list)
|
||||
|
||||
Calls :meth:`Target.revertable_write_value` with all the keyword arguments
|
||||
dictionary given in the list. This is a convenience method to update
|
||||
multiple files at once, leaving them in their original state on exit. If one
|
||||
write fails, all the already-performed writes will be reverted as well.
|
||||
|
||||
.. method:: Target.read_tree_values(path, depth=1, dictcls=dict, [, tar [, decode_unicode [, strip_null_char ]]])
|
||||
|
||||
Read values of all sysfs (or similar) file nodes under ``path``, traversing
|
||||
up to the maximum depth ``depth``.
|
||||
@@ -341,11 +415,20 @@ Target
|
||||
value is a dict-line object with a key for every entry under ``path``
|
||||
mapping onto its value or further dict-like objects as appropriate.
|
||||
|
||||
Although the default behaviour should suit most users, it is possible to
|
||||
encounter issues when reading binary files, or files with colons in their
|
||||
name for example. In such cases, the ``tar`` parameter can be set to force a
|
||||
full archive of the tree using tar, hence providing a more robust behaviour.
|
||||
This can, however, slow down the read process significantly.
|
||||
|
||||
:param path: sysfs path to scan
|
||||
:param depth: maximum depth to descend
|
||||
:param dictcls: a dict-like type to be used for each level of the hierarchy.
|
||||
:param tar: the files will be read using tar rather than grep
|
||||
:param decode_unicode: decode the content of tar-ed files as utf-8
|
||||
:param strip_null_char: remove null chars from utf-8 decoded files
|
||||
|
||||
.. method:: Target.read_tree_values_flat(path, depth=1):
|
||||
.. method:: Target.read_tree_values_flat(path, depth=1)
|
||||
|
||||
Read values of all sysfs (or similar) file nodes under ``path``, traversing
|
||||
up to the maximum depth ``depth``.
|
||||
@@ -389,6 +472,10 @@ Target
|
||||
Return a list of :class:`PsEntry` instances for all running processes on the
|
||||
system.
|
||||
|
||||
.. method:: Target.makedirs(self, path)
|
||||
|
||||
Create a directory at the given path and all its ancestors if needed.
|
||||
|
||||
.. method:: Target.file_exists(self, filepath)
|
||||
|
||||
Returns ``True`` if the specified path exists on the target and ``False``
|
||||
@@ -504,15 +591,43 @@ Target
|
||||
|
||||
:returns: ``True`` if internet seems available, ``False`` otherwise.
|
||||
|
||||
.. method:: Target.install_module(mod, **params)
|
||||
|
||||
:param mod: The module name or object to be installed to the target.
|
||||
:param params: Keyword arguments used to instantiate the module.
|
||||
|
||||
Installs an additional module to the target after the initial setup has been
|
||||
performed.
|
||||
|
||||
Linux Target
|
||||
------------
|
||||
|
||||
.. class:: LinuxTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=SshConnection, is_container=False,)
|
||||
|
||||
:class:`LinuxTarget` is a subclass of :class:`~devlib.target.Target`
|
||||
with customisations specific to a device running linux.
|
||||
|
||||
|
||||
Local Linux Target
|
||||
------------------
|
||||
|
||||
.. class:: LocalLinuxTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=SshConnection, is_container=False,)
|
||||
|
||||
:class:`LocalLinuxTarget` is a subclass of
|
||||
:class:`~devlib.target.LinuxTarget` with customisations specific to using
|
||||
the host machine running linux as the target.
|
||||
|
||||
|
||||
Android Target
|
||||
---------------
|
||||
|
||||
.. class:: AndroidTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=AdbConnection, package_data_directory="/data/data")
|
||||
|
||||
:class:`AndroidTarget` is a subclass of :class:`Target` with additional features specific to a device running Android.
|
||||
:class:`AndroidTarget` is a subclass of :class:`~devlib.target.Target` with
|
||||
additional features specific to a device running Android.
|
||||
|
||||
:param package_data_directory: This is the location of the data stored
|
||||
for installed Android packages on the device.
|
||||
:param package_data_directory: This is the location of the data stored for
|
||||
installed Android packages on the device.
|
||||
|
||||
.. method:: AndroidTarget.set_rotation(rotation)
|
||||
|
||||
@@ -585,18 +700,58 @@ Android Target
|
||||
Returns ``True`` if the targets auto brightness is currently
|
||||
enabled and ``False`` otherwise.
|
||||
|
||||
.. method:: AndroidTarget.ensure_screen_is_off()
|
||||
.. method:: AndroidTarget.set_stay_on_never()
|
||||
|
||||
Sets the stay-on mode to ``0``, where the screen will turn off
|
||||
as standard after the timeout.
|
||||
|
||||
.. method:: AndroidTarget.set_stay_on_while_powered()
|
||||
|
||||
Sets the stay-on mode to ``7``, where the screen will stay on
|
||||
while the device is charging
|
||||
|
||||
.. method:: AndroidTarget.set_stay_on_mode(mode)
|
||||
|
||||
Sets the stay-on mode to the specified number between ``0`` and
|
||||
``7`` (inclusive).
|
||||
|
||||
.. method:: AndroidTarget.get_stay_on_mode()
|
||||
|
||||
Returns an integer between ``0`` and ``7`` representing the current
|
||||
stay-on mode of the device.
|
||||
|
||||
.. method:: AndroidTarget.ensure_screen_is_off(verify=True)
|
||||
|
||||
Checks if the devices screen is on and if so turns it off.
|
||||
If ``verify`` is set to ``True`` then a ``TargetStableError``
|
||||
will be raise if the display cannot be turned off. E.g. if
|
||||
always on mode is enabled.
|
||||
|
||||
.. method:: AndroidTarget.ensure_screen_is_on()
|
||||
.. method:: AndroidTarget.ensure_screen_is_on(verify=True)
|
||||
|
||||
Checks if the devices screen is off and if so turns it on.
|
||||
If ``verify`` is set to ``True`` then a ``TargetStableError``
|
||||
will be raise if the display cannot be turned on.
|
||||
|
||||
.. method:: AndroidTarget.ensure_screen_is_on_and_stays(verify=True, mode=7)
|
||||
|
||||
Calls ``AndroidTarget.ensure_screen_is_on(verify)`` then additionally
|
||||
sets the screen stay on mode to ``mode``.
|
||||
|
||||
.. method:: AndroidTarget.is_screen_on()
|
||||
|
||||
Returns ``True`` if the targets screen is currently on and ``False``
|
||||
otherwise.
|
||||
otherwise. If the display is in a "Doze" mode or similar always on state,
|
||||
this will return ``True``.
|
||||
|
||||
.. method:: AndroidTarget.wait_for_device(timeout=30)
|
||||
|
||||
Returns when the devices becomes available withing the given timeout
|
||||
otherwise returns a ``TimeoutError``.
|
||||
|
||||
.. method:: AndroidTarget.reboot_bootloader(timeout=30)
|
||||
|
||||
Attempts to reboot the target into it's bootloader.
|
||||
|
||||
.. method:: AndroidTarget.homescreen()
|
||||
|
||||
@@ -629,9 +784,9 @@ ChromeOS Target
|
||||
:class:`ChromeOsTarget` if the device supports android otherwise only the
|
||||
:class:`LinuxTarget` methods will be available.
|
||||
|
||||
:param working_directory: This is the location of the working
|
||||
directory to be used for the Linux target container. If not specified will
|
||||
default to ``"/mnt/stateful_partition/devlib-target"``.
|
||||
:param working_directory: This is the location of the working directory to
|
||||
be used for the Linux target container. If not specified will default to
|
||||
``"/mnt/stateful_partition/devlib-target"``.
|
||||
|
||||
:param android_working_directory: This is the location of the working
|
||||
directory to be used for the android container. If not specified it will
|
||||
@@ -639,7 +794,7 @@ ChromeOS Target
|
||||
|
||||
:param android_executables_directory: This is the location of the
|
||||
executables directory to be used for the android container. If not
|
||||
specified will default to a ``bin`` subfolder in the
|
||||
specified will default to a ``bin`` subdirectory in the
|
||||
``android_working_directory.``
|
||||
|
||||
:param package_data_directory: This is the location of the data stored
|
||||
|
46
setup.py
46
setup.py
@@ -41,23 +41,13 @@ except OSError:
|
||||
pass
|
||||
|
||||
|
||||
with open(os.path.join(devlib_dir, '__init__.py')) as fh:
|
||||
# Extract the version by parsing the text of the file,
|
||||
# as may not be able to load as a module yet.
|
||||
for line in fh:
|
||||
if '__version__' in line:
|
||||
parts = line.split("'")
|
||||
__version__ = parts[1]
|
||||
break
|
||||
else:
|
||||
raise RuntimeError('Did not see __version__')
|
||||
|
||||
vh_path = os.path.join(devlib_dir, 'utils', 'version.py')
|
||||
# can load this, as it does not have any devlib imports
|
||||
version_helper = imp.load_source('version_helper', vh_path)
|
||||
commit = version_helper.get_commit()
|
||||
if commit:
|
||||
__version__ = '{}+{}'.format(__version__, commit)
|
||||
vh_path = os.path.join(devlib_dir, 'utils', 'version.py')
|
||||
# can load this, as it does not have any devlib imports
|
||||
version_helper = imp.load_source('version_helper', vh_path)
|
||||
__version__ = version_helper.get_devlib_version()
|
||||
commit = version_helper.get_commit()
|
||||
if commit:
|
||||
__version__ = '{}+{}'.format(__version__, commit)
|
||||
|
||||
|
||||
packages = []
|
||||
@@ -79,9 +69,13 @@ for root, dirs, files in os.walk(devlib_dir):
|
||||
filepaths = [os.path.join(root, f) for f in files]
|
||||
data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])
|
||||
|
||||
with open("README.rst", "r") as fh:
|
||||
long_description = fh.read()
|
||||
|
||||
params = dict(
|
||||
name='devlib',
|
||||
description='A framework for automating workload execution and measurment collection on ARM devices.',
|
||||
description='A library for interacting with and instrumentation of remote devices.',
|
||||
long_description=long_description,
|
||||
version=__version__,
|
||||
packages=packages,
|
||||
package_data=data_files,
|
||||
@@ -92,20 +86,30 @@ params = dict(
|
||||
'python-dateutil', # converting between UTC and local time.
|
||||
'pexpect>=3.3', # Send/recieve to/from device
|
||||
'pyserial', # Serial port interface
|
||||
'paramiko', # SSH connection
|
||||
'scp', # SSH connection file transfers
|
||||
'wrapt', # Basic for construction of decorator functions
|
||||
'future', # Python 2-3 compatibility
|
||||
'enum34;python_version<"3.4"', # Enums for Python < 3.4
|
||||
'contextlib2;python_version<"3.0"', # Python 3 contextlib backport for Python 2
|
||||
'numpy<=1.16.4; python_version<"3"',
|
||||
'numpy; python_version>="3"',
|
||||
'pandas<=0.24.2; python_version<"3"',
|
||||
'pandas; python_version>"3"',
|
||||
'lxml', # More robust xml parsing
|
||||
],
|
||||
extras_require={
|
||||
'daq': ['daqpower'],
|
||||
'daq': ['daqpower>=2'],
|
||||
'doc': ['sphinx'],
|
||||
'monsoon': ['python-gflags'],
|
||||
'acme': ['pandas', 'numpy'],
|
||||
},
|
||||
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
|
||||
classifiers=[
|
||||
'Development Status :: 4 - Beta',
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: POSIX :: Linux',
|
||||
'Programming Language :: Python :: 2.7',
|
||||
'Programming Language :: Python :: 3',
|
||||
],
|
||||
)
|
||||
|
||||
|
6
src/get_clock_boottime/Makefile
Normal file
6
src/get_clock_boottime/Makefile
Normal file
@@ -0,0 +1,6 @@
|
||||
CFLAGS=-Wall --pedantic-errors -O2 -static
|
||||
|
||||
all: get_clock_boottime
|
||||
|
||||
get_clock_boottime: get_clock_boottime.c
|
||||
$(CC) $(CFLAGS) $^ -o $@
|
18
src/get_clock_boottime/get_clock_boottime.c
Normal file
18
src/get_clock_boottime/get_clock_boottime.c
Normal file
@@ -0,0 +1,18 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
int main(void) {
|
||||
int ret;
|
||||
struct timespec tp;
|
||||
|
||||
ret = clock_gettime(CLOCK_BOOTTIME, &tp);
|
||||
if (ret) {
|
||||
perror("clock_gettime()");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
printf("%ld.%ld\n", tp.tv_sec, tp.tv_nsec);
|
||||
|
||||
return EXIT_SUCCESS;
|
||||
}
|
32
tests/test_target.py
Normal file
32
tests/test_target.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
from unittest import TestCase
|
||||
|
||||
from devlib import LocalLinuxTarget
|
||||
|
||||
|
||||
class TestReadTreeValues(TestCase):
|
||||
|
||||
def test_read_multiline_values(self):
|
||||
data = {
|
||||
'test1': '1',
|
||||
'test2': '2\n\n',
|
||||
'test3': '3\n\n4\n\n',
|
||||
}
|
||||
|
||||
tempdir = tempfile.mkdtemp(prefix='devlib-test-')
|
||||
for key, value in data.items():
|
||||
path = os.path.join(tempdir, key)
|
||||
with open(path, 'w') as wfh:
|
||||
wfh.write(value)
|
||||
|
||||
t = LocalLinuxTarget(connection_settings={'unrooted': True})
|
||||
raw_result = t.read_tree_values_flat(tempdir)
|
||||
result = {os.path.basename(k): v for k, v in raw_result.items()}
|
||||
|
||||
shutil.rmtree(tempdir)
|
||||
|
||||
self.assertEqual({k: v.strip()
|
||||
for k, v in data.items()},
|
||||
result)
|
Reference in New Issue
Block a user