Mirror of https://github.com/ARM-software/devlib.git (synced 2025-09-22 20:01:53 +01:00)

Compare commits: 236 commits
README.rst (10 lines changed)
@@ -14,6 +14,16 @@ Installation

sudo -H pip install devlib

Dependencies
------------

``devlib`` should install all dependencies automatically; however, if you run
into issues please ensure you are using the latest version of pip.

On some systems there may be additional steps required to install the dependency
``paramiko``; please consult the `module documentation <http://www.paramiko.org/installing.html>`_
for more information.

Usage
-----
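The new Usage section is truncated in this hunk. As a hedged illustration only (not text from the README), a minimal quick-start on a local Linux host could look like the sketch below; LocalLinuxTarget is devlib API that is not shown in this diff, so treat the exact names as assumptions.

# Minimal quick-start sketch; assumes devlib is installed and the host itself
# is used as the target. Names not appearing in this diff are assumptions.
from devlib import LocalLinuxTarget

target = LocalLinuxTarget()
print(target.os, target.abi)            # basic target introspection
print(target.execute('uname -a'))       # run a command on the target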
@@ -45,18 +45,21 @@ from devlib.derived import DerivedMeasurements, DerivedMetric
from devlib.derived.energy import DerivedEnergyMeasurements
from devlib.derived.fps import DerivedGfxInfoStats, DerivedSurfaceFlingerStats

from devlib.trace.ftrace import FtraceCollector
from devlib.trace.perf import PerfCollector
from devlib.trace.serial_trace import SerialTraceCollector
from devlib.collector.ftrace import FtraceCollector
from devlib.collector.perf import PerfCollector
from devlib.collector.serial_trace import SerialTraceCollector
from devlib.collector.dmesg import DmesgCollector
from devlib.collector.logcat import LogcatCollector

from devlib.host import LocalConnection
from devlib.utils.android import AdbConnection
from devlib.utils.ssh import SshConnection, TelnetConnection, Gem5Connection

from devlib.utils.version import get_commit as __get_commit
from devlib.utils.version import (get_devlib_version as __get_devlib_version,
                                  get_commit as __get_commit)


__version__ = '1.1.0'
__version__ = __get_devlib_version()

__commit = __get_commit()
if __commit:
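This hunk moves the collector imports from devlib.trace to the new devlib.collector package and derives __version__ from get_devlib_version(). For downstream code that has to run on both sides of this change, a hedged compatibility shim might look like:

# Hedged migration sketch: prefer the new devlib.collector location and fall
# back to the old devlib.trace module on releases that predate this change.
try:
    from devlib.collector.ftrace import FtraceCollector
except ImportError:
    from devlib.trace.ftrace import FtraceCollector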
BIN devlib/bin/arm/simpleperf (executable file; binary not shown)
BIN devlib/bin/arm64/get_clock_boottime (executable file; binary not shown)
BIN devlib/bin/arm64/simpleperf (executable file; binary not shown)
BIN devlib/bin/armeabi/get_clock_boottime (executable file; binary not shown)
@@ -238,6 +238,19 @@ hotplug_online_all() {
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
################################################################################
|
||||
# Scheduler
|
||||
################################################################################
|
||||
|
||||
sched_get_kernel_attributes() {
|
||||
MATCH=${1:-'.*'}
|
||||
[ -d /proc/sys/kernel/ ] || exit 1
|
||||
$GREP '' /proc/sys/kernel/sched_* | \
|
||||
$SED -e 's|/proc/sys/kernel/sched_||' | \
|
||||
$GREP -e "$MATCH"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Misc
|
||||
################################################################################
|
||||
@@ -264,6 +277,34 @@ read_tree_values() {
|
||||
fi
|
||||
}
|
||||
|
||||
read_tree_tgz_b64() {
|
||||
BASEPATH=$1
|
||||
MAXDEPTH=$2
|
||||
TMPBASE=$3
|
||||
|
||||
if [ ! -e $BASEPATH ]; then
|
||||
echo "ERROR: $BASEPATH does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd $TMPBASE
|
||||
TMP_FOLDER=$($BUSYBOX realpath $($BUSYBOX mktemp -d XXXXXX))
|
||||
|
||||
# 'tar' doesn't work as expected on debugfs, so copy the tree first to
|
||||
# work around the issue
|
||||
cd $BASEPATH
|
||||
for CUR_FILE in $($BUSYBOX find . -follow -type f -maxdepth $MAXDEPTH); do
|
||||
$BUSYBOX cp --parents $CUR_FILE $TMP_FOLDER/ 2> /dev/null
|
||||
done
|
||||
|
||||
cd $TMP_FOLDER
|
||||
$BUSYBOX tar cz * 2>/dev/null | $BUSYBOX base64
|
||||
|
||||
# Clean-up the tmp folder since we won't need it any more
|
||||
cd $TMPBASE
|
||||
rm -rf $TMP_FOLDER
|
||||
}
|
||||
|
||||
get_linux_system_id() {
|
||||
kernel=$($BUSYBOX uname -r)
|
||||
hardware=$($BUSYBOX ip a | $BUSYBOX grep 'link/ether' | $BUSYBOX sed 's/://g' | $BUSYBOX awk '{print $2}' | $BUSYBOX tr -d '\n')
|
||||
@@ -337,12 +378,18 @@ hotplug_online_all)
|
||||
read_tree_values)
|
||||
read_tree_values $*
|
||||
;;
|
||||
read_tree_tgz_b64)
|
||||
read_tree_tgz_b64 $*
|
||||
;;
|
||||
get_linux_system_id)
|
||||
get_linux_system_id $*
|
||||
;;
|
||||
get_android_system_id)
|
||||
get_android_system_id $*
|
||||
;;
|
||||
sched_get_kernel_attributes)
|
||||
sched_get_kernel_attributes $*
|
||||
;;
|
||||
*)
|
||||
echo "Command [$CMD] not supported"
|
||||
exit -1
|
||||
|
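read_tree_tgz_b64 above prints a gzipped tarball of the requested subtree, base64-encoded so it survives the shell transport. The host-side consumer is not part of this hunk; as an assumption-labelled sketch, its output could be decoded in Python as follows:

# Hedged host-side sketch (the real devlib helper is not shown in this diff):
# decode the base64 text emitted by read_tree_tgz_b64 and unpack the tarball.
import base64
import io
import tarfile

def extract_tree_tgz_b64(b64_text, dest_dir):
    tar_bytes = base64.b64decode(b64_text)
    with tarfile.open(fileobj=io.BytesIO(tar_bytes), mode='r:gz') as tar:
        tar.extractall(dest_dir)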
BIN devlib/bin/x86/simpleperf (executable file; binary not shown)
BIN devlib/bin/x86_64/simpleperf (executable file; binary not shown)
@@ -15,12 +15,14 @@
|
||||
|
||||
import logging
|
||||
|
||||
from devlib.utils.types import caseless_string
|
||||
|
||||
class TraceCollector(object):
|
||||
class CollectorBase(object):
|
||||
|
||||
def __init__(self, target):
|
||||
self.target = target
|
||||
self.logger = logging.getLogger(self.__class__.__name__)
|
||||
self.output_path = None
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
@@ -31,6 +33,12 @@ class TraceCollector(object):
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
return CollectorOutput()
|
||||
|
||||
def __enter__(self):
|
||||
self.reset()
|
||||
self.start()
|
||||
@@ -39,5 +47,29 @@ class TraceCollector(object):
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.stop()
|
||||
|
||||
def get_trace(self, outfile):
|
||||
pass
|
||||
class CollectorOutputEntry(object):
|
||||
|
||||
path_kinds = ['file', 'directory']
|
||||
|
||||
def __init__(self, path, path_kind):
|
||||
self.path = path
|
||||
|
||||
path_kind = caseless_string(path_kind)
|
||||
if path_kind not in self.path_kinds:
|
||||
msg = '{} is not a valid path_kind [{}]'
|
||||
raise ValueError(msg.format(path_kind, ' '.join(self.path_kinds)))
|
||||
self.path_kind = path_kind
|
||||
|
||||
def __str__(self):
|
||||
return self.path
|
||||
|
||||
def __repr__(self):
|
||||
return '<{} ({})>'.format(self.path, self.path_kind)
|
||||
|
||||
def __fspath__(self):
|
||||
"""Allow using with os.path operations"""
|
||||
return self.path
|
||||
|
||||
|
||||
class CollectorOutput(list):
|
||||
pass
|
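CollectorBase replaces TraceCollector and fixes the lifecycle: reset()/start()/stop(), set_output() to choose the destination, and get_data() returning a CollectorOutput list of CollectorOutputEntry objects (which work with os.path functions via __fspath__). A hedged toy subclass, not part of devlib, illustrates the expected contract:

# Toy example of the new collector contract (illustrative only; `target` is
# assumed to be an already-connected devlib Target).
from devlib.collector import CollectorBase, CollectorOutput, CollectorOutputEntry

class UptimeCollector(CollectorBase):
    def reset(self):
        self._data = None

    def start(self):
        self._data = None

    def stop(self):
        # read_value() is existing devlib Target API
        self._data = self.target.read_value('/proc/uptime')

    def get_data(self):
        if self.output_path is None:
            raise RuntimeError('Output path was not set.')
        with open(self.output_path, 'w') as f:
            f.write(self._data + '\n')
        return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])

Driving it follows the same sequence as the concrete collectors below: call set_output() first, use the instance as a context manager around the workload, then call get_data().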
devlib/collector/dmesg.py (new file, 208 lines)
@@ -0,0 +1,208 @@
|
||||
# Copyright 2019 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from __future__ import division
|
||||
import re
|
||||
from itertools import takewhile
|
||||
from datetime import timedelta
|
||||
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
|
||||
|
||||
class KernelLogEntry(object):
|
||||
"""
|
||||
Entry of the kernel ring buffer.
|
||||
|
||||
:param facility: facility the entry comes from
|
||||
:type facility: str
|
||||
|
||||
:param level: log level
|
||||
:type level: str
|
||||
|
||||
:param timestamp: Timestamp of the entry
|
||||
:type timestamp: datetime.timedelta
|
||||
|
||||
:param msg: Content of the entry
|
||||
:type msg: str
|
||||
"""
|
||||
|
||||
_TIMESTAMP_MSG_REGEX = re.compile(r'\[(.*?)\] (.*)')
|
||||
_RAW_LEVEL_REGEX = re.compile(r'<([0-9]+)>(.*)')
|
||||
_PRETTY_LEVEL_REGEX = re.compile(r'\s*([a-z]+)\s*:([a-z]+)\s*:\s*(.*)')
|
||||
|
||||
def __init__(self, facility, level, timestamp, msg):
|
||||
self.facility = facility
|
||||
self.level = level
|
||||
self.timestamp = timestamp
|
||||
self.msg = msg
|
||||
|
||||
@classmethod
|
||||
def from_str(cls, line):
|
||||
"""
|
||||
Parses a "dmesg --decode" output line, formatted as follows:
|
||||
kern :err : [3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
|
||||
|
||||
Or the more basic output given by "dmesg -r":
|
||||
<3>[3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16
|
||||
|
||||
"""
|
||||
|
||||
def parse_raw_level(line):
|
||||
match = cls._RAW_LEVEL_REGEX.match(line)
|
||||
if not match:
|
||||
raise ValueError('dmesg entry format not recognized: {}'.format(line))
|
||||
level, remainder = match.groups()
|
||||
levels = DmesgCollector.LOG_LEVELS
|
||||
# BusyBox dmesg can output numbers that need to wrap around
|
||||
level = levels[int(level) % len(levels)]
|
||||
return level, remainder
|
||||
|
||||
def parse_pretty_level(line):
|
||||
match = cls._PRETTY_LEVEL_REGEX.match(line)
|
||||
facility, level, remainder = match.groups()
|
||||
return facility, level, remainder
|
||||
|
||||
def parse_timestamp_msg(line):
|
||||
match = cls._TIMESTAMP_MSG_REGEX.match(line)
|
||||
timestamp, msg = match.groups()
|
||||
timestamp = timedelta(seconds=float(timestamp.strip()))
|
||||
return timestamp, msg
|
||||
|
||||
line = line.strip()
|
||||
|
||||
# If we can parse the raw prio directly, that is a basic line
|
||||
try:
|
||||
level, remainder = parse_raw_level(line)
|
||||
facility = None
|
||||
except ValueError:
|
||||
facility, level, remainder = parse_pretty_level(line)
|
||||
|
||||
timestamp, msg = parse_timestamp_msg(remainder)
|
||||
|
||||
return cls(
|
||||
facility=facility,
|
||||
level=level,
|
||||
timestamp=timestamp,
|
||||
msg=msg.strip(),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_dmesg_output(cls, dmesg_out):
|
||||
"""
|
||||
Return a generator of :class:`KernelLogEntry` for each line of the
|
||||
output of dmesg command.
|
||||
|
||||
.. note:: The same restrictions on the dmesg output format as for
|
||||
:meth:`from_str` apply.
|
||||
"""
|
||||
for line in dmesg_out.splitlines():
|
||||
if line.strip():
|
||||
yield cls.from_str(line)
|
||||
|
||||
def __str__(self):
|
||||
facility = self.facility + ': ' if self.facility else ''
|
||||
return '{facility}{level}: [{timestamp}] {msg}'.format(
|
||||
facility=facility,
|
||||
level=self.level,
|
||||
timestamp=self.timestamp.total_seconds(),
|
||||
msg=self.msg,
|
||||
)
|
||||
|
||||
|
||||
class DmesgCollector(CollectorBase):
|
||||
"""
|
||||
Dmesg output collector.
|
||||
|
||||
:param level: Minimum log level to enable. All levels that are more
|
||||
critical will be collected as well.
|
||||
:type level: str
|
||||
|
||||
:param facility: Facility to record, see dmesg --help for the list.
|
||||
:type facility: str
|
||||
|
||||
.. warning:: If BusyBox dmesg is used, facility and level will be ignored,
|
||||
and the parsed entries will also lack that information.
|
||||
"""
|
||||
|
||||
# taken from "dmesg --help"
|
||||
# This list needs to be ordered by priority
|
||||
LOG_LEVELS = [
|
||||
"emerg", # system is unusable
|
||||
"alert", # action must be taken immediately
|
||||
"crit", # critical conditions
|
||||
"err", # error conditions
|
||||
"warn", # warning conditions
|
||||
"notice", # normal but significant condition
|
||||
"info", # informational
|
||||
"debug", # debug-level messages
|
||||
]
|
||||
|
||||
def __init__(self, target, level=LOG_LEVELS[-1], facility='kern'):
|
||||
super(DmesgCollector, self).__init__(target)
|
||||
self.output_path = None
|
||||
|
||||
if level not in self.LOG_LEVELS:
|
||||
raise ValueError('level needs to be one of: {}'.format(
|
||||
', '.join(self.LOG_LEVELS)
|
||||
))
|
||||
self.level = level
|
||||
|
||||
# Check if dmesg is the BusyBox one, or the one from util-linux in a
|
||||
# recent version.
|
||||
# Note: BusyBox dmesg does not support -h, but will still print the
|
||||
# help with an exit code of 1
|
||||
self.basic_dmesg = '--force-prefix' not in \
|
||||
self.target.execute('dmesg -h', check_exit_code=False)
|
||||
self.facility = facility
|
||||
self.reset()
|
||||
|
||||
@property
|
||||
def entries(self):
|
||||
return KernelLogEntry.from_dmesg_output(self.dmesg_out)
|
||||
|
||||
def reset(self):
|
||||
self.dmesg_out = None
|
||||
|
||||
def start(self):
|
||||
self.reset()
|
||||
# Empty the dmesg ring buffer
|
||||
self.target.execute('dmesg -c', as_root=True)
|
||||
|
||||
def stop(self):
|
||||
levels_list = list(takewhile(
|
||||
lambda level: level != self.level,
|
||||
self.LOG_LEVELS
|
||||
))
|
||||
levels_list.append(self.level)
|
||||
if self.basic_dmesg:
|
||||
cmd = 'dmesg -r'
|
||||
else:
|
||||
cmd = 'dmesg --facility={facility} --force-prefix --decode --level={levels}'.format(
|
||||
levels=','.join(levels_list),
|
||||
facility=self.facility,
|
||||
)
|
||||
|
||||
self.dmesg_out = self.target.execute(cmd)
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
with open(self.output_path, 'wt') as f:
|
||||
f.write(self.dmesg_out + '\n')
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
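KernelLogEntry.from_str() can be exercised without a target; the following hedged snippet parses the example line from its docstring and should run with just devlib installed. Collecting on a live, rooted target would follow the usual pattern: DmesgCollector(target, level='warn'), set_output(), the context manager around the workload, then get_data().

from devlib.collector.dmesg import KernelLogEntry

line = 'kern :err : [3618282.310743] nouveau 0000:01:00.0: systemd-logind[988]: nv50cal_space: -16'
entry = KernelLogEntry.from_str(line)
print(entry.facility, entry.level, entry.timestamp.total_seconds())
print(entry.msg)
print(entry)    # kern: err: [3618282.310743] nouveau 0000:01:00.0: ...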
@@ -20,11 +20,14 @@ import time
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import contextlib
|
||||
from pipes import quote
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.utils.misc import check_output, which
|
||||
from devlib.utils.misc import check_output, which, memoized
|
||||
|
||||
|
||||
TRACE_MARKER_START = 'TRACE_MARKER_START'
|
||||
@@ -48,12 +51,14 @@ TIMEOUT = 180
|
||||
CPU_RE = re.compile(r' Function \(CPU([0-9]+)\)')
|
||||
STATS_RE = re.compile(r'([^ ]*) +([0-9]+) +([0-9.]+) us +([0-9.]+) us +([0-9.]+) us')
|
||||
|
||||
class FtraceCollector(TraceCollector):
|
||||
class FtraceCollector(CollectorBase):
|
||||
|
||||
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
|
||||
def __init__(self, target,
|
||||
events=None,
|
||||
functions=None,
|
||||
tracer=None,
|
||||
trace_children_functions=False,
|
||||
buffer_size=None,
|
||||
buffer_size_step=1000,
|
||||
tracing_path='/sys/kernel/debug/tracing',
|
||||
@@ -63,26 +68,34 @@ class FtraceCollector(TraceCollector):
|
||||
no_install=False,
|
||||
strict=False,
|
||||
report_on_target=False,
|
||||
trace_clock='local',
|
||||
saved_cmdlines_nr=4096,
|
||||
):
|
||||
super(FtraceCollector, self).__init__(target)
|
||||
self.events = events if events is not None else DEFAULT_EVENTS
|
||||
self.functions = functions
|
||||
self.tracer = tracer
|
||||
self.trace_children_functions = trace_children_functions
|
||||
self.buffer_size = buffer_size
|
||||
self.buffer_size_step = buffer_size_step
|
||||
self.tracing_path = tracing_path
|
||||
self.automark = automark
|
||||
self.autoreport = autoreport
|
||||
self.autoview = autoview
|
||||
self.strict = strict
|
||||
self.report_on_target = report_on_target
|
||||
self.target_output_file = target.path.join(self.target.working_directory, OUTPUT_TRACE_FILE)
|
||||
text_file_name = target.path.splitext(OUTPUT_TRACE_FILE)[0] + '.txt'
|
||||
self.target_text_file = target.path.join(self.target.working_directory, text_file_name)
|
||||
self.output_path = None
|
||||
self.target_binary = None
|
||||
self.host_binary = None
|
||||
self.start_time = None
|
||||
self.stop_time = None
|
||||
self.event_string = None
|
||||
self.function_string = None
|
||||
self.trace_clock = trace_clock
|
||||
self.saved_cmdlines_nr = saved_cmdlines_nr
|
||||
self._reset_needed = True
|
||||
|
||||
# pylint: disable=bad-whitespace
|
||||
@@ -94,6 +107,9 @@ class FtraceCollector(TraceCollector):
|
||||
self.function_profile_file = self.target.path.join(self.tracing_path, 'function_profile_enabled')
|
||||
self.marker_file = self.target.path.join(self.tracing_path, 'trace_marker')
|
||||
self.ftrace_filter_file = self.target.path.join(self.tracing_path, 'set_ftrace_filter')
|
||||
self.trace_clock_file = self.target.path.join(self.tracing_path, 'trace_clock')
|
||||
self.save_cmdlines_size_file = self.target.path.join(self.tracing_path, 'saved_cmdlines_size')
|
||||
self.available_tracers_file = self.target.path.join(self.tracing_path, 'available_tracers')
|
||||
|
||||
self.host_binary = which('trace-cmd')
|
||||
self.kernelshark = which('kernelshark')
|
||||
@@ -113,65 +129,146 @@ class FtraceCollector(TraceCollector):
|
||||
self.target_binary = 'trace-cmd'
|
||||
|
||||
# Validate required events to be traced
|
||||
available_events = self.target.execute(
|
||||
'cat {}'.format(self.available_events_file),
|
||||
as_root=True).splitlines()
|
||||
selected_events = []
|
||||
for event in self.events:
|
||||
# Convert globs supported by FTrace into valid regexp globs
|
||||
_event = event
|
||||
if event[0] != '*':
|
||||
_event = '*' + event
|
||||
event_re = re.compile(_event.replace('*', '.*'))
|
||||
# Select events matching the required ones
|
||||
if not list(filter(event_re.match, available_events)):
|
||||
message = 'Event [{}] not available for tracing'.format(event)
|
||||
if strict:
|
||||
raise TargetStableError(message)
|
||||
self.target.logger.warning(message)
|
||||
def event_to_regex(event):
|
||||
if not event.startswith('*'):
|
||||
event = '*' + event
|
||||
|
||||
return re.compile(event.replace('*', '.*'))
|
||||
|
||||
def event_is_in_list(event, events):
|
||||
return any(
|
||||
event_to_regex(event).match(_event)
|
||||
for _event in events
|
||||
)
|
||||
|
||||
unavailable_events = [
|
||||
event
|
||||
for event in self.events
|
||||
if not event_is_in_list(event, self.available_events)
|
||||
]
|
||||
if unavailable_events:
|
||||
message = 'Events not available for tracing: {}'.format(
|
||||
', '.join(unavailable_events)
|
||||
)
|
||||
if self.strict:
|
||||
raise TargetStableError(message)
|
||||
else:
|
||||
selected_events.append(event)
|
||||
# If function profiling is enabled we always need at least one event.
|
||||
# Thus, if no other events have been specified, try to add at least
|
||||
# a tracepoint which is always available and possibly triggered few
|
||||
# times.
|
||||
if self.functions and not selected_events:
|
||||
selected_events = ['sched_wakeup_new']
|
||||
self.event_string = _build_trace_events(selected_events)
|
||||
self.target.logger.warning(message)
|
||||
|
||||
selected_events = sorted(set(self.events) - set(unavailable_events))
|
||||
|
||||
if self.tracer and self.tracer not in self.available_tracers:
|
||||
raise TargetStableError('Unsupported tracer "{}". Available tracers: {}'.format(
|
||||
self.tracer, ', '.join(self.available_tracers)))
|
||||
|
||||
# Check for function tracing support
|
||||
if self.functions:
|
||||
if not self.target.file_exists(self.function_profile_file):
|
||||
raise TargetStableError('Function profiling not supported. '\
|
||||
'A kernel build with CONFIG_FUNCTION_PROFILER enabled is required')
|
||||
# Validate required functions to be traced
|
||||
available_functions = self.target.execute(
|
||||
'cat {}'.format(self.available_functions_file),
|
||||
as_root=True).splitlines()
|
||||
selected_functions = []
|
||||
for function in self.functions:
|
||||
if function not in available_functions:
|
||||
message = 'Function [{}] not available for profiling'.format(function)
|
||||
if strict:
|
||||
if function not in self.available_functions:
|
||||
message = 'Function [{}] not available for tracing/profiling'.format(function)
|
||||
if self.strict:
|
||||
raise TargetStableError(message)
|
||||
self.target.logger.warning(message)
|
||||
else:
|
||||
selected_functions.append(function)
|
||||
self.function_string = _build_trace_functions(selected_functions)
|
||||
|
||||
# Function profiling
|
||||
if self.tracer is None:
|
||||
if not self.target.file_exists(self.function_profile_file):
|
||||
raise TargetStableError('Function profiling not supported. '\
|
||||
'A kernel build with CONFIG_FUNCTION_PROFILER enabled is required')
|
||||
self.function_string = _build_trace_functions(selected_functions)
|
||||
# If function profiling is enabled we always need at least one event.
|
||||
# Thus, if no other events have been specified, try to add at least
|
||||
# a tracepoint which is always available and possibly triggered few
|
||||
# times.
|
||||
if not selected_events:
|
||||
selected_events = ['sched_wakeup_new']
|
||||
|
||||
# Function tracing
|
||||
elif self.tracer == 'function':
|
||||
self.function_string = _build_graph_functions(selected_functions, False)
|
||||
|
||||
# Function graphing
|
||||
elif self.tracer == 'function_graph':
|
||||
self.function_string = _build_graph_functions(selected_functions, trace_children_functions)
|
||||
|
||||
self.event_string = _build_trace_events(selected_events)
|
||||
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_tracers(self):
|
||||
"""
|
||||
List of ftrace tracers supported by the target's kernel.
|
||||
"""
|
||||
return self.target.read_value(self.available_tracers_file).split(' ')
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_events(self):
|
||||
"""
|
||||
List of ftrace events supported by the target's kernel.
|
||||
"""
|
||||
return self.target.read_value(self.available_events_file).splitlines()
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def available_functions(self):
|
||||
"""
|
||||
List of functions whose tracing/profiling is supported by the target's kernel.
|
||||
"""
|
||||
return self.target.read_value(self.available_functions_file).splitlines()
|
||||
|
||||
def reset(self):
|
||||
if self.buffer_size:
|
||||
self._set_buffer_size()
|
||||
self.target.execute('{} reset'.format(self.target_binary),
|
||||
as_root=True, timeout=TIMEOUT)
|
||||
if self.functions:
|
||||
self.target.write_value(self.function_profile_file, 0, verify=False)
|
||||
self._reset_needed = False
|
||||
|
||||
def start(self):
|
||||
self.start_time = time.time()
|
||||
if self._reset_needed:
|
||||
self.reset()
|
||||
self.target.execute('{} start {}'.format(self.target_binary, self.event_string),
|
||||
as_root=True)
|
||||
|
||||
if self.tracer is not None and 'function' in self.tracer:
|
||||
tracecmd_functions = self.function_string
|
||||
else:
|
||||
tracecmd_functions = ''
|
||||
|
||||
tracer_string = '-p {}'.format(self.tracer) if self.tracer else ''
|
||||
|
||||
# Ensure kallsyms contains addresses if possible, so that the
|
||||
# collected trace contains enough data for pretty printing
|
||||
with contextlib.suppress(TargetStableError):
|
||||
self.target.write_value('/proc/sys/kernel/kptr_restrict', 0)
|
||||
|
||||
self.target.write_value(self.trace_clock_file, self.trace_clock, verify=False)
|
||||
try:
|
||||
self.target.write_value(self.save_cmdlines_size_file, self.saved_cmdlines_nr)
|
||||
except TargetStableError as e:
|
||||
message = 'Could not set "save_cmdlines_size"'
|
||||
if self.strict:
|
||||
self.logger.error(message)
|
||||
raise e
|
||||
else:
|
||||
self.logger.warning(message)
|
||||
self.logger.debug(e)
|
||||
|
||||
self.target.execute(
|
||||
'{} start {events} {tracer} {functions}'.format(
|
||||
self.target_binary,
|
||||
events=self.event_string,
|
||||
tracer=tracer_string,
|
||||
functions=tracecmd_functions,
|
||||
),
|
||||
as_root=True,
|
||||
)
|
||||
if self.automark:
|
||||
self.mark_start()
|
||||
if 'cpufreq' in self.target.modules:
|
||||
@@ -181,7 +278,7 @@ class FtraceCollector(TraceCollector):
|
||||
self.logger.debug('Trace CPUIdle states')
|
||||
self.target.cpuidle.perturb_cpus()
|
||||
# Enable kernel function profiling
|
||||
if self.functions:
|
||||
if self.functions and self.tracer is None:
|
||||
self.target.execute('echo nop > {}'.format(self.current_tracer_file),
|
||||
as_root=True)
|
||||
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
|
||||
@@ -194,8 +291,8 @@ class FtraceCollector(TraceCollector):
|
||||
|
||||
def stop(self):
|
||||
# Disable kernel function profiling
|
||||
if self.functions:
|
||||
self.target.execute('echo 1 > {}'.format(self.function_profile_file),
|
||||
if self.functions and self.tracer is None:
|
||||
self.target.execute('echo 0 > {}'.format(self.function_profile_file),
|
||||
as_root=True)
|
||||
if 'cpufreq' in self.target.modules:
|
||||
self.logger.debug('Trace CPUFreq frequencies')
|
||||
@@ -207,9 +304,14 @@ class FtraceCollector(TraceCollector):
|
||||
timeout=TIMEOUT, as_root=True)
|
||||
self._reset_needed = True
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if os.path.isdir(outfile):
|
||||
outfile = os.path.join(outfile, os.path.basename(self.target_output_file))
|
||||
def set_output(self, output_path):
|
||||
if os.path.isdir(output_path):
|
||||
output_path = os.path.join(output_path, os.path.basename(self.target_output_file))
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
self.target.execute('{0} extract -o {1}; chmod 666 {1}'.format(self.target_binary,
|
||||
self.target_output_file),
|
||||
timeout=TIMEOUT, as_root=True)
|
||||
@@ -218,23 +320,27 @@ class FtraceCollector(TraceCollector):
|
||||
# Therefore the timeout for the pull command must also be adjusted
|
||||
# accordingly.
|
||||
pull_timeout = 10 * (self.stop_time - self.start_time)
|
||||
self.target.pull(self.target_output_file, outfile, timeout=pull_timeout)
|
||||
if not os.path.isfile(outfile):
|
||||
self.target.pull(self.target_output_file, self.output_path, timeout=pull_timeout)
|
||||
output = CollectorOutput()
|
||||
if not os.path.isfile(self.output_path):
|
||||
self.logger.warning('Binary trace not pulled from device.')
|
||||
else:
|
||||
output.append(CollectorOutputEntry(self.output_path, 'file'))
|
||||
if self.autoreport:
|
||||
textfile = os.path.splitext(outfile)[0] + '.txt'
|
||||
textfile = os.path.splitext(self.output_path)[0] + '.txt'
|
||||
if self.report_on_target:
|
||||
self.generate_report_on_target()
|
||||
self.target.pull(self.target_text_file,
|
||||
textfile, timeout=pull_timeout)
|
||||
else:
|
||||
self.report(outfile, textfile)
|
||||
self.report(self.output_path, textfile)
|
||||
output.append(CollectorOutputEntry(textfile, 'file'))
|
||||
if self.autoview:
|
||||
self.view(outfile)
|
||||
self.view(self.output_path)
|
||||
return output
|
||||
|
||||
def get_stats(self, outfile):
|
||||
if not self.functions:
|
||||
if not (self.functions and self.tracer is None):
|
||||
return
|
||||
|
||||
if os.path.isdir(outfile):
|
||||
@@ -351,3 +457,10 @@ def _build_trace_events(events):
|
||||
def _build_trace_functions(functions):
|
||||
function_string = " ".join(functions)
|
||||
return function_string
|
||||
|
||||
def _build_graph_functions(functions, trace_children_functions):
|
||||
opt = 'g' if trace_children_functions else 'l'
|
||||
return ' '.join(
|
||||
'-{} {}'.format(opt, quote(f))
|
||||
for f in functions
|
||||
)
|
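The reworked FtraceCollector grows tracer, trace_children_functions, trace_clock and saved_cmdlines_nr arguments and switches to the set_output()/get_data() flow. A hedged usage sketch follows; the target object and the traced function name are assumptions, not part of this diff.

# Hedged sketch; `target` is assumed to be a connected, rooted devlib Target.
import time
from devlib.collector.ftrace import FtraceCollector

ftrace = FtraceCollector(target,
                         events=['sched_switch', 'sched_wakeup'],
                         tracer='function_graph',
                         functions=['update_load_avg'],   # assumed kernel function
                         trace_children_functions=True,
                         buffer_size=10240)
ftrace.set_output('trace.dat')
with ftrace:                     # reset() + start(), then stop() on exit
    time.sleep(5)                # the workload of interest would run here
entries = ftrace.get_data()      # trace.dat, plus trace.txt when autoreport is enabled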
@@ -16,14 +16,17 @@
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.utils.android import LogcatMonitor
|
||||
|
||||
class LogcatCollector(TraceCollector):
|
||||
class LogcatCollector(CollectorBase):
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
def __init__(self, target, regexps=None, logcat_format=None):
|
||||
super(LogcatCollector, self).__init__(target)
|
||||
self.regexps = regexps
|
||||
self.logcat_format = logcat_format
|
||||
self.output_path = None
|
||||
self._collecting = False
|
||||
self._prev_log = None
|
||||
self._monitor = None
|
||||
@@ -45,12 +48,14 @@ class LogcatCollector(TraceCollector):
|
||||
"""
|
||||
Start collecting logcat lines
|
||||
"""
|
||||
self._monitor = LogcatMonitor(self.target, self.regexps)
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
self._monitor = LogcatMonitor(self.target, self.regexps, logcat_format=self.logcat_format)
|
||||
if self._prev_log:
|
||||
# Append new data collection to previous collection
|
||||
self._monitor.start(self._prev_log)
|
||||
else:
|
||||
self._monitor.start()
|
||||
self._monitor.start(self.output_path)
|
||||
|
||||
self._collecting = True
|
||||
|
||||
@@ -65,9 +70,10 @@ class LogcatCollector(TraceCollector):
|
||||
self._collecting = False
|
||||
self._prev_log = self._monitor.logfile
|
||||
|
||||
def get_trace(self, outfile):
|
||||
"""
|
||||
Output collected logcat lines to designated file
|
||||
"""
|
||||
# copy self._monitor.logfile to outfile
|
||||
shutil.copy(self._monitor.logfile, outfile)
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
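LogcatCollector now writes straight to the path given via set_output() and passes the new logcat_format argument through to LogcatMonitor. A hedged sketch; `target` is assumed to be a connected AndroidTarget, and 'threadtime' is a standard logcat -v format.

import time
from devlib.collector.logcat import LogcatCollector

logcat = LogcatCollector(target, regexps=['ActivityManager'],
                         logcat_format='threadtime')
logcat.set_output('logcat.txt')
with logcat:
    time.sleep(10)               # capture ten seconds of filtered logcat
output = logcat.get_data()       # [CollectorOutputEntry('logcat.txt', 'file')]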
devlib/collector/perf.py (new file, 253 lines)
@@ -0,0 +1,253 @@
|
||||
# Copyright 2018 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from past.builtins import basestring, zip
|
||||
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.utils.misc import ensure_file_directory_exists as _f
|
||||
|
||||
|
||||
PERF_COMMAND_TEMPLATE = '{binary} {command} {options} {events} sleep 1000 > {outfile} 2>&1 '
|
||||
PERF_REPORT_COMMAND_TEMPLATE= '{binary} report {options} -i {datafile} > {outfile} 2>&1 '
|
||||
PERF_RECORD_COMMAND_TEMPLATE= '{binary} record {options} {events} -o {outfile}'
|
||||
|
||||
PERF_DEFAULT_EVENTS = [
|
||||
'cpu-migrations',
|
||||
'context-switches',
|
||||
]
|
||||
|
||||
SIMPLEPERF_DEFAULT_EVENTS = [
|
||||
'raw-cpu-cycles',
|
||||
'raw-l1-dcache',
|
||||
'raw-l1-dcache-refill',
|
||||
'raw-br-mis-pred',
|
||||
'raw-instruction-retired',
|
||||
]
|
||||
|
||||
DEFAULT_EVENTS = {'perf':PERF_DEFAULT_EVENTS, 'simpleperf':SIMPLEPERF_DEFAULT_EVENTS}
|
||||
|
||||
class PerfCollector(CollectorBase):
|
||||
"""
|
||||
Perf is a Linux profiling tool based on performance counters.
|
||||
Simpleperf is an Android profiling tool with performance counters.
|
||||
|
||||
It is highly recommended to use perf_type = simpleperf when using this instrument
|
||||
on Android devices, since it recognises Android symbols in record mode and is much more stable
|
||||
when reporting record .data files. For more information see simpleperf documentation at:
|
||||
https://android.googlesource.com/platform/system/extras/+/master/simpleperf/doc/README.md
|
||||
|
||||
Performance counters are CPU hardware registers that count hardware events
|
||||
such as instructions executed, cache-misses suffered, or branches
|
||||
mispredicted. They form a basis for profiling applications to trace dynamic
|
||||
control flow and identify hotspots.
|
||||
|
||||
perf accepts options and events. If no option is given, the default '-a' is
|
||||
used. For events, the default events are migrations and cs for perf and raw-cpu-cycles,
|
||||
raw-l1-dcache, raw-l1-dcache-refill, raw-instructions-retired. They both can
|
||||
be specified in the config file.
|
||||
|
||||
Events must be provided as a list; they will look like
|
||||
this ::
|
||||
|
||||
perf_events = ['migrations', 'cs']
|
||||
|
||||
Events can be obtained by typing the following in the command line on the
|
||||
device ::
|
||||
|
||||
perf list
|
||||
simpleperf list
|
||||
|
||||
Options, on the other hand, can be provided as a single string, as follows ::
|
||||
|
||||
perf_options = '-a -i'
|
||||
|
||||
Options can be obtained by running the following in the command line ::
|
||||
|
||||
man perf-stat
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
target,
|
||||
perf_type='perf',
|
||||
command='stat',
|
||||
events=None,
|
||||
optionstring=None,
|
||||
report_options=None,
|
||||
labels=None,
|
||||
force_install=False):
|
||||
super(PerfCollector, self).__init__(target)
|
||||
self.force_install = force_install
|
||||
self.labels = labels
|
||||
self.report_options = report_options
|
||||
self.output_path = None
|
||||
|
||||
# Validate parameters
|
||||
if isinstance(optionstring, list):
|
||||
self.optionstrings = optionstring
|
||||
else:
|
||||
self.optionstrings = [optionstring]
|
||||
if perf_type in ['perf', 'simpleperf']:
|
||||
self.perf_type = perf_type
|
||||
else:
|
||||
raise ValueError('Invalid perf type: {}, must be perf or simpleperf'.format(perf_type))
|
||||
if not events:
|
||||
self.events = DEFAULT_EVENTS[self.perf_type]
|
||||
else:
|
||||
self.events = events
|
||||
if isinstance(self.events, basestring):
|
||||
self.events = [self.events]
|
||||
if not self.labels:
|
||||
self.labels = ['perf_{}'.format(i) for i in range(len(self.optionstrings))]
|
||||
if len(self.labels) != len(self.optionstrings):
|
||||
raise ValueError('The number of labels must match the number of optstrings provided for perf.')
|
||||
if command in ['stat', 'record']:
|
||||
self.command = command
|
||||
else:
|
||||
raise ValueError('Unsupported perf command, must be stat or record')
|
||||
|
||||
self.binary = self.target.get_installed(self.perf_type)
|
||||
if self.force_install or not self.binary:
|
||||
self.binary = self._deploy_perf()
|
||||
|
||||
self._validate_events(self.events)
|
||||
|
||||
self.commands = self._build_commands()
|
||||
|
||||
def reset(self):
|
||||
self.target.killall(self.perf_type, as_root=self.target.is_rooted)
|
||||
self.target.remove(self.target.get_workpath('TemporaryFile*'))
|
||||
for label in self.labels:
|
||||
filepath = self._get_target_file(label, 'data')
|
||||
self.target.remove(filepath)
|
||||
filepath = self._get_target_file(label, 'rpt')
|
||||
self.target.remove(filepath)
|
||||
|
||||
def start(self):
|
||||
for command in self.commands:
|
||||
self.target.kick_off(command)
|
||||
|
||||
def stop(self):
|
||||
self.target.killall(self.perf_type, signal='SIGINT',
|
||||
as_root=self.target.is_rooted)
|
||||
# perf doesn't transmit the signal to its sleep call so handled here:
|
||||
self.target.killall('sleep', as_root=self.target.is_rooted)
|
||||
# NB: we hope that no other "important" sleep is on-going
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
|
||||
output = CollectorOutput()
|
||||
|
||||
for label in self.labels:
|
||||
if self.command == 'record':
|
||||
self._wait_for_data_file_write(label, self.output_path)
|
||||
path = self._pull_target_file_to_host(label, 'rpt', self.output_path)
|
||||
output.append(CollectorOutputEntry(path, 'file'))
|
||||
else:
|
||||
path = self._pull_target_file_to_host(label, 'out', self.output_path)
|
||||
output.append(CollectorOutputEntry(path, 'file'))
|
||||
return output
|
||||
|
||||
def _deploy_perf(self):
|
||||
host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
|
||||
self.target.abi, self.perf_type)
|
||||
return self.target.install(host_executable)
|
||||
|
||||
def _get_target_file(self, label, extension):
|
||||
return self.target.get_workpath('{}.{}'.format(label, extension))
|
||||
|
||||
def _build_commands(self):
|
||||
commands = []
|
||||
for opts, label in zip(self.optionstrings, self.labels):
|
||||
if self.command == 'stat':
|
||||
commands.append(self._build_perf_stat_command(opts, self.events, label))
|
||||
else:
|
||||
commands.append(self._build_perf_record_command(opts, label))
|
||||
return commands
|
||||
|
||||
def _build_perf_stat_command(self, options, events, label):
|
||||
event_string = ' '.join(['-e {}'.format(e) for e in events])
|
||||
command = PERF_COMMAND_TEMPLATE.format(binary = self.binary,
|
||||
command = self.command,
|
||||
options = options or '',
|
||||
events = event_string,
|
||||
outfile = self._get_target_file(label, 'out'))
|
||||
return command
|
||||
|
||||
def _build_perf_report_command(self, report_options, label):
|
||||
command = PERF_REPORT_COMMAND_TEMPLATE.format(binary=self.binary,
|
||||
options=report_options or '',
|
||||
datafile=self._get_target_file(label, 'data'),
|
||||
outfile=self._get_target_file(label, 'rpt'))
|
||||
return command
|
||||
|
||||
def _build_perf_record_command(self, options, label):
|
||||
event_string = ' '.join(['-e {}'.format(e) for e in self.events])
|
||||
command = PERF_RECORD_COMMAND_TEMPLATE.format(binary=self.binary,
|
||||
options=options or '',
|
||||
events=event_string,
|
||||
outfile=self._get_target_file(label, 'data'))
|
||||
return command
|
||||
|
||||
def _pull_target_file_to_host(self, label, extension, output_path):
|
||||
target_file = self._get_target_file(label, extension)
|
||||
host_relpath = os.path.basename(target_file)
|
||||
host_file = _f(os.path.join(output_path, host_relpath))
|
||||
self.target.pull(target_file, host_file)
|
||||
return host_file
|
||||
|
||||
def _wait_for_data_file_write(self, label, output_path):
|
||||
data_file_finished_writing = False
|
||||
max_tries = 80
|
||||
current_tries = 0
|
||||
while not data_file_finished_writing:
|
||||
files = self.target.execute('cd {} && ls'.format(self.target.get_workpath('')))
|
||||
# Perf stores data in temporary files whilst writing to the data output file. Check if they have been removed.
|
||||
if 'TemporaryFile' in files and current_tries <= max_tries:
|
||||
time.sleep(0.25)
|
||||
current_tries += 1
|
||||
else:
|
||||
if current_tries >= max_tries:
|
||||
self.logger.warning('''writing {}.data file took longer than expected,
|
||||
file may not have written correctly'''.format(label))
|
||||
data_file_finished_writing = True
|
||||
report_command = self._build_perf_report_command(self.report_options, label)
|
||||
self.target.execute(report_command)
|
||||
|
||||
def _validate_events(self, events):
|
||||
available_events_string = self.target.execute('{} list | {} cat'.format(self.perf_type, self.target.busybox))
|
||||
available_events = available_events_string.splitlines()
|
||||
for available_event in available_events:
|
||||
if available_event == '':
|
||||
continue
|
||||
if 'OR' in available_event:
|
||||
available_events.append(available_event.split('OR')[1])
|
||||
available_events[available_events.index(available_event)] = available_event.split()[0].strip()
|
||||
# Raw hex event codes can also be passed in that do not appear on perf/simpleperf list, prefixed with 'r'
|
||||
raw_event_code_regex = re.compile(r"^r(0x|0X)?[A-Fa-f0-9]+$")
|
||||
for event in events:
|
||||
if event in available_events or re.match(raw_event_code_regex, event):
|
||||
continue
|
||||
else:
|
||||
raise ValueError('Event: {} is not in available event list for {}'.format(event, self.perf_type))
|
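PerfCollector follows the same pattern; for the 'record' command the report is generated on the target and the .rpt files are pulled into the output directory. A hedged sketch; `target` is assumed to be a connected, rooted AndroidTarget and event availability depends on the device.

import time
from devlib.collector.perf import PerfCollector

perf = PerfCollector(target,
                     perf_type='simpleperf',
                     command='record',
                     events=['raw-cpu-cycles', 'raw-instruction-retired'],
                     optionstring='-a',
                     labels=['system_wide'])
perf.set_output('results')       # used as the host-side output directory
with perf:
    time.sleep(10)
for entry in perf.get_data():
    print(entry.path)            # e.g. results/system_wide.rpt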
@@ -19,13 +19,14 @@ import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.exception import WorkerThreadError
|
||||
|
||||
|
||||
class ScreenCapturePoller(threading.Thread):
|
||||
|
||||
def __init__(self, target, period, output_path=None, timeout=30):
|
||||
def __init__(self, target, period, timeout=30):
|
||||
super(ScreenCapturePoller, self).__init__()
|
||||
self.target = target
|
||||
self.logger = logging.getLogger('screencapture')
|
||||
@@ -36,11 +37,16 @@ class ScreenCapturePoller(threading.Thread):
|
||||
self.last_poll = 0
|
||||
self.daemon = True
|
||||
self.exc = None
|
||||
self.output_path = None
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def run(self):
|
||||
self.logger.debug('Starting screen capture polling')
|
||||
try:
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
while True:
|
||||
if self.stop_signal.is_set():
|
||||
break
|
||||
@@ -66,24 +72,33 @@ class ScreenCapturePoller(threading.Thread):
|
||||
self.target.capture_screen(os.path.join(self.output_path, "screencap_{ts}.png"))
|
||||
|
||||
|
||||
class ScreenCaptureCollector(TraceCollector):
|
||||
class ScreenCaptureCollector(CollectorBase):
|
||||
|
||||
def __init__(self, target, output_path=None, period=None):
|
||||
def __init__(self, target, period=None):
|
||||
super(ScreenCaptureCollector, self).__init__(target)
|
||||
self._collecting = False
|
||||
self.output_path = output_path
|
||||
self.output_path = None
|
||||
self.period = period
|
||||
self.target = target
|
||||
self._poller = ScreenCapturePoller(self.target, self.period,
|
||||
self.output_path)
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
self._poller = ScreenCapturePoller(self.target, self.period)
|
||||
|
||||
def get_data(self):
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'directory')])
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start collecting the screenshots
|
||||
"""
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
self._poller.set_output(self.output_path)
|
||||
self._poller.start()
|
||||
self._collecting = True
|
||||
|
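ScreenCaptureCollector now creates its poller in reset() and requires set_output() before start(). A hedged sketch; the module path is an assumption (mirroring the old devlib.trace.screencapture location), and `target` is assumed to be a connected Target.

import os
import time
from devlib.collector.screencapture import ScreenCaptureCollector  # path assumed

os.makedirs('screenshots', exist_ok=True)
sc = ScreenCaptureCollector(target, period=2)   # one screenshot every 2 seconds
sc.set_output('screenshots')                    # output is a directory
with sc:                                        # reset() creates the poller, start() kicks it off
    time.sleep(10)
sc.get_data()    # CollectorOutput([CollectorOutputEntry('screenshots', 'directory')])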
@@ -17,11 +17,12 @@ import shutil
|
||||
from tempfile import NamedTemporaryFile
|
||||
from pexpect.exceptions import TIMEOUT
|
||||
|
||||
from devlib.trace import TraceCollector
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.utils.serial_port import get_connection
|
||||
|
||||
|
||||
class SerialTraceCollector(TraceCollector):
|
||||
class SerialTraceCollector(CollectorBase):
|
||||
|
||||
@property
|
||||
def collecting(self):
|
||||
@@ -32,33 +33,35 @@ class SerialTraceCollector(TraceCollector):
|
||||
self.serial_port = serial_port
|
||||
self.baudrate = baudrate
|
||||
self.timeout = timeout
|
||||
self.output_path = None
|
||||
|
||||
self._serial_target = None
|
||||
self._conn = None
|
||||
self._tmpfile = None
|
||||
self._outfile_fh = None
|
||||
self._collecting = False
|
||||
|
||||
def reset(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("reset was called whilst collecting")
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
if self._outfile_fh:
|
||||
self._outfile_fh.close()
|
||||
self._outfile_fh = None
|
||||
|
||||
def start(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("start was called whilst collecting")
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
|
||||
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
self._outfile_fh = open(self.output_path, 'wb')
|
||||
start_marker = "-------- Starting serial logging --------\n"
|
||||
self._tmpfile.write(start_marker.encode('utf-8'))
|
||||
self._outfile_fh.write(start_marker.encode('utf-8'))
|
||||
|
||||
self._serial_target, self._conn = get_connection(port=self.serial_port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
logfile=self._tmpfile,
|
||||
logfile=self._outfile_fh,
|
||||
init_dtr=0)
|
||||
self._collecting = True
|
||||
|
||||
@@ -78,17 +81,19 @@ class SerialTraceCollector(TraceCollector):
|
||||
del self._conn
|
||||
|
||||
stop_marker = "-------- Stopping serial logging --------\n"
|
||||
self._tmpfile.write(stop_marker.encode('utf-8'))
|
||||
self._outfile_fh.write(stop_marker.encode('utf-8'))
|
||||
self._outfile_fh.flush()
|
||||
self._outfile_fh.close()
|
||||
self._outfile_fh = None
|
||||
|
||||
self._collecting = False
|
||||
|
||||
def get_trace(self, outfile):
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self._collecting:
|
||||
raise RuntimeError("get_trace was called whilst collecting")
|
||||
|
||||
self._tmpfile.flush()
|
||||
|
||||
shutil.copy(self._tmpfile.name, outfile)
|
||||
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
raise RuntimeError("get_data was called whilst collecting")
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
@@ -19,8 +19,9 @@ import subprocess
|
||||
from shutil import copyfile
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from devlib.collector import (CollectorBase, CollectorOutput,
|
||||
CollectorOutputEntry)
|
||||
from devlib.exception import TargetStableError, HostError
|
||||
from devlib.trace import TraceCollector
|
||||
import devlib.utils.android
|
||||
from devlib.utils.misc import memoized
|
||||
|
||||
@@ -33,7 +34,7 @@ DEFAULT_CATEGORIES = [
|
||||
'idle'
|
||||
]
|
||||
|
||||
class SystraceCollector(TraceCollector):
|
||||
class SystraceCollector(CollectorBase):
|
||||
"""
|
||||
A trace collector based on Systrace
|
||||
|
||||
@@ -74,9 +75,10 @@ class SystraceCollector(TraceCollector):
|
||||
|
||||
self.categories = categories or DEFAULT_CATEGORIES
|
||||
self.buffer_size = buffer_size
|
||||
self.output_path = None
|
||||
|
||||
self._systrace_process = None
|
||||
self._tmpfile = None
|
||||
self._outfile_fh = None
|
||||
|
||||
# Try to find a systrace binary
|
||||
self.systrace_binary = None
|
||||
@@ -104,12 +106,12 @@ class SystraceCollector(TraceCollector):
|
||||
self.reset()
|
||||
|
||||
def _build_cmd(self):
|
||||
self._tmpfile = NamedTemporaryFile()
|
||||
self._outfile_fh = open(self.output_path, 'w')
|
||||
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
self.systrace_cmd = '{} -o {} -e {}'.format(
|
||||
self.systrace_cmd = 'python2 -u {} -o {} -e {}'.format(
|
||||
self.systrace_binary,
|
||||
self._tmpfile.name,
|
||||
self._outfile_fh.name,
|
||||
self.target.adb_name
|
||||
)
|
||||
|
||||
@@ -122,13 +124,11 @@ class SystraceCollector(TraceCollector):
|
||||
if self._systrace_process:
|
||||
self.stop()
|
||||
|
||||
if self._tmpfile:
|
||||
self._tmpfile.close()
|
||||
self._tmpfile = None
|
||||
|
||||
def start(self):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is already underway, call stop() first")
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("Output path was not set.")
|
||||
|
||||
self.reset()
|
||||
|
||||
@@ -137,9 +137,11 @@ class SystraceCollector(TraceCollector):
|
||||
self._systrace_process = subprocess.Popen(
|
||||
self.systrace_cmd,
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
shell=True,
|
||||
universal_newlines=True
|
||||
)
|
||||
self._systrace_process.stdout.read(1)
|
||||
|
||||
def stop(self):
|
||||
if not self._systrace_process:
|
||||
@@ -149,11 +151,16 @@ class SystraceCollector(TraceCollector):
|
||||
self._systrace_process.communicate('\n')
|
||||
self._systrace_process = None
|
||||
|
||||
def get_trace(self, outfile):
|
||||
if self._outfile_fh:
|
||||
self._outfile_fh.close()
|
||||
self._outfile_fh = None
|
||||
|
||||
def set_output(self, output_path):
|
||||
self.output_path = output_path
|
||||
|
||||
def get_data(self):
|
||||
if self._systrace_process:
|
||||
raise RuntimeError("Tracing is underway, call stop() first")
|
||||
|
||||
if not self._tmpfile:
|
||||
raise RuntimeError("No tracing data available")
|
||||
|
||||
copyfile(self._tmpfile.name, outfile)
|
||||
if self.output_path is None:
|
||||
raise RuntimeError("No data collected.")
|
||||
return CollectorOutput([CollectorOutputEntry(self.output_path, 'file')])
|
devlib/connection.py (new file, 523 lines)
@@ -0,0 +1,523 @@
|
||||
# Copyright 2019 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from contextlib import contextmanager
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
from weakref import WeakSet
|
||||
from shlex import quote
|
||||
from time import monotonic
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
import logging
|
||||
|
||||
from devlib.utils.misc import InitCheckpoint
|
||||
|
||||
_KILL_TIMEOUT = 3
|
||||
|
||||
|
||||
def _kill_pgid_cmd(pgid, sig):
|
||||
return 'kill -{} -{}'.format(sig.value, pgid)
|
||||
|
||||
|
||||
class ConnectionBase(InitCheckpoint):
|
||||
"""
|
||||
Base class for all connections.
|
||||
"""
|
||||
def __init__(self):
|
||||
self._current_bg_cmds = WeakSet()
|
||||
self._closed = False
|
||||
self._close_lock = threading.Lock()
|
||||
self.busybox = None
|
||||
|
||||
def cancel_running_command(self):
|
||||
bg_cmds = set(self._current_bg_cmds)
|
||||
for bg_cmd in bg_cmds:
|
||||
bg_cmd.cancel()
|
||||
|
||||
@abstractmethod
|
||||
def _close(self):
|
||||
"""
|
||||
Close the connection.
|
||||
|
||||
The public :meth:`close` method makes sure that :meth:`_close` will
|
||||
only be called once, and will serialize accesses to it if it happens to
|
||||
be called from multiple threads at once.
|
||||
"""
|
||||
|
||||
def close(self):
|
||||
# Locking the closing allows any thread to safely call close() as long
|
||||
# as the connection can be closed from a thread that is not the one it
|
||||
# started its life in.
|
||||
with self._close_lock:
|
||||
if not self._closed:
|
||||
self._close()
|
||||
self._closed = True
|
||||
|
||||
# Ideally, that should not be relied upon but that will improve the chances
|
||||
# of the connection being properly cleaned up when it's not in use anymore.
|
||||
def __del__(self):
|
||||
# Since __del__ will be called if an exception is raised in __init__
|
||||
# (e.g. we cannot connect), we only run close() when we are sure
|
||||
# __init__ has completed successfully.
|
||||
if self.initialized:
|
||||
self.close()
|
||||
|
||||
|
||||
class BackgroundCommand(ABC):
|
||||
"""
|
||||
Allows managing a running background command using a subset of the
|
||||
:class:`subprocess.Popen` API.
|
||||
|
||||
Instances of this class can be used as context managers, with the same
|
||||
semantic as :class:`subprocess.Popen`.
|
||||
"""
|
||||
@abstractmethod
|
||||
def send_signal(self, sig):
|
||||
"""
|
||||
Send a POSIX signal to the background command's process group ID
|
||||
(PGID).
|
||||
|
||||
:param signal: Signal to send.
|
||||
:type signal: signal.Signals
|
||||
"""
|
||||
|
||||
def kill(self):
|
||||
"""
|
||||
Send SIGKILL to the background command.
|
||||
"""
|
||||
self.send_signal(signal.SIGKILL)
|
||||
|
||||
@abstractmethod
|
||||
def cancel(self, kill_timeout=_KILL_TIMEOUT):
|
||||
"""
|
||||
Try to gracefully terminate the process by sending ``SIGTERM``, then
|
||||
waiting for ``kill_timeout`` to send ``SIGKILL``.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def wait(self):
|
||||
"""
|
||||
Block until the background command completes, and return its exit code.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def poll(self):
|
||||
"""
|
||||
Return exit code if the command has exited, None otherwise.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def stdin(self):
|
||||
"""
|
||||
File-like object connected to the background command's stdin.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def stdout(self):
|
||||
"""
|
||||
File-like object connected to the background command's stdout.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def stderr(self):
|
||||
"""
|
||||
File-like object connected to the background command's stderr.
|
||||
"""
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def pid(self):
|
||||
"""
|
||||
Process Group ID (PGID) of the background command.
|
||||
|
||||
Since the command is usually wrapped in shell processes for IO
|
||||
redirections, sudo, etc., the PID cannot be assumed to be the actual PID
|
||||
of the command passed by the user. It is guaranteed to be a PGID
|
||||
instead, which means signals sent to it as such will target all
|
||||
subprocesses involved in executing that command.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def close(self):
|
||||
"""
|
||||
Close all opened streams and then wait for command completion.
|
||||
|
||||
:returns: Exit code of the command.
|
||||
|
||||
.. note:: If the command is writing to its stdout/stderr, it might be
|
||||
blocked on that and die when the streams are closed.
|
||||
"""
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
self.close()
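A hedged usage sketch of the context-manager protocol above, assuming a devlib target whose `background()` returns one of the concrete subclasses below (the command line is invented):

import signal
from devlib import LocalLinuxTarget  # assumed to be importable

target = LocalLinuxTarget()
with target.background('dd if=/dev/zero of=/dev/null') as bg_cmd:
    bg_cmd.send_signal(signal.SIGSTOP)  # signals reach the whole process group
    bg_cmd.send_signal(signal.SIGCONT)
    bg_cmd.cancel()                     # SIGTERM, then SIGKILL after the timeout
# __exit__ closes the streams and reaps the command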
|
||||
|
||||
|
||||
class PopenBackgroundCommand(BackgroundCommand):
|
||||
"""
|
||||
:class:`subprocess.Popen`-based background command.
|
||||
"""
|
||||
|
||||
def __init__(self, popen):
|
||||
self.popen = popen
|
||||
|
||||
def send_signal(self, sig):
|
||||
return os.killpg(self.popen.pid, sig)
|
||||
|
||||
@property
|
||||
def stdin(self):
|
||||
return self.popen.stdin
|
||||
|
||||
@property
|
||||
def stdout(self):
|
||||
return self.popen.stdout
|
||||
|
||||
@property
|
||||
def stderr(self):
|
||||
return self.popen.stderr
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self.popen.pid
|
||||
|
||||
def wait(self):
|
||||
return self.popen.wait()
|
||||
|
||||
def poll(self):
|
||||
return self.popen.poll()
|
||||
|
||||
def cancel(self, kill_timeout=_KILL_TIMEOUT):
|
||||
popen = self.popen
|
||||
os.killpg(os.getpgid(popen.pid), signal.SIGTERM)
|
||||
try:
|
||||
popen.wait(timeout=kill_timeout)
|
||||
except subprocess.TimeoutExpired:
|
||||
os.killpg(os.getpgid(popen.pid), signal.SIGKILL)
|
||||
|
||||
def close(self):
|
||||
self.popen.__exit__(None, None, None)
|
||||
return self.popen.returncode
|
||||
|
||||
def __enter__(self):
|
||||
self.popen.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
self.popen.__exit__(*args, **kwargs)
|
||||
|
||||
|
||||
class ParamikoBackgroundCommand(BackgroundCommand):
|
||||
"""
|
||||
:mod:`paramiko`-based background command.
|
||||
"""
|
||||
def __init__(self, conn, chan, pid, as_root, stdin, stdout, stderr, redirect_thread):
|
||||
self.chan = chan
|
||||
self.as_root = as_root
|
||||
self.conn = conn
|
||||
self._pid = pid
|
||||
self._stdin = stdin
|
||||
self._stdout = stdout
|
||||
self._stderr = stderr
|
||||
self.redirect_thread = redirect_thread
|
||||
|
||||
def send_signal(self, sig):
|
||||
# If the command has already completed, we don't want to send a signal
|
||||
# to another process that might have gotten that PID in the meantime.
|
||||
if self.poll() is not None:
|
||||
return
|
||||
# Use -PGID to target a process group rather than just the process
|
||||
# itself
|
||||
cmd = _kill_pgid_cmd(self.pid, sig)
|
||||
self.conn.execute(cmd, as_root=self.as_root)
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self._pid
|
||||
|
||||
def wait(self):
|
||||
return self.chan.recv_exit_status()
|
||||
|
||||
def poll(self):
|
||||
if self.chan.exit_status_ready():
|
||||
return self.wait()
|
||||
else:
|
||||
return None
|
||||
|
||||
def cancel(self, kill_timeout=_KILL_TIMEOUT):
|
||||
self.send_signal(signal.SIGTERM)
|
||||
# Check if the command terminated quickly
|
||||
time.sleep(10e-3)
|
||||
# Otherwise wait for the full timeout and kill it
|
||||
if self.poll() is None:
|
||||
time.sleep(kill_timeout)
|
||||
self.send_signal(signal.SIGKILL)
|
||||
self.wait()
|
||||
|
||||
@property
|
||||
def stdin(self):
|
||||
return self._stdin
|
||||
|
||||
@property
|
||||
def stdout(self):
|
||||
return self._stdout
|
||||
|
||||
@property
|
||||
def stderr(self):
|
||||
return self._stderr
|
||||
|
||||
def close(self):
|
||||
for x in (self.stdin, self.stdout, self.stderr):
|
||||
if x is not None:
|
||||
x.close()
|
||||
|
||||
exit_code = self.wait()
|
||||
thread = self.redirect_thread
|
||||
if thread:
|
||||
thread.join()
|
||||
|
||||
return exit_code
|
||||
|
||||
|
||||
class AdbBackgroundCommand(BackgroundCommand):
|
||||
"""
|
||||
``adb``-based background command.
|
||||
"""
|
||||
|
||||
def __init__(self, conn, adb_popen, pid, as_root):
|
||||
self.conn = conn
|
||||
self.as_root = as_root
|
||||
self.adb_popen = adb_popen
|
||||
self._pid = pid
|
||||
|
||||
def send_signal(self, sig):
|
||||
self.conn.execute(
|
||||
_kill_pgid_cmd(self.pid, sig),
|
||||
as_root=self.as_root,
|
||||
)
|
||||
|
||||
@property
|
||||
def stdin(self):
|
||||
return self.adb_popen.stdin
|
||||
|
||||
@property
|
||||
def stdout(self):
|
||||
return self.adb_popen.stdout
|
||||
|
||||
@property
|
||||
def stderr(self):
|
||||
return self.adb_popen.stderr
|
||||
|
||||
@property
|
||||
def pid(self):
|
||||
return self._pid
|
||||
|
||||
def wait(self):
|
||||
return self.adb_popen.wait()
|
||||
|
||||
def poll(self):
|
||||
return self.adb_popen.poll()
|
||||
|
||||
def cancel(self, kill_timeout=_KILL_TIMEOUT):
|
||||
self.send_signal(signal.SIGTERM)
|
||||
try:
|
||||
self.adb_popen.wait(timeout=kill_timeout)
|
||||
except subprocess.TimeoutExpired:
|
||||
self.send_signal(signal.SIGKILL)
|
||||
self.adb_popen.kill()
|
||||
|
||||
def close(self):
|
||||
self.adb_popen.__exit__(None, None, None)
|
||||
return self.adb_popen.returncode
|
||||
|
||||
def __enter__(self):
|
||||
self.adb_popen.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
self.adb_popen.__exit__(*args, **kwargs)
|
||||
|
||||
|
||||
class TransferManagerBase(ABC):
|
||||
|
||||
def _pull_dest_size(self, dest):
|
||||
if os.path.isdir(dest):
|
||||
return sum(
|
||||
os.stat(os.path.join(dirpath, f)).st_size
|
||||
for dirpath, _, fnames in os.walk(dest)
|
||||
for f in fnames
|
||||
)
|
||||
else:
|
||||
return os.stat(dest).st_size
|
||||
return 0
|
||||
|
||||
def _push_dest_size(self, dest):
|
||||
cmd = '{} du -s {}'.format(quote(self.conn.busybox), quote(dest))
|
||||
out = self.conn.execute(cmd)
|
||||
try:
|
||||
return int(out.split()[0])
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
def __init__(self, conn, poll_period, start_transfer_poll_delay, total_timeout):
|
||||
self.conn = conn
|
||||
self.poll_period = poll_period
|
||||
self.total_timeout = total_timeout
|
||||
self.start_transfer_poll_delay = start_transfer_poll_delay
|
||||
|
||||
self.logger = logging.getLogger('FileTransfer')
|
||||
self.managing = threading.Event()
|
||||
self.transfer_started = threading.Event()
|
||||
self.transfer_completed = threading.Event()
|
||||
self.transfer_aborted = threading.Event()
|
||||
|
||||
self.monitor_thread = None
|
||||
self.sources = None
|
||||
self.dest = None
|
||||
self.direction = None
|
||||
|
||||
@abstractmethod
|
||||
def _cancel(self):
|
||||
pass
|
||||
|
||||
def cancel(self, reason=None):
|
||||
msg = 'Cancelling file transfer {} -> {}'.format(self.sources, self.dest)
|
||||
if reason is not None:
|
||||
msg += ' due to \'{}\''.format(reason)
|
||||
self.logger.warning(msg)
|
||||
self.transfer_aborted.set()
|
||||
self._cancel()
|
||||
|
||||
@abstractmethod
|
||||
def isactive(self):
|
||||
pass
|
||||
|
||||
@contextmanager
|
||||
def manage(self, sources, dest, direction):
|
||||
try:
|
||||
self.sources, self.dest, self.direction = sources, dest, direction
|
||||
m_thread = threading.Thread(target=self._monitor)
|
||||
|
||||
self.transfer_completed.clear()
|
||||
self.transfer_aborted.clear()
|
||||
self.transfer_started.set()
|
||||
|
||||
m_thread.start()
|
||||
yield self
|
||||
except BaseException:
|
||||
self.cancel(reason='exception during transfer')
|
||||
raise
|
||||
finally:
|
||||
self.transfer_completed.set()
|
||||
self.transfer_started.set()
|
||||
m_thread.join()
|
||||
self.transfer_started.clear()
|
||||
self.transfer_completed.clear()
|
||||
self.transfer_aborted.clear()
|
||||
|
||||
def _monitor(self):
|
||||
start_t = monotonic()
|
||||
self.transfer_completed.wait(self.start_transfer_poll_delay)
|
||||
while not self.transfer_completed.wait(self.poll_period):
|
||||
if not self.isactive():
|
||||
self.cancel(reason='transfer inactive')
|
||||
elif monotonic() - start_t > self.total_timeout:
|
||||
self.cancel(reason='transfer timed out')
|
||||
|
||||
|
||||
class PopenTransferManager(TransferManagerBase):
|
||||
|
||||
def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
|
||||
super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
|
||||
self.transfer = None
|
||||
self.last_sample = None
|
||||
|
||||
def _cancel(self):
|
||||
if self.transfer:
|
||||
self.transfer.cancel()
|
||||
self.transfer = None
|
||||
|
||||
def isactive(self):
|
||||
size_fn = self._push_dest_size if self.direction == 'push' else self._pull_dest_size
|
||||
curr_size = size_fn(self.dest)
|
||||
self.logger.debug('Polled file transfer, destination size {}'.format(curr_size))
|
||||
active = True if self.last_sample is None else curr_size > self.last_sample
|
||||
self.last_sample = curr_size
|
||||
return active
|
||||
|
||||
def set_transfer_and_wait(self, popen_bg_cmd):
|
||||
self.transfer = popen_bg_cmd
|
||||
ret = self.transfer.wait()
|
||||
|
||||
if ret and not self.transfer_aborted.is_set():
|
||||
raise subprocess.CalledProcessError(ret, self.transfer.popen.args)
|
||||
elif self.transfer_aborted.is_set():
|
||||
raise TimeoutError(self.transfer.popen.args)
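A hedged sketch of how a connection is expected to drive this manager (the `conn` object and file paths are assumptions, not part of the diff):

import os
import subprocess

# conn: an existing devlib connection object (assumed), with busybox set up.
manager = PopenTransferManager(conn)
with manager.manage(sources=['./big.bin'], dest='/tmp/big.bin', direction='pull'):
    popen = subprocess.Popen(['cp', './big.bin', '/tmp/big.bin'],
                             preexec_fn=os.setpgrp)
    # Raises CalledProcessError on failure, or TimeoutError if the monitor
    # thread aborted the transfer for being stalled or too slow.
    manager.set_transfer_and_wait(PopenBackgroundCommand(popen))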
|
||||
|
||||
|
||||
class SSHTransferManager(TransferManagerBase):
|
||||
|
||||
def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
|
||||
super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
|
||||
self.transferer = None
|
||||
self.progressed = False
|
||||
self.transferred = None
|
||||
self.to_transfer = None
|
||||
|
||||
def _cancel(self):
|
||||
self.transferer.close()
|
||||
|
||||
def isactive(self):
|
||||
progressed = self.progressed
|
||||
self.progressed = False
|
||||
msg = 'Polled transfer: {}% [{}B/{}B]'
|
||||
pc = format((self.transferred / self.to_transfer) * 100, '.2f')
|
||||
self.logger.debug(msg.format(pc, self.transferred, self.to_transfer))
|
||||
return progressed
|
||||
|
||||
@contextmanager
|
||||
def manage(self, sources, dest, direction, transferer):
|
||||
with super().manage(sources, dest, direction):
|
||||
try:
|
||||
self.progressed = False
|
||||
self.transferer = transferer # SFTPClient or SCPClient
|
||||
yield self
|
||||
except socket.error as e:
|
||||
if self.transfer_aborted.is_set():
|
||||
self.transfer_aborted.clear()
|
||||
method = 'SCP' if self.conn.use_scp else 'SFTP'
|
||||
raise TimeoutError('{} {}: {} -> {}'.format(method, self.direction, sources, self.dest))
|
||||
else:
|
||||
raise e
|
||||
|
||||
def progress_cb(self, *args):
|
||||
if self.transfer_started.is_set():
|
||||
self.progressed = True
|
||||
if len(args) == 3: # For SCPClient callbacks
|
||||
self.transferred = args[2]
|
||||
self.to_transfer = args[1]
|
||||
elif len(args) == 2: # For SFTPClient callbacks
|
||||
self.transferred = args[0]
|
||||
self.to_transfer = args[1]
|
@@ -106,17 +106,17 @@ class DerivedGfxInfoStats(DerivedFpsStats):
|
||||
frame_count += 1
|
||||
|
||||
if start_vsync is None:
|
||||
start_vsync = frame_data.Vsync_time_us
|
||||
end_vsync = frame_data.Vsync_time_us
|
||||
start_vsync = frame_data.Vsync_time_ns
|
||||
end_vsync = frame_data.Vsync_time_ns
|
||||
|
||||
frame_time = frame_data.FrameCompleted_time_us - frame_data.IntendedVsync_time_us
|
||||
frame_time = frame_data.FrameCompleted_time_ns - frame_data.IntendedVsync_time_ns
|
||||
pff = 1e9 / frame_time
|
||||
if pff > self.drop_threshold:
|
||||
per_frame_fps.append([pff])
|
||||
|
||||
if frame_count:
|
||||
duration = end_vsync - start_vsync
|
||||
fps = (1e6 * frame_count) / float(duration)
|
||||
fps = (1e9 * frame_count) / float(duration)
|
||||
else:
|
||||
duration = 0
|
||||
fps = 0
|
||||
@@ -133,15 +133,15 @@ class DerivedGfxInfoStats(DerivedFpsStats):
|
||||
def _process_with_pandas(self, measurements_csv):
|
||||
data = pd.read_csv(measurements_csv.path)
|
||||
data = data[data.Flags_flags == 0]
|
||||
frame_time = data.FrameCompleted_time_us - data.IntendedVsync_time_us
|
||||
per_frame_fps = (1e6 / frame_time)
|
||||
frame_time = data.FrameCompleted_time_ns - data.IntendedVsync_time_ns
|
||||
per_frame_fps = (1e9 / frame_time)
|
||||
keep_filter = per_frame_fps > self.drop_threshold
|
||||
per_frame_fps = per_frame_fps[keep_filter]
|
||||
per_frame_fps.name = 'fps'
|
||||
|
||||
frame_count = data.index.size
|
||||
if frame_count > 1:
|
||||
duration = data.Vsync_time_us.iloc[-1] - data.Vsync_time_us.iloc[0]
|
||||
duration = data.Vsync_time_ns.iloc[-1] - data.Vsync_time_ns.iloc[0]
|
||||
fps = (1e9 * frame_count) / float(duration)
|
||||
else:
|
||||
duration = 0
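A small worked example of the unit change in this hunk (numbers invented): frame timestamps are now in nanoseconds, so the scaling constant moves from 1e6 to 1e9 while the resulting fps stays the same.

frame_time_ns = 16_700_000           # FrameCompleted - IntendedVsync, in ns
per_frame_fps = 1e9 / frame_time_ns  # ~59.9 fps, as 1e6 / 16700 gave for µs data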
|
||||
|
@@ -15,11 +15,17 @@
|
||||
|
||||
class DevlibError(Exception):
|
||||
"""Base class for all Devlib exceptions."""
|
||||
|
||||
def __init__(self, *args):
|
||||
message = args[0] if args else None
|
||||
self._message = message
|
||||
|
||||
@property
|
||||
def message(self):
|
||||
if self.args:
|
||||
return self.args[0]
|
||||
return str(self)
|
||||
if self._message is not None:
|
||||
return self._message
|
||||
else:
|
||||
return str(self)
|
||||
|
||||
|
||||
class DevlibStableError(DevlibError):
|
||||
@@ -105,6 +111,16 @@ class WorkerThreadError(DevlibError):
|
||||
super(WorkerThreadError, self).__init__(message)
|
||||
|
||||
|
||||
class KernelConfigKeyError(KeyError, IndexError, DevlibError):
|
||||
"""
|
||||
Exception raised when a kernel config option cannot be found.
|
||||
|
||||
It inherits from :exc:`IndexError` for backward compatibility, and
|
||||
:exc:`KeyError` to behave like a regular mapping.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
def get_traceback(exc=None):
|
||||
"""
|
||||
Returns the string with the traceback for the specified exc
|
||||
@@ -117,7 +133,7 @@ def get_traceback(exc=None):
|
||||
if not exc:
|
||||
return None
|
||||
tb = exc[2]
|
||||
sio = io.BytesIO()
|
||||
sio = io.StringIO()
|
||||
traceback.print_tb(tb, file=sio)
|
||||
del tb # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
|
||||
return sio.getvalue()
|
||||
|
106	devlib/host.py
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from glob import iglob
|
||||
import glob
|
||||
import os
|
||||
import signal
|
||||
import shutil
|
||||
@@ -24,9 +24,12 @@ from pipes import quote
|
||||
|
||||
from devlib.exception import TargetTransientError, TargetStableError
|
||||
from devlib.utils.misc import check_output
|
||||
from devlib.connection import ConnectionBase, PopenBackgroundCommand
|
||||
|
||||
|
||||
PACKAGE_BIN_DIRECTORY = os.path.join(os.path.dirname(__file__), 'bin')
|
||||
|
||||
|
||||
# pylint: disable=redefined-outer-name
|
||||
def kill_children(pid, signal=signal.SIGKILL):
|
||||
with open('/proc/{0}/task/{0}/children'.format(pid), 'r') as fd:
|
||||
@@ -34,47 +37,75 @@ def kill_children(pid, signal=signal.SIGKILL):
|
||||
kill_children(cpid, signal)
|
||||
os.kill(cpid, signal)
|
||||
|
||||
class LocalConnection(object):
|
||||
|
||||
class LocalConnection(ConnectionBase):
|
||||
|
||||
name = 'local'
|
||||
host = 'localhost'
|
||||
|
||||
@property
|
||||
def connected_as_root(self):
|
||||
if self._connected_as_root is None:
|
||||
result = self.execute('id', as_root=False)
|
||||
self._connected_as_root = 'uid=0(' in result
|
||||
return self._connected_as_root
|
||||
|
||||
@connected_as_root.setter
|
||||
def connected_as_root(self, state):
|
||||
self._connected_as_root = state
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def __init__(self, platform=None, keep_password=True, unrooted=False,
|
||||
password=None, timeout=None):
|
||||
super().__init__()
|
||||
self._connected_as_root = None
|
||||
self.logger = logging.getLogger('local_connection')
|
||||
self.keep_password = keep_password
|
||||
self.unrooted = unrooted
|
||||
self.password = password
|
||||
|
||||
def push(self, source, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self.logger.debug('cp {} {}'.format(source, dest))
|
||||
shutil.copy(source, dest)
|
||||
|
||||
def pull(self, source, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self.logger.debug('cp {} {}'.format(source, dest))
|
||||
if ('*' in source or '?' in source) and os.path.isdir(dest):
|
||||
# Pull all files matching a wildcard expression
|
||||
for each_source in iglob(source):
|
||||
shutil.copy(each_source, dest)
|
||||
def _copy_path(self, source, dest):
|
||||
self.logger.debug('copying {} to {}'.format(source, dest))
|
||||
if os.path.isdir(source):
|
||||
# Behave similarly to cp, scp, adb push, etc. by creating a new
|
||||
# folder instead of merging hierarchies
|
||||
if os.path.exists(dest):
|
||||
dest = os.path.join(dest, os.path.basename(os.path.normpath(source)))
|
||||
|
||||
# Use distutils copy_tree since it behaves the same as
|
||||
# shutil.copytree except that it won't fail if some folders
|
||||
# already exist.
|
||||
#
|
||||
# Mirror the behavior of all other targets which only copy the
|
||||
# content without metadata
|
||||
copy_tree(source, dest, preserve_mode=False, preserve_times=False)
|
||||
else:
|
||||
if os.path.isdir(source):
|
||||
# Use distutils to allow copying into an existing directory structure.
|
||||
copy_tree(source, dest)
|
||||
else:
|
||||
shutil.copy(source, dest)
|
||||
shutil.copy(source, dest)
|
||||
|
||||
def _copy_paths(self, sources, dest):
|
||||
for source in sources:
|
||||
self._copy_path(source, dest)
|
||||
|
||||
def push(self, sources, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self._copy_paths(sources, dest)
|
||||
|
||||
def pull(self, sources, dest, timeout=None, as_root=False): # pylint: disable=unused-argument
|
||||
self._copy_paths(sources, dest)
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def execute(self, command, timeout=None, check_exit_code=True,
|
||||
as_root=False, strip_colors=True, will_succeed=False):
|
||||
self.logger.debug(command)
|
||||
if as_root:
|
||||
use_sudo = as_root and not self.connected_as_root
|
||||
if use_sudo:
|
||||
if self.unrooted:
|
||||
raise TargetStableError('unrooted')
|
||||
password = self._get_password()
|
||||
command = 'echo {} | sudo -S '.format(quote(password)) + command
|
||||
command = "echo {} | sudo -p ' ' -S -- sh -c {}".format(quote(password), quote(command))
|
||||
ignore = None if check_exit_code else 'all'
|
||||
try:
|
||||
return check_output(command, shell=True, timeout=timeout, ignore=ignore)[0]
|
||||
stdout, stderr = check_output(command, shell=True, timeout=timeout, ignore=ignore)
|
||||
except subprocess.CalledProcessError as e:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'.format(
|
||||
e.returncode, command, e.output)
|
||||
@@ -83,20 +114,49 @@ class LocalConnection(object):
|
||||
else:
|
||||
raise TargetStableError(message)
|
||||
|
||||
# Remove the one-character prompt of sudo -S -p
|
||||
if use_sudo and stderr:
|
||||
stderr = stderr[1:]
|
||||
|
||||
return stdout + stderr
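For reference, a hedged illustration of the wrapping performed above (the password is obviously invented):

from shlex import quote

password = 'hunter2'
command = 'cat /sys/kernel/debug/sched_features'
wrapped = "echo {} | sudo -p ' ' -S -- sh -c {}".format(quote(password),
                                                        quote(command))
# sudo emits its one-character prompt (' ') on stderr, which is why the
# first character of stderr is dropped before stdout and stderr are joined.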
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
if as_root:
|
||||
if as_root and not self.connected_as_root:
|
||||
if self.unrooted:
|
||||
raise TargetStableError('unrooted')
|
||||
password = self._get_password()
|
||||
command = 'echo {} | sudo -S '.format(quote(password)) + command
|
||||
return subprocess.Popen(command, stdout=stdout, stderr=stderr, shell=True)
|
||||
# The sudo prompt will add a space on stderr, but we cannot filter
|
||||
# it out here
|
||||
command = "echo {} | sudo -p ' ' -S -- sh -c {}".format(quote(password), quote(command))
|
||||
|
||||
def close(self):
|
||||
# Make sure to get a new PGID so PopenBackgroundCommand() can kill
|
||||
# all sub processes that could be started without troubles.
|
||||
def preexec_fn():
|
||||
os.setpgrp()
|
||||
|
||||
popen = subprocess.Popen(
|
||||
command,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
shell=True,
|
||||
preexec_fn=preexec_fn,
|
||||
)
|
||||
bg_cmd = PopenBackgroundCommand(popen)
|
||||
self._current_bg_cmds.add(bg_cmd)
|
||||
return bg_cmd
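A minimal standalone sketch (outside devlib) of why the `os.setpgrp()` call matters: the child becomes its own process group leader, so a single `killpg()` reaches the wrapping shell and everything it spawned.

import os
import signal
import subprocess

popen = subprocess.Popen('sleep 60 & sleep 60', shell=True,
                         preexec_fn=os.setpgrp)
os.killpg(os.getpgid(popen.pid), signal.SIGTERM)  # terminates both sleeps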
|
||||
|
||||
def _close(self):
|
||||
pass
|
||||
|
||||
def cancel_running_command(self):
|
||||
pass
|
||||
|
||||
def wait_for_device(self, timeout=30):
|
||||
return
|
||||
|
||||
def reboot_bootloader(self, timeout=30):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _get_password(self):
|
||||
if self.password:
|
||||
return self.password
|
||||
|
@@ -97,20 +97,30 @@ _measurement_types = [
|
||||
# convert without being familiar with individual instruments.
|
||||
MeasurementType('time', 'seconds', 'time',
|
||||
conversions={
|
||||
'time_us': lambda x: x * 1000000,
|
||||
'time_ms': lambda x: x * 1000,
|
||||
'time_us': lambda x: x * 1e6,
|
||||
'time_ms': lambda x: x * 1e3,
|
||||
'time_ns': lambda x: x * 1e9,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_us', 'microseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1000000,
|
||||
'time_ms': lambda x: x / 1000,
|
||||
'time': lambda x: x / 1e6,
|
||||
'time_ms': lambda x: x / 1e3,
|
||||
'time_ns': lambda x: x * 1e3,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_ms', 'milliseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1000,
|
||||
'time_us': lambda x: x * 1000,
|
||||
'time': lambda x: x / 1e3,
|
||||
'time_us': lambda x: x * 1e3,
|
||||
'time_ns': lambda x: x * 1e6,
|
||||
}
|
||||
),
|
||||
MeasurementType('time_ns', 'nanoseconds', 'time',
|
||||
conversions={
|
||||
'time': lambda x: x / 1e9,
|
||||
'time_ms': lambda x: x / 1e6,
|
||||
'time_us': lambda x: x / 1e3,
|
||||
}
|
||||
),
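An illustrative round trip through the new conversions, assuming the module-level MEASUREMENT_TYPES mapping built from this list:

from devlib.instrument import MEASUREMENT_TYPES

print(MEASUREMENT_TYPES['time_ns'].convert(1_500_000_000, 'time'))  # 1.5 (s)
print(MEASUREMENT_TYPES['time_us'].convert(2.0, 'time_ns'))         # 2000.0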
|
||||
|
||||
|
@@ -58,12 +58,14 @@ class AcmeCapeInstrument(Instrument):
|
||||
iio_capture=which('iio-capture'),
|
||||
host='baylibre-acme.local',
|
||||
iio_device='iio:device0',
|
||||
buffer_size=256):
|
||||
buffer_size=256,
|
||||
keep_raw=False):
|
||||
super(AcmeCapeInstrument, self).__init__(target)
|
||||
self.iio_capture = iio_capture
|
||||
self.host = host
|
||||
self.iio_device = iio_device
|
||||
self.buffer_size = buffer_size
|
||||
self.keep_raw = keep_raw
|
||||
self.sample_rate_hz = 100
|
||||
if self.iio_capture is None:
|
||||
raise HostError('Missing iio-capture binary')
|
||||
@@ -87,7 +89,8 @@ class AcmeCapeInstrument(Instrument):
|
||||
params = dict(
|
||||
iio_capture=self.iio_capture,
|
||||
host=self.host,
|
||||
buffer_size=self.buffer_size,
|
||||
# This must be a string for quote()
|
||||
buffer_size=str(self.buffer_size),
|
||||
iio_device=self.iio_device,
|
||||
outfile=self.raw_data_file
|
||||
)
|
||||
@@ -158,3 +161,8 @@ class AcmeCapeInstrument(Instrument):
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
||||
|
||||
def teardown(self):
|
||||
if not self.keep_raw:
|
||||
if os.path.isfile(self.raw_data_file):
|
||||
os.remove(self.raw_data_file)
|
||||
|
@@ -71,7 +71,7 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
|
||||
MAX_CHANNELS = 12 # 4 Arm Energy Probes
|
||||
|
||||
def __init__(self, target, config_file='./config-aep', ):
|
||||
def __init__(self, target, config_file='./config-aep', keep_raw=False):
|
||||
super(ArmEnergyProbeInstrument, self).__init__(target)
|
||||
self.arm_probe = which('arm-probe')
|
||||
if self.arm_probe is None:
|
||||
@@ -80,6 +80,7 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
self.attributes = ['power', 'voltage', 'current']
|
||||
self.sample_rate_hz = 10000
|
||||
self.config_file = config_file
|
||||
self.keep_raw = keep_raw
|
||||
|
||||
self.parser = AepParser()
|
||||
#TODO make it generic
|
||||
@@ -142,3 +143,8 @@ class ArmEnergyProbeInstrument(Instrument):
|
||||
|
||||
def get_raw(self):
|
||||
return [self.output_file_raw]
|
||||
|
||||
def teardown(self):
|
||||
if not self.keep_raw:
|
||||
if os.path.isfile(self.output_file_raw):
|
||||
os.remove(self.output_file_raw)
|
||||
|
@@ -14,20 +14,23 @@
|
||||
#
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
from itertools import chain
|
||||
import time
|
||||
from itertools import chain, zip_longest
|
||||
|
||||
from devlib.host import PACKAGE_BIN_DIRECTORY
|
||||
from devlib.instrument import Instrument, MeasurementsCsv, CONTINUOUS
|
||||
from devlib.exception import HostError
|
||||
from devlib.utils.csvutil import csvwriter, create_reader
|
||||
from devlib.utils.misc import unique
|
||||
|
||||
try:
|
||||
from daqpower.client import execute_command, Status
|
||||
from daqpower.config import DeviceConfiguration, ServerConfiguration
|
||||
from daqpower.client import DaqClient
|
||||
from daqpower.config import DeviceConfiguration
|
||||
except ImportError as e:
|
||||
execute_command, Status = None, None
|
||||
DeviceConfiguration, ServerConfiguration, ConfigurationError = None, None, None
|
||||
DaqClient = None
|
||||
DeviceConfiguration = None
|
||||
import_error_mesg = e.args[0] if e.args else str(e)
|
||||
|
||||
|
||||
@@ -44,26 +47,30 @@ class DaqInstrument(Instrument):
|
||||
dv_range=0.2,
|
||||
sample_rate_hz=10000,
|
||||
channel_map=(0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23),
|
||||
keep_raw=False,
|
||||
time_as_clock_boottime=True
|
||||
):
|
||||
# pylint: disable=no-member
|
||||
super(DaqInstrument, self).__init__(target)
|
||||
self.keep_raw = keep_raw
|
||||
self._need_reset = True
|
||||
self._raw_files = []
|
||||
if execute_command is None:
|
||||
self.tempdir = None
|
||||
self.target_boottime_clock_at_start = 0.0
|
||||
if DaqClient is None:
|
||||
raise HostError('Could not import "daqpower": {}'.format(import_error_mesg))
|
||||
if labels is None:
|
||||
labels = ['PORT_{}'.format(i) for i in range(len(resistor_values))]
|
||||
if len(labels) != len(resistor_values):
|
||||
raise ValueError('"labels" and "resistor_values" must be of the same length')
|
||||
self.server_config = ServerConfiguration(host=host,
|
||||
port=port)
|
||||
result = self.execute('list_devices')
|
||||
if result.status == Status.OK:
|
||||
if device_id not in result.data:
|
||||
self.daq_client = DaqClient(host, port)
|
||||
try:
|
||||
devices = self.daq_client.list_devices()
|
||||
if device_id not in devices:
|
||||
msg = 'Device "{}" is not found on the DAQ server. Available devices are: "{}"'
|
||||
raise ValueError(msg.format(device_id, ', '.join(result.data)))
|
||||
elif result.status != Status.OKISH:
|
||||
raise HostError('Problem querying DAQ server: {}'.format(result.message))
|
||||
raise ValueError(msg.format(device_id, ', '.join(devices)))
|
||||
except Exception as e:
|
||||
raise HostError('Problem querying DAQ server: {}'.format(e))
|
||||
|
||||
self.device_config = DeviceConfiguration(device_id=device_id,
|
||||
v_range=v_range,
|
||||
@@ -73,36 +80,63 @@ class DaqInstrument(Instrument):
|
||||
channel_map=channel_map,
|
||||
labels=labels)
|
||||
self.sample_rate_hz = sample_rate_hz
|
||||
self.time_as_clock_boottime = time_as_clock_boottime
|
||||
|
||||
self.add_channel('Time', 'time')
|
||||
for label in labels:
|
||||
for kind in ['power', 'voltage']:
|
||||
self.add_channel(label, kind)
|
||||
|
||||
if time_as_clock_boottime:
|
||||
host_path = os.path.join(PACKAGE_BIN_DIRECTORY, self.target.abi,
|
||||
'get_clock_boottime')
|
||||
self.clock_boottime_cmd = self.target.install_if_needed(host_path,
|
||||
search_system_binaries=False)
|
||||
|
||||
def calculate_boottime_offset(self):
|
||||
time_before = time.time()
|
||||
out = self.target.execute(self.clock_boottime_cmd)
|
||||
time_after = time.time()
|
||||
|
||||
remote_clock_boottime = float(out)
|
||||
propagation_delay = (time_after - time_before) / 2
|
||||
boottime_at_end = remote_clock_boottime + propagation_delay
|
||||
|
||||
return time_after - boottime_at_end
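A worked example of the offset computed above (numbers invented): the round trip is assumed to be symmetric, so half of it is added to the target's reply before the two clocks are compared.

time_before = 100.000             # host time.time() before the query
remote_clock_boottime = 5000.010  # target CLOCK_BOOTTIME value returned
time_after = 100.020              # host time.time() after the reply

propagation_delay = (100.020 - 100.000) / 2   # 0.010 s
boottime_at_end = 5000.010 + 0.010            # 5000.020
offset = 100.020 - 5000.020                   # host epoch minus target boottime
# start() later subtracts this offset from the host-side start time so the
# first DAQ sample is expressed in the target's CLOCK_BOOTTIME.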
|
||||
|
||||
def reset(self, sites=None, kinds=None, channels=None):
|
||||
super(DaqInstrument, self).reset(sites, kinds, channels)
|
||||
self.execute('close')
|
||||
result = self.execute('configure', config=self.device_config)
|
||||
if not result.status == Status.OK: # pylint: disable=no-member
|
||||
raise HostError(result.message)
|
||||
self.daq_client.close()
|
||||
self.daq_client.configure(self.device_config)
|
||||
self._need_reset = False
|
||||
self._raw_files = []
|
||||
|
||||
def start(self):
|
||||
if self._need_reset:
|
||||
self.reset()
|
||||
self.execute('start')
|
||||
# Preserve channel order
|
||||
self.reset(channels=self.channels.keys())
|
||||
|
||||
if self.time_as_clock_boottime:
|
||||
target_boottime_offset = self.calculate_boottime_offset()
|
||||
time_start = time.time()
|
||||
|
||||
self.daq_client.start()
|
||||
|
||||
if self.time_as_clock_boottime:
|
||||
time_end = time.time()
|
||||
self.target_boottime_clock_at_start = (time_start + time_end) / 2 - target_boottime_offset
|
||||
|
||||
def stop(self):
|
||||
self.execute('stop')
|
||||
self.daq_client.stop()
|
||||
self._need_reset = True
|
||||
|
||||
def get_data(self, outfile): # pylint: disable=R0914
|
||||
tempdir = tempfile.mkdtemp(prefix='daq-raw-')
|
||||
self.execute('get_data', output_directory=tempdir)
|
||||
self.tempdir = tempfile.mkdtemp(prefix='daq-raw-')
|
||||
self.daq_client.get_data(self.tempdir)
|
||||
raw_file_map = {}
|
||||
for entry in os.listdir(tempdir):
|
||||
for entry in os.listdir(self.tempdir):
|
||||
site = os.path.splitext(entry)[0]
|
||||
path = os.path.join(tempdir, entry)
|
||||
path = os.path.join(self.tempdir, entry)
|
||||
raw_file_map[site] = path
|
||||
self._raw_files.append(path)
|
||||
|
||||
@@ -117,32 +151,32 @@ class DaqInstrument(Instrument):
|
||||
site_readers[site] = reader
|
||||
file_handles.append(fh)
|
||||
except KeyError:
|
||||
message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
|
||||
raise HostError(message.format(site, tempdir))
|
||||
if not site.startswith("Time"):
|
||||
message = 'Could not get DAQ trace for {}; Obtained traces are in {}'
|
||||
raise HostError(message.format(site, self.tempdir))
|
||||
|
||||
# The first row is the headers
|
||||
channel_order = []
|
||||
channel_order = ['Time_time']
|
||||
for site, reader in site_readers.items():
|
||||
channel_order.extend(['{}_{}'.format(site, kind)
|
||||
for kind in next(reader)])
|
||||
|
||||
def _read_next_rows():
|
||||
parts = []
|
||||
for reader in site_readers.values():
|
||||
try:
|
||||
parts.extend(next(reader))
|
||||
except StopIteration:
|
||||
parts.extend([None, None])
|
||||
return list(chain(parts))
|
||||
def _read_rows():
|
||||
row_iter = zip_longest(*site_readers.values(), fillvalue=(None, None))
|
||||
for raw_row in row_iter:
|
||||
raw_row = list(chain.from_iterable(raw_row))
|
||||
raw_row.insert(0, _read_rows.row_time_s)
|
||||
yield raw_row
|
||||
_read_rows.row_time_s += 1.0 / self.sample_rate_hz
|
||||
|
||||
_read_rows.row_time_s = self.target_boottime_clock_at_start
|
||||
|
||||
with csvwriter(outfile) as writer:
|
||||
field_names = [c.label for c in self.active_channels]
|
||||
writer.writerow(field_names)
|
||||
raw_row = _read_next_rows()
|
||||
while any(raw_row):
|
||||
for raw_row in _read_rows():
|
||||
row = [raw_row[channel_order.index(f)] for f in field_names]
|
||||
writer.writerow(row)
|
||||
raw_row = _read_next_rows()
|
||||
|
||||
return MeasurementsCsv(outfile, self.active_channels, self.sample_rate_hz)
|
||||
finally:
|
||||
@@ -153,7 +187,7 @@ class DaqInstrument(Instrument):
|
||||
return self._raw_files
|
||||
|
||||
def teardown(self):
|
||||
self.execute('close')
|
||||
|
||||
def execute(self, command, **kwargs):
|
||||
return execute_command(self.server_config, command, **kwargs)
|
||||
self.daq_client.close()
|
||||
if not self.keep_raw:
|
||||
if self.tempdir and os.path.isdir(self.tempdir):
|
||||
shutil.rmtree(self.tempdir)
|
||||
|
@@ -34,9 +34,11 @@ class EnergyProbeInstrument(Instrument):
|
||||
def __init__(self, target, resistor_values,
|
||||
labels=None,
|
||||
device_entry='/dev/ttyACM0',
|
||||
keep_raw=False
|
||||
):
|
||||
super(EnergyProbeInstrument, self).__init__(target)
|
||||
self.resistor_values = resistor_values
|
||||
self.keep_raw = keep_raw
|
||||
if labels is not None:
|
||||
self.labels = labels
|
||||
else:
|
||||
@@ -126,3 +128,8 @@ class EnergyProbeInstrument(Instrument):
|
||||
|
||||
def get_raw(self):
|
||||
return [self.raw_data_file]
|
||||
|
||||
def teardown(self):
|
||||
if not self.keep_raw:
|
||||
if os.path.isfile(self.raw_data_file):
|
||||
os.remove(self.raw_data_file)
|
||||
|
@@ -14,6 +14,8 @@
|
||||
#
|
||||
|
||||
from __future__ import division
|
||||
import os
|
||||
|
||||
from devlib.instrument import (Instrument, CONTINUOUS,
|
||||
MeasurementsCsv, MeasurementType)
|
||||
from devlib.utils.rendering import (GfxinfoFrameCollector,
|
||||
@@ -70,6 +72,11 @@ class FramesInstrument(Instrument):
|
||||
def _init_channels(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def teardown(self):
|
||||
if not self.keep_raw:
|
||||
if os.path.isfile(self._raw_file):
|
||||
os.remove(self._raw_file)
|
||||
|
||||
|
||||
class GfxInfoFramesInstrument(FramesInstrument):
|
||||
|
||||
@@ -82,7 +89,7 @@ class GfxInfoFramesInstrument(FramesInstrument):
|
||||
if entry == 'Flags':
|
||||
self.add_channel('Flags', MeasurementType('flags', 'flags'))
|
||||
else:
|
||||
self.add_channel(entry, 'time_us')
|
||||
self.add_channel(entry, 'time_ns')
|
||||
self.header = [chan.label for chan in self.channels.values()]
|
||||
|
||||
|
||||
|
@@ -91,7 +91,7 @@ class FlashModule(Module):
|
||||
|
||||
kind = 'flash'
|
||||
|
||||
def __call__(self, image_bundle=None, images=None, boot_config=None):
|
||||
def __call__(self, image_bundle=None, images=None, boot_config=None, connect=True):
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
|
@@ -54,7 +54,7 @@ class FastbootFlashModule(FlashModule):
|
||||
def probe(target):
|
||||
return target.os == 'android'
|
||||
|
||||
def __call__(self, image_bundle=None, images=None, bootargs=None):
|
||||
def __call__(self, image_bundle=None, images=None, bootargs=None, connect=True):
|
||||
if bootargs:
|
||||
raise ValueError('{} does not support boot configuration'.format(self.name))
|
||||
self.prelude_done = False
|
||||
@@ -67,7 +67,8 @@ class FastbootFlashModule(FlashModule):
|
||||
self.logger.debug('flashing {}'.format(partition))
|
||||
self._flash_image(self.target, partition, expand_path(image_path))
|
||||
fastboot_command('reboot')
|
||||
self.target.connect(timeout=180)
|
||||
if connect:
|
||||
self.target.connect(timeout=180)
|
||||
|
||||
def _validate_image_bundle(self, image_bundle):
|
||||
if not tarfile.is_tarfile(image_bundle):
|
||||
|
@@ -124,11 +124,10 @@ class Controller(object):
|
||||
def move_tasks(self, source, dest, exclude=None):
|
||||
if exclude is None:
|
||||
exclude = []
|
||||
try:
|
||||
srcg = self._cgroups[source]
|
||||
dstg = self._cgroups[dest]
|
||||
except KeyError as e:
|
||||
raise ValueError('Unknown group: {}'.format(e))
|
||||
|
||||
srcg = self.cgroup(source)
|
||||
dstg = self.cgroup(dest)
|
||||
|
||||
self.target._execute_util( # pylint: disable=protected-access
|
||||
'cgroups_tasks_move {} {} \'{}\''.format(
|
||||
srcg.directory, dstg.directory, exclude),
|
||||
@@ -158,18 +157,18 @@ class Controller(object):
|
||||
raise ValueError('wrong type for "exclude" parameter, '
|
||||
'it must be a str or a list')
|
||||
|
||||
logging.debug('Moving all tasks into %s', dest)
|
||||
self.logger.debug('Moving all tasks into %s', dest)
|
||||
|
||||
# Build list of tasks to exclude
|
||||
grep_filters = ''
|
||||
for comm in exclude:
|
||||
grep_filters += '-e {} '.format(comm)
|
||||
logging.debug(' using grep filter: %s', grep_filters)
|
||||
self.logger.debug(' using grep filter: %s', grep_filters)
|
||||
if grep_filters != '':
|
||||
logging.debug(' excluding tasks which name matches:')
|
||||
logging.debug(' %s', ', '.join(exclude))
|
||||
self.logger.debug(' excluding tasks which name matches:')
|
||||
self.logger.debug(' %s', ', '.join(exclude))
|
||||
|
||||
for cgroup in self._cgroups:
|
||||
for cgroup in self.list_all():
|
||||
if cgroup != dest:
|
||||
self.move_tasks(cgroup, dest, grep_filters)
|
||||
|
||||
@@ -262,8 +261,9 @@ class CGroup(object):
|
||||
|
||||
# Control cgroup path
|
||||
self.directory = controller.mount_point
|
||||
|
||||
if name != '/':
|
||||
self.directory = self.target.path.join(controller.mount_point, name[1:])
|
||||
self.directory = self.target.path.join(controller.mount_point, name.strip('/'))
|
||||
|
||||
# Setup path for tasks file
|
||||
self.tasks_file = self.target.path.join(self.directory, 'tasks')
|
||||
@@ -287,10 +287,8 @@ class CGroup(object):
|
||||
def get(self):
|
||||
conf = {}
|
||||
|
||||
logging.debug('Reading %s attributes from:',
|
||||
self.controller.kind)
|
||||
logging.debug(' %s',
|
||||
self.directory)
|
||||
self.logger.debug('Reading %s attributes from:', self.controller.kind)
|
||||
self.logger.debug(' %s', self.directory)
|
||||
output = self.target._execute_util( # pylint: disable=protected-access
|
||||
'cgroups_get_attributes {} {}'.format(
|
||||
self.directory, self.controller.kind),
|
||||
@@ -329,7 +327,7 @@ class CGroup(object):
|
||||
|
||||
def get_tasks(self):
|
||||
task_ids = self.target.read_value(self.tasks_file).split()
|
||||
logging.debug('Tasks: %s', task_ids)
|
||||
self.logger.debug('Tasks: %s', task_ids)
|
||||
return list(map(int, task_ids))
|
||||
|
||||
def add_task(self, tid):
|
||||
|
@@ -111,7 +111,7 @@ class CpufreqModule(Module):
|
||||
:Keyword Arguments: Governor tunables, See :meth:`set_governor_tunables`
|
||||
"""
|
||||
if not cpus:
|
||||
cpus = range(self.target.number_of_cpus)
|
||||
cpus = self.target.list_online_cpus()
|
||||
|
||||
# Setting a governor & tunables for a cpu will set them for all cpus
|
||||
# in the same clock domain, so only manipulating one cpu per domain
|
||||
@@ -212,7 +212,7 @@ class CpufreqModule(Module):
|
||||
|
||||
@memoized
|
||||
def list_frequencies(self, cpu):
|
||||
"""Returns a list of frequencies supported by the cpu or an empty list
|
||||
"""Returns a sorted list of frequencies supported by the cpu or an empty list
|
||||
if none could be found."""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
@@ -234,7 +234,7 @@ class CpufreqModule(Module):
|
||||
raise
|
||||
|
||||
available_frequencies = list(map(int, reversed([f for f, _ in zip(out_iter, out_iter)])))
|
||||
return available_frequencies
|
||||
return sorted(available_frequencies)
|
||||
|
||||
@memoized
|
||||
def get_max_available_frequency(self, cpu):
|
||||
|
@@ -15,6 +15,9 @@
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
from past.builtins import basestring
|
||||
|
||||
from operator import attrgetter
|
||||
from pprint import pformat
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.types import integer, boolean
|
||||
|
||||
@@ -96,40 +99,35 @@ class Cpuidle(Module):
|
||||
|
||||
def __init__(self, target):
|
||||
super(Cpuidle, self).__init__(target)
|
||||
self._states = {}
|
||||
|
||||
basepath = '/sys/devices/system/cpu/'
|
||||
values_tree = self.target.read_tree_values(basepath, depth=4, check_exit_code=False)
|
||||
i = 0
|
||||
cpu_id = 'cpu{}'.format(i)
|
||||
while cpu_id in values_tree:
|
||||
cpu_node = values_tree[cpu_id]
|
||||
|
||||
if 'cpuidle' in cpu_node:
|
||||
idle_node = cpu_node['cpuidle']
|
||||
self._states[cpu_id] = []
|
||||
j = 0
|
||||
state_id = 'state{}'.format(j)
|
||||
while state_id in idle_node:
|
||||
state_node = idle_node[state_id]
|
||||
state = CpuidleState(
|
||||
self._states = {
|
||||
cpu_name: sorted(
|
||||
(
|
||||
CpuidleState(
|
||||
self.target,
|
||||
index=j,
|
||||
path=self.target.path.join(basepath, cpu_id, 'cpuidle', state_id),
|
||||
# state_name is formatted as "state42"
|
||||
index=int(state_name[len('state'):]),
|
||||
path=self.target.path.join(basepath, cpu_name, 'cpuidle', state_name),
|
||||
name=state_node['name'],
|
||||
desc=state_node['desc'],
|
||||
power=int(state_node['power']),
|
||||
latency=int(state_node['latency']),
|
||||
residency=int(state_node['residency']) if 'residency' in state_node else None,
|
||||
)
|
||||
msg = 'Adding {} state {}: {} {}'
|
||||
self.logger.debug(msg.format(cpu_id, j, state.name, state.desc))
|
||||
self._states[cpu_id].append(state)
|
||||
j += 1
|
||||
state_id = 'state{}'.format(j)
|
||||
for state_name, state_node in cpu_node['cpuidle'].items()
|
||||
if state_name.startswith('state')
|
||||
),
|
||||
key=attrgetter('index'),
|
||||
)
|
||||
|
||||
i += 1
|
||||
cpu_id = 'cpu{}'.format(i)
|
||||
for cpu_name, cpu_node in values_tree.items()
|
||||
if cpu_name.startswith('cpu') and 'cpuidle' in cpu_node
|
||||
}
|
||||
|
||||
self.logger.debug('Adding cpuidle states:\n{}'.format(pformat(self._states)))
|
||||
|
||||
def get_states(self, cpu=0):
|
||||
if isinstance(cpu, int):
|
||||
@@ -173,4 +171,7 @@ class Cpuidle(Module):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_driver'))
|
||||
|
||||
def get_governor(self):
|
||||
return self.target.read_value(self.target.path.join(self.root_path, 'current_governor_ro'))
|
||||
path = self.target.path.join(self.root_path, 'current_governor_ro')
|
||||
if not self.target.file_exists(path):
|
||||
path = self.target.path.join(self.root_path, 'current_governor')
|
||||
return self.target.read_value(path)
|
||||
|
@@ -137,7 +137,7 @@ class HwmonModule(Module):
|
||||
self.scan()
|
||||
|
||||
def scan(self):
|
||||
values_tree = self.target.read_tree_values(self.root, depth=3)
|
||||
values_tree = self.target.read_tree_values(self.root, depth=3, tar=True)
|
||||
for entry_id, fields in values_tree.items():
|
||||
path = self.target.path.join(self.root, entry_id)
|
||||
name = fields.pop('name', None)
|
||||
|
@@ -21,6 +21,7 @@ from past.builtins import basestring
|
||||
|
||||
from devlib.module import Module
|
||||
from devlib.utils.misc import memoized
|
||||
from devlib.utils.types import boolean
|
||||
|
||||
|
||||
class SchedProcFSNode(object):
|
||||
@@ -51,6 +52,12 @@ class SchedProcFSNode(object):
|
||||
|
||||
_re_procfs_node = re.compile(r"(?P<name>.*\D)(?P<digits>\d+)$")
|
||||
|
||||
PACKABLE_ENTRIES = [
|
||||
"cpu",
|
||||
"domain",
|
||||
"group"
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def _ends_with_digits(node):
|
||||
if not isinstance(node, basestring):
|
||||
@@ -70,18 +77,19 @@ class SchedProcFSNode(object):
|
||||
"""
|
||||
:returns: The name of the procfs node
|
||||
"""
|
||||
return re.search(SchedProcFSNode._re_procfs_node, node).group("name")
|
||||
match = re.search(SchedProcFSNode._re_procfs_node, node)
|
||||
if match:
|
||||
return match.group("name")
|
||||
|
||||
@staticmethod
|
||||
def _packable(node, entries):
|
||||
return node
|
||||
|
||||
@classmethod
|
||||
def _packable(cls, node):
|
||||
"""
|
||||
:returns: Whether it makes sense to pack a node into a common entry
|
||||
"""
|
||||
return (SchedProcFSNode._ends_with_digits(node) and
|
||||
any([SchedProcFSNode._ends_with_digits(x) and
|
||||
SchedProcFSNode._node_digits(x) != SchedProcFSNode._node_digits(node) and
|
||||
SchedProcFSNode._node_name(x) == SchedProcFSNode._node_name(node)
|
||||
for x in entries]))
|
||||
SchedProcFSNode._node_name(node) in cls.PACKABLE_ENTRIES)
|
||||
|
||||
@staticmethod
|
||||
def _build_directory(node_name, node_data):
|
||||
@@ -118,7 +126,7 @@ class SchedProcFSNode(object):
|
||||
# Find which entries can be packed into a common entry
|
||||
packables = {
|
||||
node : SchedProcFSNode._node_name(node) + "s"
|
||||
for node in list(nodes.keys()) if SchedProcFSNode._packable(node, list(nodes.keys()))
|
||||
for node in list(nodes.keys()) if SchedProcFSNode._packable(node)
|
||||
}
|
||||
|
||||
self._dyn_attrs = {}
|
||||
@@ -227,13 +235,13 @@ class SchedProcFSData(SchedProcFSNode):
|
||||
# Even if we have a CPU entry, it can be empty (e.g. hotplugged out)
|
||||
# Make sure some data is there
|
||||
for cpu in cpus:
|
||||
if target.file_exists(target.path.join(path, cpu, "domain0", "name")):
|
||||
if target.file_exists(target.path.join(path, cpu, "domain0", "flags")):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def __init__(self, target, path=None):
|
||||
if not path:
|
||||
if path is None:
|
||||
path = self.sched_domain_root
|
||||
|
||||
procfs = target.read_tree_values(path, depth=self._read_depth)
|
||||
@@ -251,7 +259,128 @@ class SchedModule(Module):
|
||||
logger = logging.getLogger(SchedModule.name)
|
||||
SchedDomainFlag.check_version(target, logger)
|
||||
|
||||
return SchedProcFSData.available(target)
|
||||
# It makes sense to load this module if at least one of those
|
||||
# functionalities is enabled
|
||||
schedproc = SchedProcFSData.available(target)
|
||||
debug = SchedModule.target_has_debug(target)
|
||||
dmips = any([target.file_exists(SchedModule.cpu_dmips_capacity_path(target, cpu))
|
||||
for cpu in target.list_online_cpus()])
|
||||
|
||||
logger.info("Scheduler sched_domain procfs entries %s",
|
||||
"found" if schedproc else "not found")
|
||||
logger.info("Detected kernel compiled with SCHED_DEBUG=%s",
|
||||
"y" if debug else "n")
|
||||
logger.info("CPU capacity sysfs entries %s",
|
||||
"found" if dmips else "not found")
|
||||
|
||||
return schedproc or debug or dmips
|
||||
|
||||
def get_kernel_attributes(self, matching=None, check_exit_code=True):
|
||||
"""
|
||||
Get the value of scheduler attributes.
|
||||
|
||||
:param matching: an (optional) substring to filter the scheduler
|
||||
attributes to be returned.
|
||||
|
||||
The scheduler exposes a list of tunable attributes under:
|
||||
/proc/sys/kernel
|
||||
all starting with the "sched_" prefix.
|
||||
|
||||
This method returns a dictionary of all the "sched_" attributes exposed
|
||||
by the target kernel, with the prefix removed.
|
||||
It's possible to restrict the list of attributes by specifying a
|
||||
substring to be matched.
|
||||
|
||||
:returns: a dictionary of scheduler tunables
|
||||
"""
|
||||
command = 'sched_get_kernel_attributes {}'.format(
|
||||
matching if matching else ''
|
||||
)
|
||||
output = self.target._execute_util(command, as_root=self.target.is_rooted,
|
||||
check_exit_code=check_exit_code)
|
||||
result = {}
|
||||
for entry in output.strip().split('\n'):
|
||||
if ':' not in entry:
|
||||
continue
|
||||
path, value = entry.strip().split(':', 1)
|
||||
if value in ['0', '1']:
|
||||
value = bool(int(value))
|
||||
elif value.isdigit():
|
||||
value = int(value)
|
||||
result[path] = value
|
||||
return result
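A hedged example of the parsing above in isolation (the real output format comes from the target-side shell utility; these two lines are invented):

output = "latency_ns:18000000\ntunable_scaling:1"

result = {}
for entry in output.strip().split('\n'):
    if ':' not in entry:
        continue
    path, value = entry.strip().split(':', 1)
    if value in ['0', '1']:
        value = bool(int(value))
    elif value.isdigit():
        value = int(value)
    result[path] = value
# result == {'latency_ns': 18000000, 'tunable_scaling': True}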
|
||||
|
||||
def set_kernel_attribute(self, attr, value, verify=True):
|
||||
"""
|
||||
Set the value of a scheduler attribute.
|
||||
|
||||
:param attr: the attribute to set, without the "sched_" prefix
|
||||
:param value: the value to set
|
||||
:param verify: true to check that the requested value has been set
|
||||
|
||||
:raise TargetError: if the attribute cannot be set
|
||||
"""
|
||||
if isinstance(value, bool):
|
||||
value = '1' if value else '0'
|
||||
elif isinstance(value, int):
|
||||
value = str(value)
|
||||
path = '/proc/sys/kernel/sched_' + attr
|
||||
self.target.write_value(path, value, verify)
|
||||
|
||||
@classmethod
|
||||
def target_has_debug(cls, target):
|
||||
if target.config.get('SCHED_DEBUG') != 'y':
|
||||
return False
|
||||
return target.file_exists('/sys/kernel/debug/sched_features')
|
||||
|
||||
@property
|
||||
@memoized
|
||||
def has_debug(self):
|
||||
return self.target_has_debug(self.target)
|
||||
|
||||
def get_features(self):
|
||||
"""
|
||||
Get the status of each sched feature
|
||||
|
||||
:returns: a dictionary of features and their "is enabled" status
|
||||
"""
|
||||
if not self.has_debug:
|
||||
raise RuntimeError("sched_features not available")
|
||||
feats = self.target.read_value('/sys/kernel/debug/sched_features')
|
||||
features = {}
|
||||
for feat in feats.split():
|
||||
value = True
|
||||
if feat.startswith('NO'):
|
||||
feat = feat.replace('NO_', '', 1)
|
||||
value = False
|
||||
features[feat] = value
|
||||
return features
|
||||
|
||||
def set_feature(self, feature, enable, verify=True):
|
||||
"""
|
||||
Set the status of a specified scheduler feature
|
||||
|
||||
:param feature: the feature name to set
|
||||
:param enable: true to enable the feature, false otherwise
|
||||
|
||||
:raise ValueError: if the specified enable value is not bool
|
||||
:raise RuntimeError: if the specified feature cannot be set
|
||||
"""
|
||||
if not self.has_debug:
|
||||
raise RuntimeError("sched_features not available")
|
||||
feature = feature.upper()
|
||||
feat_value = feature
|
||||
if not boolean(enable):
|
||||
feat_value = 'NO_' + feat_value
|
||||
self.target.write_value('/sys/kernel/debug/sched_features',
|
||||
feat_value, verify=False)
|
||||
if not verify:
|
||||
return
|
||||
msg = 'Failed to set {}, feature not supported?'.format(feat_value)
|
||||
features = self.get_features()
|
||||
feat_value = features.get(feature, not enable)
|
||||
if feat_value != enable:
|
||||
raise RuntimeError(msg)
|
||||
|
||||
def get_cpu_sd_info(self, cpu):
|
||||
"""
|
||||
@@ -282,17 +411,26 @@ class SchedModule(Module):
|
||||
:returns: Whether energy model data is available for 'cpu'
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
sd = self.get_cpu_sd_info(cpu)
|
||||
|
||||
return sd.procfs["domain0"].get("group0", {}).get("energy", {}).get("cap_states") != None
|
||||
|
||||
@classmethod
|
||||
def cpu_dmips_capacity_path(cls, target, cpu):
|
||||
"""
|
||||
:returns: The target sysfs path where the dmips capacity data should be
|
||||
"""
|
||||
return target.path.join(
|
||||
cls.cpu_sysfs_root,
|
||||
'cpu{}/cpu_capacity'.format(cpu))
|
||||
|
||||
@memoized
|
||||
def has_dmips_capacity(self, cpu):
|
||||
"""
|
||||
:returns: Whether dmips capacity data is available for 'cpu'
|
||||
"""
|
||||
return self.target.file_exists(
|
||||
self.target.path.join(self.cpu_sysfs_root, 'cpu{}/cpu_capacity'.format(cpu))
|
||||
self.cpu_dmips_capacity_path(self.target, cpu)
|
||||
)
|
||||
|
||||
@memoized
|
||||
@@ -301,10 +439,13 @@ class SchedModule(Module):
|
||||
:returns: The maximum capacity value exposed by the EAS energy model
|
||||
"""
|
||||
if not sd:
|
||||
sd = SchedProcFSData(self.target, cpu)
|
||||
sd = self.get_cpu_sd_info(cpu)
|
||||
|
||||
cap_states = sd.domains[0].groups[0].energy.cap_states
|
||||
return int(cap_states.split('\t')[-2])
|
||||
cap_states_list = cap_states.split('\t')
|
||||
num_cap_states = sd.domains[0].groups[0].energy.nr_cap_states
|
||||
max_cap_index = -1 * int(len(cap_states_list) / num_cap_states)
|
||||
return int(cap_states_list[max_cap_index])
|
||||
|
||||
@memoized
|
||||
def get_dmips_capacity(self, cpu):
|
||||
@@ -312,14 +453,9 @@ class SchedModule(Module):
|
||||
:returns: The capacity value generated from the capacity-dmips-mhz DT entry
|
||||
"""
|
||||
return self.target.read_value(
|
||||
self.target.path.join(
|
||||
self.cpu_sysfs_root,
|
||||
'cpu{}/cpu_capacity'.format(cpu)
|
||||
),
|
||||
int
|
||||
self.cpu_dmips_capacity_path(self.target, cpu), int
|
||||
)
|
||||
|
||||
@memoized
|
||||
def get_capacities(self, default=None):
|
||||
"""
|
||||
:param default: Default capacity value to find if no data is
|
||||
@@ -330,16 +466,30 @@ class SchedModule(Module):
|
||||
:raises RuntimeError: Raised when no capacity information is
|
||||
found and 'default' is None
|
||||
"""
|
||||
cpus = list(range(self.target.number_of_cpus))
|
||||
cpus = self.target.list_online_cpus()
|
||||
|
||||
capacities = {}
|
||||
sd_info = self.get_sd_info()
|
||||
|
||||
for cpu in cpus:
|
||||
if self.has_dmips_capacity(cpu):
|
||||
capacities[cpu] = self.get_dmips_capacity(cpu)
|
||||
|
||||
missing_cpus = set(cpus).difference(capacities.keys())
|
||||
if not missing_cpus:
|
||||
return capacities
|
||||
|
||||
if not SchedProcFSData.available(self.target):
|
||||
if default != None:
|
||||
capacities.update({cpu : default for cpu in missing_cpus})
|
||||
return capacities
|
||||
else:
|
||||
raise RuntimeError(
|
||||
'No capacity data for cpus {}'.format(sorted(missing_cpus)))
|
||||
|
||||
sd_info = self.get_sd_info()
|
||||
for cpu in missing_cpus:
|
||||
if self.has_em(cpu, sd_info.cpus[cpu]):
|
||||
capacities[cpu] = self.get_em_capacity(cpu, sd_info.cpus[cpu])
|
||||
elif self.has_dmips_capacity(cpu):
|
||||
capacities[cpu] = self.get_dmips_capacity(cpu)
|
||||
else:
|
||||
if default != None:
|
||||
capacities[cpu] = default
|
||||
|
@@ -48,7 +48,7 @@ class ThermalZone(object):
|
||||
self.path = target.path.join(root, self.name)
|
||||
self.trip_points = {}
|
||||
|
||||
for entry in self.target.list_directory(self.path):
|
||||
for entry in self.target.list_directory(self.path, as_root=target.is_rooted):
|
||||
re_match = re.match('^trip_point_([0-9]+)_temp', entry)
|
||||
if re_match is not None:
|
||||
self.add_trip_point(re_match.group(1))
|
||||
@@ -88,6 +88,9 @@ class ThermalModule(Module):
|
||||
|
||||
for entry in target.list_directory(self.thermal_root):
|
||||
re_match = re.match('^(thermal_zone|cooling_device)([0-9]+)', entry)
|
||||
if not re_match:
|
||||
self.logger.warning('unknown thermal entry: %s', entry)
|
||||
continue
|
||||
|
||||
if re_match.group(1) == 'thermal_zone':
|
||||
self.add_thermal_zone(re_match.group(2))
|
||||
|
@@ -130,7 +130,7 @@ class VexpressBootModule(BootModule):
|
||||
init_dtr=0) as tty:
|
||||
self.get_through_early_boot(tty)
|
||||
self.perform_boot_sequence(tty)
|
||||
self.wait_for_android_prompt(tty)
|
||||
self.wait_for_shell_prompt(tty)
|
||||
|
||||
def perform_boot_sequence(self, tty):
|
||||
raise NotImplementedError()
|
||||
@@ -159,8 +159,8 @@ class VexpressBootModule(BootModule):
|
||||
menu.wait(timeout=self.timeout)
|
||||
return menu
|
||||
|
||||
def wait_for_android_prompt(self, tty):
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
def wait_for_shell_prompt(self, tty):
|
||||
self.logger.debug('Waiting for the shell prompt.')
|
||||
tty.expect(self.target.shell_prompt, timeout=self.timeout)
|
||||
# This delay is needed to allow the platform some time to finish
|
||||
# initializing; querying the ip address too early from connect() may
|
||||
@@ -325,7 +325,7 @@ class VersatileExpressFlashModule(FlashModule):
        self.timeout = timeout
        self.short_delay = short_delay

    def __call__(self, image_bundle=None, images=None, bootargs=None):
    def __call__(self, image_bundle=None, images=None, bootargs=None, connect=True):
        self.target.hard_reset()
        with open_serial_connection(port=self.target.platform.serial_port,
                                    baudrate=self.target.platform.baudrate,
@@ -346,7 +346,8 @@ class VersatileExpressFlashModule(FlashModule):
                msg = 'Could not deploy images to {}; got: {}'
                raise TargetStableError(msg.format(self.vemsd_mount, e))
        self.target.boot()
        self.target.connect(timeout=30)
        if connect:
            self.target.connect(timeout=30)

    def _deploy_image_bundle(self, bundle):
        self.logger.debug('Validating {}'.format(bundle))
@@ -78,7 +78,16 @@ class Platform(object):

    def _set_model_from_target(self, target):
        if target.os == 'android':
            self.model = target.getprop('ro.product.model')
            try:
                self.model = target.getprop(prop='ro.product.device')
            except KeyError:
                self.model = target.getprop('ro.product.model')
        elif target.file_exists("/proc/device-tree/model"):
            # There is currently no better way to do this cross platform.
            # ARM does not have dmidecode
            raw_model = target.execute("cat /proc/device-tree/model")
            device_model_to_return = '_'.join(raw_model.split()[:2])
            return device_model_to_return.rstrip(' \t\r\n\0')
        elif target.is_rooted:
            try:
                self.model = target.execute('dmidecode -s system-version',
@@ -90,9 +90,6 @@ class VersatileExpressPlatform(Platform):
    def _init_android_target(self, target):
        if target.connection_settings.get('device') is None:
            addr = self._get_target_ip_address(target)
            if sys.version_info[0] == 3:
                # Convert bytes to string for Python3 compatibility
                addr = addr.decode("utf-8")
            target.connection_settings['device'] = addr + ':5555'

    def _init_linux_target(self, target):
@@ -108,7 +105,7 @@ class VersatileExpressPlatform(Platform):
                                    init_dtr=0) as tty:
            tty.sendline('su')  # this is, apparently, required to query network device
                                # info by name on recent Juno builds...
            self.logger.debug('Waiting for the Android shell prompt.')
            self.logger.debug('Waiting for the shell prompt.')
            tty.expect(target.shell_prompt)

            self.logger.debug('Waiting for IP address...')
@@ -119,7 +116,7 @@ class VersatileExpressPlatform(Platform):
                time.sleep(1)
            try:
                tty.expect(r'inet ([1-9]\d*.\d+.\d+.\d+)', timeout=10)
                return tty.match.group(1)
                return tty.match.group(1).decode('utf-8')
            except pexpect.TIMEOUT:
                pass  # We have our own timeout -- see below.
            if (time.time() - wait_start_time) > self.ready_timeout:
devlib/target.py: 889 changes (file diff suppressed because it is too large)
@@ -1,137 +0,0 @@
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


import os
import re
from past.builtins import basestring, zip

from devlib.host import PACKAGE_BIN_DIRECTORY
from devlib.trace import TraceCollector
from devlib.utils.misc import ensure_file_directory_exists as _f


PERF_COMMAND_TEMPLATE = '{} stat {} {} sleep 1000 > {} 2>&1 '

PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')

DEFAULT_EVENTS = [
    'migrations',
    'cs',
]


class PerfCollector(TraceCollector):
    """
    Perf is a Linux profiling tool that works with performance counters.

    Performance counters are CPU hardware registers that count hardware events
    such as instructions executed, cache-misses suffered, or branches
    mispredicted. They form a basis for profiling applications to trace dynamic
    control flow and identify hotspots.

    perf accepts options and events. If no option is given the default '-a' is
    used. For events, the default events are migrations and cs. They both can
    be specified in the config file.

    Events must be provided as a list, and will look like this ::

        perf_events = ['migrations', 'cs']

    Events can be obtained by typing the following in the command line on the
    device ::

        perf list

    Options, on the other hand, can be provided as a single string, as follows ::

        perf_options = '-a -i'

    Options can be obtained by running the following in the command line ::

        man perf-stat
    """

    def __init__(self, target,
                 events=None,
                 optionstring=None,
                 labels=None,
                 force_install=False):
        super(PerfCollector, self).__init__(target)
        self.events = events if events else DEFAULT_EVENTS
        self.force_install = force_install
        self.labels = labels

        # Validate parameters
        if isinstance(optionstring, list):
            self.optionstrings = optionstring
        else:
            self.optionstrings = [optionstring]
        if self.events and isinstance(self.events, basestring):
            self.events = [self.events]
        if not self.labels:
            self.labels = ['perf_{}'.format(i) for i in range(len(self.optionstrings))]
        if len(self.labels) != len(self.optionstrings):
            raise ValueError('The number of labels must match the number of optstrings provided for perf.')

        self.binary = self.target.get_installed('perf')
        if self.force_install or not self.binary:
            self.binary = self._deploy_perf()

        self.commands = self._build_commands()

    def reset(self):
        self.target.killall('perf', as_root=self.target.is_rooted)
        for label in self.labels:
            filepath = self._get_target_outfile(label)
            self.target.remove(filepath)

    def start(self):
        for command in self.commands:
            self.target.kick_off(command)

    def stop(self):
        self.target.killall('sleep', as_root=self.target.is_rooted)

    # pylint: disable=arguments-differ
    def get_trace(self, outdir):
        for label in self.labels:
            target_file = self._get_target_outfile(label)
            host_relpath = os.path.basename(target_file)
            host_file = _f(os.path.join(outdir, host_relpath))
            self.target.pull(target_file, host_file)

    def _deploy_perf(self):
        host_executable = os.path.join(PACKAGE_BIN_DIRECTORY,
                                       self.target.abi, 'perf')
        return self.target.install(host_executable)

    def _build_commands(self):
        commands = []
        for opts, label in zip(self.optionstrings, self.labels):
            commands.append(self._build_perf_command(opts, self.events, label))
        return commands

    def _get_target_outfile(self, label):
        return self.target.get_workpath('{}.out'.format(label))

    def _build_perf_command(self, options, events, label):
        event_string = ' '.join(['-e {}'.format(e) for e in events])
        command = PERF_COMMAND_TEMPLATE.format(self.binary,
                                               options or '',
                                               event_string,
                                               self._get_target_outfile(label))
        return command
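
For illustration, a minimal usage sketch of this collector (names taken from the
class above; ``target`` is an assumed, already-connected devlib target) would
look like:

    collector = PerfCollector(target,
                              events=['migrations', 'cs'],
                              optionstring='-a',
                              labels=['perf_all'])
    collector.reset()
    collector.start()
    # ... run the workload of interest ...
    collector.stop()
    collector.get_trace('results')   # pulls perf_all.out into ./results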
@@ -19,6 +19,7 @@ Utility functions for working with Android devices through adb.
|
||||
|
||||
"""
|
||||
# pylint: disable=E1103
|
||||
import glob
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
@@ -28,21 +29,31 @@ import tempfile
|
||||
import subprocess
|
||||
from collections import defaultdict
|
||||
import pexpect
|
||||
from pipes import quote
|
||||
import xml.etree.ElementTree
|
||||
import zipfile
|
||||
import uuid
|
||||
|
||||
try:
|
||||
from shlex import quote
|
||||
except ImportError:
|
||||
from pipes import quote
|
||||
|
||||
from devlib.exception import TargetTransientError, TargetStableError, HostError
|
||||
from devlib.utils.misc import check_output, which, ABI_MAP
|
||||
from devlib.utils.misc import check_output, which, ABI_MAP, redirect_streams, get_subprocess
|
||||
from devlib.connection import ConnectionBase, AdbBackgroundCommand, PopenBackgroundCommand, PopenTransferManager
|
||||
|
||||
|
||||
logger = logging.getLogger('android')
|
||||
|
||||
MAX_ATTEMPTS = 5
|
||||
AM_START_ERROR = re.compile(r"Error: Activity.*")
|
||||
AAPT_BADGING_OUTPUT = re.compile(r"no dump ((file)|(apk)) specified", re.IGNORECASE)
|
||||
|
||||
# See:
|
||||
# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
|
||||
ANDROID_VERSION_MAP = {
|
||||
28: 'P',
|
||||
29: 'Q',
|
||||
28: 'PIE',
|
||||
27: 'OREO_MR1',
|
||||
26: 'OREO',
|
||||
25: 'NOUGAT_MR1',
|
||||
@@ -84,6 +95,7 @@ android_home = None
|
||||
platform_tools = None
|
||||
adb = None
|
||||
aapt = None
|
||||
aapt_version = None
|
||||
fastboot = None
|
||||
|
||||
|
||||
@@ -132,6 +144,7 @@ class ApkInfo(object):
|
||||
version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
|
||||
name_regex = re.compile(r"name='(?P<name>[^']+)'")
|
||||
permission_regex = re.compile(r"name='(?P<permission>[^']+)'")
|
||||
activity_regex = re.compile(r'\s*A:\s*android:name\(0x\d+\)=".(?P<name>\w+)"')
|
||||
|
||||
def __init__(self, path=None):
|
||||
self.path = path
|
||||
@@ -142,20 +155,16 @@ class ApkInfo(object):
|
||||
self.version_code = None
|
||||
self.native_code = None
|
||||
self.permissions = []
|
||||
self.parse(path)
|
||||
self._apk_path = None
|
||||
self._activities = None
|
||||
self._methods = None
|
||||
if path:
|
||||
self.parse(path)
|
||||
|
||||
# pylint: disable=too-many-branches
|
||||
def parse(self, apk_path):
|
||||
_check_env()
|
||||
command = [aapt, 'dump', 'badging', apk_path]
|
||||
logger.debug(' '.join(command))
|
||||
try:
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||
if sys.version_info[0] == 3:
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError('Error parsing APK file {}. `aapt` says:\n{}'
|
||||
.format(apk_path, e.output))
|
||||
output = self._run([aapt, 'dump', 'badging', apk_path])
|
||||
for line in output.split('\n'):
|
||||
if line.startswith('application-label:'):
|
||||
self.label = line.split(':')[1].strip().replace('\'', '')
|
||||
@@ -188,19 +197,187 @@ class ApkInfo(object):
|
||||
else:
|
||||
pass # not interested
|
||||
|
||||
self._apk_path = apk_path
|
||||
self._activities = None
|
||||
self._methods = None
|
||||
|
||||
class AdbConnection(object):
|
||||
@property
|
||||
def activities(self):
|
||||
if self._activities is None:
|
||||
cmd = [aapt, 'dump', 'xmltree', self._apk_path]
|
||||
if aapt_version == 2:
|
||||
cmd += ['--file']
|
||||
cmd += ['AndroidManifest.xml']
|
||||
matched_activities = self.activity_regex.finditer(self._run(cmd))
|
||||
self._activities = [m.group('name') for m in matched_activities]
|
||||
return self._activities
|
||||
|
||||
@property
|
||||
def methods(self):
|
||||
if self._methods is None:
|
||||
# Only try to extract once
|
||||
self._methods = []
|
||||
with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
with zipfile.ZipFile(self._apk_path, 'r') as z:
|
||||
try:
|
||||
extracted = z.extract('classes.dex', tmp_dir)
|
||||
except KeyError:
|
||||
return []
|
||||
dexdump = os.path.join(os.path.dirname(aapt), 'dexdump')
|
||||
command = [dexdump, '-l', 'xml', extracted]
|
||||
dump = self._run(command)
|
||||
|
||||
xml_tree = xml.etree.ElementTree.fromstring(dump)
|
||||
|
||||
package = next((i for i in xml_tree.iter('package')
|
||||
if i.attrib['name'] == self.package), None)
|
||||
|
||||
self._methods = [(meth.attrib['name'], klass.attrib['name'])
|
||||
for klass in package.iter('class')
|
||||
for meth in klass.iter('method')] if package else []
|
||||
return self._methods
|
||||
|
||||
def _run(self, command):
|
||||
logger.debug(' '.join(command))
|
||||
try:
|
||||
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
|
||||
if sys.version_info[0] == 3:
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', 'replace')
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise HostError('Error while running "{}":\n{}'
|
||||
.format(command, e.output))
|
||||
return output
|
||||
|
||||
|
||||
class AdbConnection(ConnectionBase):
|
||||
|
||||
# maintains the count of parallel active connections to a device, so that
|
||||
# adb disconnect is not invoked until all connections are closed
|
||||
active_connections = defaultdict(int)
|
||||
# Track connected as root status per device
|
||||
_connected_as_root = defaultdict(lambda: None)
|
||||
default_timeout = 10
|
||||
ls_command = 'ls'
|
||||
su_cmd = 'su -c {}'
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.device
|
||||
|
||||
@property
|
||||
def connected_as_root(self):
|
||||
if self._connected_as_root[self.device] is None:
|
||||
result = self.execute('id')
|
||||
self._connected_as_root[self.device] = 'uid=0(' in result
|
||||
return self._connected_as_root[self.device]
|
||||
|
||||
@connected_as_root.setter
|
||||
def connected_as_root(self, state):
|
||||
self._connected_as_root[self.device] = state
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def __init__(self, device=None, timeout=None, platform=None, adb_server=None,
|
||||
adb_as_root=False, connection_attempts=MAX_ATTEMPTS,
|
||||
poll_transfers=False,
|
||||
start_transfer_poll_delay=30,
|
||||
total_transfer_timeout=3600,
|
||||
transfer_poll_period=30,):
|
||||
super().__init__()
|
||||
self.timeout = timeout if timeout is not None else self.default_timeout
|
||||
if device is None:
|
||||
device = adb_get_device(timeout=timeout, adb_server=adb_server)
|
||||
self.device = device
|
||||
self.adb_server = adb_server
|
||||
self.adb_as_root = adb_as_root
|
||||
self.poll_transfers = poll_transfers
|
||||
if poll_transfers:
|
||||
transfer_opts = {'start_transfer_poll_delay': start_transfer_poll_delay,
|
||||
'total_timeout': total_transfer_timeout,
|
||||
'poll_period': transfer_poll_period,
|
||||
}
|
||||
self.transfer_mgr = PopenTransferManager(self, **transfer_opts) if poll_transfers else None
|
||||
if self.adb_as_root:
|
||||
self.adb_root(enable=True)
|
||||
adb_connect(self.device, adb_server=self.adb_server, attempts=connection_attempts)
|
||||
AdbConnection.active_connections[self.device] += 1
|
||||
self._setup_ls()
|
||||
self._setup_su()
|
||||
|
||||
def push(self, sources, dest, timeout=None):
|
||||
return self._push_pull('push', sources, dest, timeout)
|
||||
|
||||
def pull(self, sources, dest, timeout=None):
|
||||
return self._push_pull('pull', sources, dest, timeout)
|
||||
|
||||
def _push_pull(self, action, sources, dest, timeout):
|
||||
paths = sources + [dest]
|
||||
|
||||
# Quote twice to avoid expansion by host shell, then ADB globbing
|
||||
do_quote = lambda x: quote(glob.escape(x))
|
||||
paths = ' '.join(map(do_quote, paths))
|
||||
|
||||
command = "{} {}".format(action, paths)
|
||||
if timeout or not self.poll_transfers:
|
||||
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
else:
|
||||
with self.transfer_mgr.manage(sources, dest, action):
|
||||
bg_cmd = adb_command_background(self.device, command, adb_server=self.adb_server)
|
||||
self.transfer_mgr.set_transfer_and_wait(bg_cmd)
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def execute(self, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, strip_colors=True, will_succeed=False):
|
||||
try:
|
||||
return adb_shell(self.device, command, timeout, check_exit_code,
|
||||
as_root, adb_server=self.adb_server, su_cmd=self.su_cmd)
|
||||
except TargetStableError as e:
|
||||
if will_succeed:
|
||||
raise TargetTransientError(e)
|
||||
else:
|
||||
raise
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
bg_cmd = self._background(command, stdout, stderr, as_root)
|
||||
self._current_bg_cmds.add(bg_cmd)
|
||||
return bg_cmd
|
||||
|
||||
def _background(self, command, stdout, stderr, as_root):
|
||||
adb_shell, pid = adb_background_shell(self, command, stdout, stderr, as_root)
|
||||
bg_cmd = AdbBackgroundCommand(
|
||||
conn=self,
|
||||
adb_popen=adb_shell,
|
||||
pid=pid,
|
||||
as_root=as_root
|
||||
)
|
||||
return bg_cmd
|
||||
|
||||
def _close(self):
|
||||
AdbConnection.active_connections[self.device] -= 1
|
||||
if AdbConnection.active_connections[self.device] <= 0:
|
||||
if self.adb_as_root:
|
||||
self.adb_root(enable=False)
|
||||
adb_disconnect(self.device, self.adb_server)
|
||||
del AdbConnection.active_connections[self.device]
|
||||
|
||||
def cancel_running_command(self):
|
||||
# adbd multiplexes commands so that they don't interfere with each
|
||||
# other, so there is no need to explicitly cancel a running command
|
||||
# before the next one can be issued.
|
||||
pass
|
||||
|
||||
def adb_root(self, enable=True):
|
||||
cmd = 'root' if enable else 'unroot'
|
||||
output = adb_command(self.device, cmd, timeout=30, adb_server=self.adb_server)
|
||||
if 'cannot run as root in production builds' in output:
|
||||
raise TargetStableError(output)
|
||||
AdbConnection._connected_as_root[self.device] = enable
|
||||
|
||||
def wait_for_device(self, timeout=30):
|
||||
adb_command(self.device, 'wait-for-device', timeout, self.adb_server)
|
||||
|
||||
def reboot_bootloader(self, timeout=30):
|
||||
adb_command(self.device, 'reboot-bootloader', timeout, self.adb_server)
|
||||
|
||||
# Again, we need to handle boards where the default output format from ls is
|
||||
# single column *and* boards where the default output is multi-column.
|
||||
# We need to do this purely because the '-1' option causes errors on older
|
||||
@@ -221,66 +398,16 @@ class AdbConnection(object):
|
||||
self.ls_command = 'ls'
|
||||
logger.debug("ls command is set to {}".format(self.ls_command))
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def __init__(self, device=None, timeout=None, platform=None, adb_server=None):
|
||||
self.timeout = timeout if timeout is not None else self.default_timeout
|
||||
if device is None:
|
||||
device = adb_get_device(timeout=timeout, adb_server=adb_server)
|
||||
self.device = device
|
||||
self.adb_server = adb_server
|
||||
adb_connect(self.device)
|
||||
AdbConnection.active_connections[self.device] += 1
|
||||
self._setup_ls()
|
||||
|
||||
def push(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
command = "push {} {}".format(quote(source), quote(dest))
|
||||
if not os.path.exists(source):
|
||||
raise HostError('No such file "{}"'.format(source))
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
def pull(self, source, dest, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
# Pull all files matching a wildcard expression
|
||||
if os.path.isdir(dest) and \
|
||||
('*' in source or '?' in source):
|
||||
command = 'shell {} {}'.format(self.ls_command, source)
|
||||
output = adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
for line in output.splitlines():
|
||||
command = "pull {} {}".format(quote(line.strip()), quote(dest))
|
||||
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
return
|
||||
command = "pull {} {}".format(quote(source), quote(dest))
|
||||
return adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def execute(self, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, strip_colors=True, will_succeed=False):
|
||||
def _setup_su(self):
|
||||
try:
|
||||
return adb_shell(self.device, command, timeout, check_exit_code,
|
||||
as_root, adb_server=self.adb_server)
|
||||
except TargetStableError as e:
|
||||
if will_succeed:
|
||||
raise TargetTransientError(e)
|
||||
else:
|
||||
raise
|
||||
|
||||
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
|
||||
return adb_background_shell(self.device, command, stdout, stderr, as_root)
|
||||
|
||||
def close(self):
|
||||
AdbConnection.active_connections[self.device] -= 1
|
||||
if AdbConnection.active_connections[self.device] <= 0:
|
||||
adb_disconnect(self.device)
|
||||
del AdbConnection.active_connections[self.device]
|
||||
|
||||
def cancel_running_command(self):
|
||||
# adbd multiplexes commands so that they don't interfere with each
|
||||
# other, so there is no need to explicitly cancel a running command
|
||||
# before the next one can be issued.
|
||||
pass
|
||||
# Try the new style of invoking `su`
|
||||
self.execute('ls', timeout=self.timeout, as_root=True,
|
||||
check_exit_code=True)
|
||||
# If that fails, assume either old style or unrooted. Here we will assume
|
||||
# old style and root status will be verified later.
|
||||
except (TargetStableError, TargetTransientError, TimeoutError):
|
||||
self.su_cmd = 'echo {} | su'
|
||||
logger.debug("su command is set to {}".format(quote(self.su_cmd)))
|
||||
|
||||
|
||||
def fastboot_command(command, timeout=None, device=None):
|
||||
@@ -333,7 +460,7 @@ def adb_get_device(timeout=None, adb_server=None):
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS, adb_server=None):
|
||||
_check_env()
|
||||
tries = 0
|
||||
output = None
|
||||
@@ -341,10 +468,17 @@ def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
tries += 1
|
||||
if device:
|
||||
if "." in device: # Connect is required only for ADB-over-IP
|
||||
command = 'adb connect {}'.format(quote(device))
|
||||
# ADB does not automatically remove a network device from its
|
||||
# devices list when the connection is broken by the remote, so the
|
||||
# adb connection may have gone "stale", resulting in adb blocking
|
||||
# indefinitely when making calls to the device. To avoid this,
|
||||
# always disconnect first.
|
||||
adb_disconnect(device, adb_server)
|
||||
adb_cmd = get_adb_command(None, 'connect', adb_server)
|
||||
command = '{} {}'.format(adb_cmd, quote(device))
|
||||
logger.debug(command)
|
||||
output, _ = check_output(command, shell=True, timeout=timeout)
|
||||
if _ping(device):
|
||||
if _ping(device, adb_server):
|
||||
break
|
||||
time.sleep(10)
|
||||
else: # did not connect to the device
|
||||
@@ -354,22 +488,23 @@ def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS):
|
||||
raise HostError(message)
|
||||
|
||||
|
||||
def adb_disconnect(device):
|
||||
def adb_disconnect(device, adb_server=None):
|
||||
_check_env()
|
||||
if not device:
|
||||
return
|
||||
if ":" in device and device in adb_list_devices():
|
||||
command = "adb disconnect " + device
|
||||
if ":" in device and device in adb_list_devices(adb_server):
|
||||
adb_cmd = get_adb_command(None, 'disconnect', adb_server)
|
||||
command = "{} {}".format(adb_cmd, device)
|
||||
logger.debug(command)
|
||||
retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
|
||||
if retval:
|
||||
raise TargetTransientError('"{}" returned {}'.format(command, retval))
|
||||
|
||||
|
||||
def _ping(device):
|
||||
def _ping(device, adb_server=None):
|
||||
_check_env()
|
||||
device_string = ' -s {}'.format(quote(device)) if device else ''
|
||||
command = "adb{} shell \"ls /data/local/tmp > /dev/null\"".format(device_string)
|
||||
adb_cmd = get_adb_command(device, 'shell', adb_server)
|
||||
command = "{} {}".format(adb_cmd, quote('ls /data/local/tmp > /dev/null'))
|
||||
logger.debug(command)
|
||||
result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
|
||||
if not result: # pylint: disable=simplifiable-if-statement
|
||||
@@ -380,25 +515,27 @@ def _ping(device):
|
||||
|
||||
# pylint: disable=too-many-locals
|
||||
def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
as_root=False, adb_server=None): # NOQA
|
||||
as_root=False, adb_server=None, su_cmd='su -c {}'): # NOQA
|
||||
_check_env()
|
||||
if as_root:
|
||||
command = 'echo {} | su'.format(quote(command))
|
||||
device_part = []
|
||||
if adb_server:
|
||||
device_part = ['-H', adb_server]
|
||||
device_part += ['-s', device] if device else []
|
||||
|
||||
# On older combinations of ADB/Android versions, the adb host command always
|
||||
# exits with 0 if it was able to run the command on the target, even if the
|
||||
# command failed (https://code.google.com/p/android/issues/detail?id=3254).
|
||||
# Homogenise this behaviour by running the command then echoing the exit
|
||||
# code.
|
||||
adb_shell_command = '({}); echo \"\n$?\"'.format(command)
|
||||
actual_command = ['adb'] + device_part + ['shell', adb_shell_command]
|
||||
logger.debug('adb {} shell {}'.format(' '.join(device_part), command))
|
||||
# code of the executed command itself.
|
||||
command = r'({}); echo "\n$?"'.format(command)
|
||||
|
||||
parts = ['adb']
|
||||
if adb_server is not None:
|
||||
parts += ['-H', adb_server]
|
||||
if device is not None:
|
||||
parts += ['-s', device]
|
||||
parts += ['shell',
|
||||
command if not as_root else su_cmd.format(quote(command))]
|
||||
|
||||
logger.debug(' '.join(quote(part) for part in parts))
|
||||
try:
|
||||
raw_output, _ = check_output(actual_command, timeout, shell=False, combined_output=True)
|
||||
raw_output, error = check_output(parts, timeout, shell=False)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise TargetStableError(str(e))
|
||||
|
||||
@@ -418,8 +555,8 @@ def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
if exit_code.isdigit():
|
||||
if int(exit_code):
|
||||
message = ('Got exit code {}\nfrom target command: {}\n'
|
||||
'OUTPUT: {}')
|
||||
raise TargetStableError(message.format(exit_code, command, output))
|
||||
'OUTPUT: {}\nSTDERR: {}\n')
|
||||
raise TargetStableError(message.format(exit_code, command, output, error))
|
||||
elif re_search:
|
||||
message = 'Could not start activity; got the following:\n{}'
|
||||
raise TargetStableError(message.format(re_search[0]))
|
||||
@@ -430,25 +567,50 @@ def adb_shell(device, command, timeout=None, check_exit_code=False,
|
||||
else:
|
||||
message = 'adb has returned early; did not get an exit code. '\
|
||||
'Was kill-server invoked?\nOUTPUT:\n-----\n{}\n'\
|
||||
'-----'
|
||||
raise TargetTransientError(message.format(raw_output))
|
||||
'-----\nSTDERR:\n-----\n{}\n-----'
|
||||
raise TargetTransientError(message.format(raw_output, error))
|
||||
|
||||
return output
|
||||
return output + error
|
||||
|
||||
|
||||
def adb_background_shell(device, command,
|
||||
def adb_background_shell(conn, command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
as_root=False):
|
||||
"""Runs the sepcified command in a subprocess, returning the the Popen object."""
|
||||
device = conn.device
|
||||
adb_server = conn.adb_server
|
||||
|
||||
_check_env()
|
||||
stdout, stderr, command = redirect_streams(stdout, stderr, command)
|
||||
if as_root:
|
||||
command = 'echo {} | su'.format(quote(command))
|
||||
device_string = ' -s {}'.format(device) if device else ''
|
||||
full_command = 'adb{} shell {}'.format(device_string, quote(command))
|
||||
logger.debug(full_command)
|
||||
return subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
|
||||
|
||||
# Attach a unique UUID to the command line so it can be looked for without
|
||||
# any ambiguity with ps
|
||||
uuid_ = uuid.uuid4().hex
|
||||
uuid_var = 'BACKGROUND_COMMAND_UUID={}'.format(uuid_)
|
||||
command = "{} sh -c {}".format(uuid_var, quote(command))
|
||||
|
||||
adb_cmd = get_adb_command(device, 'shell', adb_server)
|
||||
full_command = '{} {}'.format(adb_cmd, quote(command))
|
||||
logger.debug(full_command)
|
||||
p = subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
|
||||
|
||||
# Out of band PID lookup, to avoid conflicting needs with stdout redirection
|
||||
find_pid = 'ps -A -o pid,args | grep {}'.format(quote(uuid_var))
|
||||
ps_out = conn.execute(find_pid)
|
||||
pids = [
|
||||
int(line.strip().split(' ', 1)[0])
|
||||
for line in ps_out.splitlines()
|
||||
]
|
||||
# The line we are looking for is the first one, since it was started before
|
||||
# any look up command
|
||||
pid = sorted(pids)[0]
|
||||
return (p, pid)
|
||||
|
||||
def adb_kill_server(timeout=30, adb_server=None):
|
||||
adb_command(None, 'kill-server', timeout, adb_server)
|
||||
|
||||
def adb_list_devices(adb_server=None):
|
||||
output = adb_command(None, 'devices', adb_server=adb_server)
|
||||
@@ -468,12 +630,22 @@ def get_adb_command(device, command, adb_server=None):
|
||||
device_string += ' -s {}'.format(device) if device else ''
|
||||
return "adb{} {}".format(device_string, command)
|
||||
|
||||
|
||||
def adb_command(device, command, timeout=None, adb_server=None):
|
||||
full_command = get_adb_command(device, command, adb_server)
|
||||
logger.debug(full_command)
|
||||
output, _ = check_output(full_command, timeout, shell=True)
|
||||
return output
|
||||
|
||||
|
||||
def adb_command_background(device, command, adb_server=None):
|
||||
full_command = get_adb_command(device, command, adb_server)
|
||||
logger.debug(full_command)
|
||||
proc = get_subprocess(full_command, shell=True)
|
||||
cmd = PopenBackgroundCommand(proc)
|
||||
return cmd
|
||||
|
||||
|
||||
def grant_app_permissions(target, package):
|
||||
"""
|
||||
Grant an app all the permissions it may ask for
|
||||
@@ -481,7 +653,7 @@ def grant_app_permissions(target, package):
|
||||
dumpsys = target.execute('dumpsys package {}'.format(package))
|
||||
|
||||
permissions = re.search(
|
||||
'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
|
||||
r'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
|
||||
)
|
||||
if permissions is None:
|
||||
return
|
||||
@@ -501,8 +673,10 @@ class _AndroidEnvironment(object):
|
||||
def __init__(self):
|
||||
self.android_home = None
|
||||
self.platform_tools = None
|
||||
self.build_tools = None
|
||||
self.adb = None
|
||||
self.aapt = None
|
||||
self.aapt_version = None
|
||||
self.fastboot = None
|
||||
|
||||
|
||||
@@ -528,28 +702,73 @@ def _initialize_without_android_home(env):
|
||||
_init_common(env)
|
||||
return env
|
||||
|
||||
|
||||
def _init_common(env):
|
||||
_discover_build_tools(env)
|
||||
_discover_aapt(env)
|
||||
|
||||
def _discover_build_tools(env):
|
||||
logger.debug('ANDROID_HOME: {}'.format(env.android_home))
|
||||
build_tools_directory = os.path.join(env.android_home, 'build-tools')
|
||||
if not os.path.isdir(build_tools_directory):
|
||||
msg = '''ANDROID_HOME ({}) does not appear to have valid Android SDK install
|
||||
(cannot find build-tools)'''
|
||||
raise HostError(msg.format(env.android_home))
|
||||
versions = os.listdir(build_tools_directory)
|
||||
for version in reversed(sorted(versions)):
|
||||
aapt_path = os.path.join(build_tools_directory, version, 'aapt')
|
||||
if os.path.isfile(aapt_path):
|
||||
logger.debug('Using aapt for version {}'.format(version))
|
||||
env.aapt = aapt_path
|
||||
break
|
||||
else:
|
||||
raise HostError('aapt not found. Please make sure at least one Android '
|
||||
'platform is installed.')
|
||||
if os.path.isdir(build_tools_directory):
|
||||
env.build_tools = build_tools_directory
|
||||
|
||||
def _check_supported_aapt2(binary):
|
||||
# At time of writing the version argument of aapt2 is not helpful as
|
||||
# the output is only a placeholder that does not distinguish between versions
|
||||
# with and without support for badging. Unfortunately aapt has been
|
||||
# deprecated and fails to parse some valid apks so we will try to favour
|
||||
# aapt2 if possible else will fall back to aapt.
|
||||
# Try to execute the badging command and check if we get an expected error
|
||||
# message as opposed to an unknown command error to determine if we have a
|
||||
# suitable version.
|
||||
cmd = '{} dump badging'.format(binary)
|
||||
result = subprocess.run(cmd.encode('utf-8'), shell=True, stderr=subprocess.PIPE)
|
||||
supported = bool(AAPT_BADGING_OUTPUT.search(result.stderr.decode('utf-8')))
|
||||
msg = 'Found a {} aapt2 binary at: {}'
|
||||
logger.debug(msg.format('supported' if supported else 'unsupported', binary))
|
||||
return supported
|
||||
|
||||
def _discover_aapt(env):
|
||||
if env.build_tools:
|
||||
aapt_path = ''
|
||||
aapt2_path = ''
|
||||
versions = os.listdir(env.build_tools)
|
||||
for version in reversed(sorted(versions)):
|
||||
if not aapt2_path and not os.path.isfile(aapt2_path):
|
||||
aapt2_path = os.path.join(env.build_tools, version, 'aapt2')
|
||||
if not aapt_path and not os.path.isfile(aapt_path):
|
||||
aapt_path = os.path.join(env.build_tools, version, 'aapt')
|
||||
aapt_version = 1
|
||||
break
|
||||
|
||||
# Use aapt2 only if present and we have a suitable version
|
||||
if aapt2_path and _check_supported_aapt2(aapt2_path):
|
||||
aapt_path = aapt2_path
|
||||
aapt_version = 2
|
||||
|
||||
# Use the aapt version discovered from build tools.
|
||||
if aapt_path:
|
||||
logger.debug('Using {} for version {}'.format(aapt_path, version))
|
||||
env.aapt = aapt_path
|
||||
env.aapt_version = aapt_version
|
||||
return
|
||||
|
||||
# Try detecting aapt2 and aapt from PATH
|
||||
if not env.aapt:
|
||||
aapt2_path = which('aapt2')
|
||||
if _check_supported_aapt2(aapt2_path):
|
||||
env.aapt = aapt2_path
|
||||
env.aapt_version = 2
|
||||
else:
|
||||
env.aapt = which('aapt')
|
||||
env.aapt_version = 1
|
||||
|
||||
if not env.aapt:
|
||||
raise HostError('aapt/aapt2 not found. Please make sure it is available in PATH'
|
||||
' or at least one Android platform is installed')
|
||||
|
||||
def _check_env():
|
||||
global android_home, platform_tools, adb, aapt # pylint: disable=W0603
|
||||
global android_home, platform_tools, adb, aapt, aapt_version # pylint: disable=W0603
|
||||
if not android_home:
|
||||
android_home = os.getenv('ANDROID_HOME')
|
||||
if android_home:
|
||||
@@ -560,6 +779,7 @@ def _check_env():
|
||||
platform_tools = _env.platform_tools
|
||||
adb = _env.adb
|
||||
aapt = _env.aapt
|
||||
aapt_version = _env.aapt_version
|
||||
|
||||
class LogcatMonitor(object):
|
||||
"""
|
||||
@@ -578,11 +798,12 @@ class LogcatMonitor(object):
|
||||
def logfile(self):
|
||||
return self._logfile
|
||||
|
||||
def __init__(self, target, regexps=None):
|
||||
def __init__(self, target, regexps=None, logcat_format=None):
|
||||
super(LogcatMonitor, self).__init__()
|
||||
|
||||
self.target = target
|
||||
self._regexps = regexps
|
||||
self._logcat_format = logcat_format
|
||||
self._logcat = None
|
||||
self._logfile = None
|
||||
|
||||
@@ -596,7 +817,7 @@ class LogcatMonitor(object):
|
||||
if outfile:
|
||||
self._logfile = open(outfile, 'w')
|
||||
else:
|
||||
self._logfile = tempfile.NamedTemporaryFile()
|
||||
self._logfile = tempfile.NamedTemporaryFile(mode='w')
|
||||
|
||||
self.target.clear_logcat()
|
||||
|
||||
@@ -614,12 +835,16 @@ class LogcatMonitor(object):
|
||||
else:
|
||||
logcat_cmd = '{} | grep {}'.format(logcat_cmd, quote(regexp))
|
||||
|
||||
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd)
|
||||
if self._logcat_format:
|
||||
logcat_cmd = "{} -v {}".format(logcat_cmd, quote(self._logcat_format))
|
||||
|
||||
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd, self.target.adb_server)
|
||||
|
||||
logger.debug('logcat command ="{}"'.format(logcat_cmd))
|
||||
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile)
|
||||
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile, encoding='utf-8')
|
||||
|
||||
def stop(self):
|
||||
self.flush_log()
|
||||
self._logcat.terminate()
|
||||
self._logfile.close()
|
||||
|
||||
@@ -627,6 +852,12 @@ class LogcatMonitor(object):
|
||||
"""
|
||||
Return the list of lines found by the monitor
|
||||
"""
|
||||
self.flush_log()
|
||||
|
||||
with open(self._logfile.name) as fh:
|
||||
return [line for line in fh]
|
||||
|
||||
def flush_log(self):
|
||||
# Unless we tell pexect to 'expect' something, it won't read from
|
||||
# logcat's buffer or write into our logfile. We'll need to force it to
|
||||
# read any pending logcat output.
|
||||
@@ -657,9 +888,6 @@ class LogcatMonitor(object):
|
||||
# printed anything since pexpect last read from its buffer.
|
||||
break
|
||||
|
||||
with open(self._logfile.name) as fh:
|
||||
return [line for line in fh]
|
||||
|
||||
def clear_log(self):
|
||||
with open(self._logfile.name, 'w') as _:
|
||||
pass
|
||||
|
@@ -18,7 +18,7 @@ import logging
from devlib.utils.types import numeric


GEM5STATS_FIELD_REGEX = re.compile("^(?P<key>[^- ]\S*) +(?P<value>[^#]+).+$")
GEM5STATS_FIELD_REGEX = re.compile(r"^(?P<key>[^- ]\S*) +(?P<value>[^#]+).+$")
GEM5STATS_DUMP_HEAD = '---------- Begin Simulation Statistics ----------'
GEM5STATS_DUMP_TAIL = '---------- End Simulation Statistics ----------'
GEM5STATS_ROI_NUMBER = 8
@@ -19,11 +19,14 @@ Miscellaneous functions that don't fit anywhere else.
|
||||
|
||||
"""
|
||||
from __future__ import division
|
||||
from functools import partial, reduce
|
||||
from contextlib import contextmanager
|
||||
from functools import partial, reduce, wraps
|
||||
from itertools import groupby
|
||||
from operator import itemgetter
|
||||
from weakref import WeakKeyDictionary, WeakSet
|
||||
|
||||
import ctypes
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
import pkgutil
|
||||
@@ -38,6 +41,16 @@ import wrapt
|
||||
import warnings
|
||||
|
||||
|
||||
try:
|
||||
from contextlib import ExitStack
|
||||
except AttributeError:
|
||||
from contextlib2 import ExitStack
|
||||
|
||||
try:
|
||||
from shlex import quote
|
||||
except ImportError:
|
||||
from pipes import quote
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
# pylint: disable=redefined-builtin
|
||||
@@ -129,9 +142,6 @@ def get_cpu_name(implementer, part, variant):
|
||||
|
||||
|
||||
def preexec_function():
|
||||
# Ignore the SIGINT signal by setting the handler to the standard
|
||||
# signal handler SIG_IGN.
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
# Change process group in case we have to kill the subprocess and all of
|
||||
# its children later.
|
||||
# TODO: this is Unix-specific; would be good to find an OS-agnostic way
|
||||
@@ -145,10 +155,22 @@ check_output_logger = logging.getLogger('check_output')
|
||||
check_output_lock = threading.Lock()
|
||||
|
||||
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None,
|
||||
combined_output=False, **kwargs):
|
||||
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
|
||||
the subprocess if it does not return within the specified time."""
|
||||
def get_subprocess(command, **kwargs):
|
||||
if 'stdout' in kwargs:
|
||||
raise ValueError('stdout argument not allowed, it will be overridden.')
|
||||
with check_output_lock:
|
||||
process = subprocess.Popen(command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function,
|
||||
**kwargs)
|
||||
return process
|
||||
|
||||
|
||||
def check_subprocess_output(process, timeout=None, ignore=None, inputtext=None):
|
||||
output = None
|
||||
error = None
|
||||
# pylint: disable=too-many-branches
|
||||
if ignore is None:
|
||||
ignore = []
|
||||
@@ -157,49 +179,35 @@ def check_output(command, timeout=None, ignore=None, inputtext=None,
|
||||
elif not isinstance(ignore, list) and ignore != 'all':
|
||||
message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
|
||||
raise ValueError(message.format(ignore))
|
||||
if 'stdout' in kwargs:
|
||||
raise ValueError('stdout argument not allowed, it will be overridden.')
|
||||
|
||||
def callback(pid):
|
||||
try:
|
||||
check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
|
||||
os.killpg(pid, signal.SIGKILL)
|
||||
except OSError:
|
||||
pass # process may have already terminated.
|
||||
|
||||
with check_output_lock:
|
||||
stderr = subprocess.STDOUT if combined_output else subprocess.PIPE
|
||||
process = subprocess.Popen(command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=stderr,
|
||||
stdin=subprocess.PIPE,
|
||||
preexec_fn=preexec_function,
|
||||
**kwargs)
|
||||
|
||||
if timeout:
|
||||
timer = threading.Timer(timeout, callback, [process.pid, ])
|
||||
timer.start()
|
||||
|
||||
try:
|
||||
output, error = process.communicate(inputtext)
|
||||
if sys.version_info[0] == 3:
|
||||
# Currently errors=replace is needed as 0x8c throws an error
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', "replace")
|
||||
if error:
|
||||
error = error.decode(sys.stderr.encoding or 'utf-8', "replace")
|
||||
finally:
|
||||
if timeout:
|
||||
timer.cancel()
|
||||
output, error = process.communicate(inputtext, timeout=timeout)
|
||||
except subprocess.TimeoutExpired as e:
|
||||
timeout_expired = e
|
||||
else:
|
||||
timeout_expired = None
|
||||
|
||||
# Currently errors=replace is needed as 0x8c throws an error
|
||||
output = output.decode(sys.stdout.encoding or 'utf-8', "replace") if output else ''
|
||||
error = error.decode(sys.stderr.encoding or 'utf-8', "replace") if error else ''
|
||||
|
||||
if timeout_expired:
|
||||
raise TimeoutError(process.args, output='\n'.join([output, error]))
|
||||
|
||||
retcode = process.poll()
|
||||
if retcode:
|
||||
if retcode == -9: # killed, assume due to timeout callback
|
||||
raise TimeoutError(command, output='\n'.join([output or '', error or '']))
|
||||
elif ignore != 'all' and retcode not in ignore:
|
||||
raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output or '', error or '']))
|
||||
if retcode and ignore != 'all' and retcode not in ignore:
|
||||
raise subprocess.CalledProcessError(retcode, process.args, output='\n'.join([output, error]))
|
||||
|
||||
return output, error
|
||||
|
||||
|
||||
def check_output(command, timeout=None, ignore=None, inputtext=None, **kwargs):
|
||||
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
|
||||
the subprocess if it does not return within the specified time."""
|
||||
process = get_subprocess(command, **kwargs)
|
||||
return check_subprocess_output(process, timeout=timeout, ignore=ignore, inputtext=inputtext)
|
||||
|
||||
|
||||
def walk_modules(path):
|
||||
"""
|
||||
Given package name, return a list of all modules (including submodules, etc)
|
||||
@@ -237,6 +245,32 @@ def walk_modules(path):
|
||||
mods.append(submod)
|
||||
return mods
|
||||
|
||||
def redirect_streams(stdout, stderr, command):
|
||||
"""
|
||||
Update a command to redirect a given stream to /dev/null if it's
|
||||
``subprocess.DEVNULL``.
|
||||
|
||||
:return: A tuple (stdout, stderr, command) with stream set to ``subprocess.PIPE``
|
||||
if the `stream` parameter was set to ``subprocess.DEVNULL``.
|
||||
"""
|
||||
def redirect(stream, redirection):
|
||||
if stream == subprocess.DEVNULL:
|
||||
suffix = '{}/dev/null'.format(redirection)
|
||||
elif stream == subprocess.STDOUT:
|
||||
suffix = '{}&1'.format(redirection)
|
||||
# Indicate that there is nothing to monitor for stderr anymore
|
||||
# since it's merged into stdout
|
||||
stream = subprocess.DEVNULL
|
||||
else:
|
||||
suffix = ''
|
||||
|
||||
return (stream, suffix)
|
||||
|
||||
stdout, suffix1 = redirect(stdout, '>')
|
||||
stderr, suffix2 = redirect(stderr, '2>')
|
||||
|
||||
command = 'sh -c {} {} {}'.format(quote(command), suffix1, suffix2)
|
||||
return (stdout, stderr, command)
|
||||
|
||||
def ensure_directory_exists(dirpath):
|
||||
"""A filter for directory paths to ensure they exist."""
|
||||
@@ -461,7 +495,7 @@ def escape_spaces(text):
|
||||
|
||||
.. note:: :func:`pipes.quote` should be favored where possible.
|
||||
"""
|
||||
return text.replace(' ', '\ ')
|
||||
return text.replace(' ', '\\ ')
|
||||
|
||||
|
||||
def getch(count=1):
|
||||
@@ -695,3 +729,200 @@ def memoized(wrapped, instance, args, kwargs): # pylint: disable=unused-argumen
|
||||
return __memo_cache[id_string]
|
||||
|
||||
return memoize_wrapper(*args, **kwargs)
|
||||
|
||||
@contextmanager
|
||||
def batch_contextmanager(f, kwargs_list):
|
||||
"""
|
||||
Return a context manager that will call the ``f`` callable with the keyword
|
||||
arguments dict in the given list, in one go.
|
||||
|
||||
:param f: Callable expected to return a context manager.
|
||||
|
||||
:param kwargs_list: list of kwargs dictionaries to be used to call ``f``.
|
||||
:type kwargs_list: list(dict)
|
||||
"""
|
||||
with ExitStack() as stack:
|
||||
for kwargs in kwargs_list:
|
||||
stack.enter_context(f(**kwargs))
|
||||
yield
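
# Minimal usage sketch (illustrative, not from the diff): each kwargs dict is
# passed to the callable and all resulting context managers are entered in one
# go. The built-in `open` and the file names below are assumptions.
def _batch_contextmanager_example():
    with batch_contextmanager(open, [{'file': 'a.txt'}, {'file': 'b.txt'}]):
        pass  # both files stay open for the duration of this block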
|
||||
|
||||
|
||||
@contextmanager
|
||||
def nullcontext(enter_result=None):
|
||||
"""
|
||||
Backport of Python 3.7 ``contextlib.nullcontext``
|
||||
|
||||
This context manager does nothing, so it can be used as a default
|
||||
placeholder for code that needs to select at runtime what context manager
|
||||
to use.
|
||||
|
||||
:param enter_result: Object that will be bound to the target of the with
|
||||
statement, or `None` if nothing is specified.
|
||||
:type enter_result: object
|
||||
"""
|
||||
yield enter_result
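
# Minimal usage sketch (illustrative): select a context manager at runtime,
# falling back to the no-op nullcontext. `lock` is an assumed Lock-like object.
def _nullcontext_example(lock=None):
    cm = lock if lock is not None else nullcontext()
    with cm:
        pass  # protected by `lock` when one is given, a no-op otherwise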
|
||||
|
||||
|
||||
class tls_property:
|
||||
"""
|
||||
Use it like `property` decorator, but the result will be memoized per
|
||||
thread. When the owning thread dies, the values for that thread will be
|
||||
destroyed.
|
||||
|
||||
In order to get the values, it's necessary to call the object
|
||||
given by the property. This is necessary in order to be able to add methods
|
||||
to that object, like :meth:`_BoundTLSProperty.get_all_values`.
|
||||
|
||||
Values can be set and deleted as well; these operations only affect the calling thread's value.
|
||||
"""
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.factory.__name__
|
||||
|
||||
def __init__(self, factory):
|
||||
self.factory = factory
|
||||
# Lock accesses to shared WeakKeyDictionary and WeakSet
|
||||
self.lock = threading.Lock()
|
||||
|
||||
def __get__(self, instance, owner=None):
|
||||
return _BoundTLSProperty(self, instance, owner)
|
||||
|
||||
def _get_value(self, instance, owner):
|
||||
tls, values = self._get_tls(instance)
|
||||
try:
|
||||
return tls.value
|
||||
except AttributeError:
|
||||
# Bind the method to `instance`
|
||||
f = self.factory.__get__(instance, owner)
|
||||
obj = f()
|
||||
tls.value = obj
|
||||
# Since that's a WeakSet, values will be removed automatically once
|
||||
# the threading.local variable that holds them is destroyed
|
||||
with self.lock:
|
||||
values.add(obj)
|
||||
return obj
|
||||
|
||||
def _get_all_values(self, instance, owner):
|
||||
with self.lock:
|
||||
# Grab a reference to all the objects at the time of the call by
|
||||
# using a regular set
|
||||
tls, values = self._get_tls(instance=instance)
|
||||
return set(values)
|
||||
|
||||
def __set__(self, instance, value):
|
||||
tls, values = self._get_tls(instance)
|
||||
tls.value = value
|
||||
with self.lock:
|
||||
values.add(value)
|
||||
|
||||
def __delete__(self, instance):
|
||||
tls, values = self._get_tls(instance)
|
||||
with self.lock:
|
||||
values.discard(tls.value)
|
||||
del tls.value
|
||||
|
||||
def _get_tls(self, instance):
|
||||
dct = instance.__dict__
|
||||
name = self.name
|
||||
try:
|
||||
# Using instance.__dict__[self.name] is safe as
|
||||
# getattr(instance, name) will return the property instead, as
|
||||
# the property is a descriptor
|
||||
tls = dct[name]
|
||||
except KeyError:
|
||||
with self.lock:
|
||||
# Double check after taking the lock to avoid a race
|
||||
if name not in dct:
|
||||
tls = (threading.local(), WeakSet())
|
||||
dct[name] = tls
|
||||
|
||||
return tls
|
||||
|
||||
@property
|
||||
def basic_property(self):
|
||||
"""
|
||||
Return a basic property that can be used to access the TLS value
|
||||
without having to call it first.
|
||||
|
||||
The drawback is that it's not possible to do anything other than
|
||||
getting/setting/deleting.
|
||||
"""
|
||||
def getter(instance, owner=None):
|
||||
prop = self.__get__(instance, owner)
|
||||
return prop()
|
||||
|
||||
return property(getter, self.__set__, self.__delete__)
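
# Minimal usage sketch (illustrative): the decorated factory runs at most once
# per thread per instance, and the value is obtained by *calling* the attribute.
# `make_handle` is an assumed factory returning a (weak-referenceable) object.
def _tls_property_example(make_handle):
    class Owner:
        @tls_property
        def handle(self):
            return make_handle()

    owner = Owner()
    value = owner.handle()                        # this thread's value
    all_values = owner.handle.get_all_values()    # values across live threads
    return value, all_values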
|
||||
|
||||
class _BoundTLSProperty:
|
||||
"""
|
||||
Simple proxy object to allow either calling it to get the TLS value, or get
|
||||
some other information by calling methods.
|
||||
"""
|
||||
def __init__(self, tls_property, instance, owner):
|
||||
self.tls_property = tls_property
|
||||
self.instance = instance
|
||||
self.owner = owner
|
||||
|
||||
def __call__(self):
|
||||
return self.tls_property._get_value(
|
||||
instance=self.instance,
|
||||
owner=self.owner,
|
||||
)
|
||||
|
||||
def get_all_values(self):
|
||||
"""
|
||||
Returns all the thread-local values currently in use in the process for
|
||||
that property for that instance.
|
||||
"""
|
||||
return self.tls_property._get_all_values(
|
||||
instance=self.instance,
|
||||
owner=self.owner,
|
||||
)
|
||||
|
||||
|
||||
class InitCheckpointMeta(type):
|
||||
"""
|
||||
Metaclass providing an ``initialized`` boolean attribute on instances.
|
||||
|
||||
``initialized`` is set to ``True`` once the ``__init__`` constructor has
|
||||
returned. It will deal cleanly with nested calls to ``super().__init__``.
|
||||
"""
|
||||
def __new__(metacls, name, bases, dct, **kwargs):
|
||||
cls = super().__new__(metacls, name, bases, dct, **kwargs)
|
||||
init_f = cls.__init__
|
||||
|
||||
@wraps(init_f)
|
||||
def init_wrapper(self, *args, **kwargs):
|
||||
self.initialized = False
|
||||
|
||||
# Track the nesting of super()__init__ to set initialized=True only
|
||||
# when the outer level is finished
|
||||
try:
|
||||
stack = self._init_stack
|
||||
except AttributeError:
|
||||
stack = []
|
||||
self._init_stack = stack
|
||||
|
||||
stack.append(init_f)
|
||||
try:
|
||||
x = init_f(self, *args, **kwargs)
|
||||
finally:
|
||||
stack.pop()
|
||||
|
||||
if not stack:
|
||||
self.initialized = True
|
||||
del self._init_stack
|
||||
|
||||
return x
|
||||
|
||||
cls.__init__ = init_wrapper
|
||||
|
||||
return cls
|
||||
|
||||
|
||||
class InitCheckpoint(metaclass=InitCheckpointMeta):
|
||||
"""
|
||||
Inherit from this class to set the :class:`InitCheckpointMeta` metaclass.
|
||||
"""
|
||||
pass
|
||||
|
@@ -49,12 +49,12 @@ class FrameCollector(threading.Thread):
|
||||
self.refresh_period = None
|
||||
self.drop_threshold = None
|
||||
self.unresponsive_count = 0
|
||||
self.last_ready_time = None
|
||||
self.last_ready_time = 0
|
||||
self.exc = None
|
||||
self.header = None
|
||||
|
||||
def run(self):
|
||||
logger.debug('Surface flinger frame data collection started.')
|
||||
logger.debug('Frame data collection started.')
|
||||
try:
|
||||
self.stop_signal.clear()
|
||||
fd, self.temp_file = tempfile.mkstemp()
|
||||
@@ -71,7 +71,7 @@ class FrameCollector(threading.Thread):
|
||||
except Exception as e: # pylint: disable=W0703
|
||||
logger.warning('Exception on collector thread: {}({})'.format(e.__class__.__name__, e))
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
logger.debug('Surface flinger frame data collection stopped.')
|
||||
logger.debug('Frame data collection stopped.')
|
||||
|
||||
def stop(self):
|
||||
self.stop_signal.set()
|
||||
@@ -133,7 +133,7 @@ class SurfaceFlingerFrameCollector(FrameCollector):
|
||||
def collect_frames(self, wfh):
|
||||
for activity in self.list():
|
||||
if activity == self.view:
|
||||
wfh.write(self.get_latencies(activity))
|
||||
wfh.write(self.get_latencies(activity).encode('utf-8'))
|
||||
|
||||
def clear(self):
|
||||
self.target.execute('dumpsys SurfaceFlinger --latency-clear ')
|
||||
@@ -147,32 +147,44 @@ class SurfaceFlingerFrameCollector(FrameCollector):
|
||||
return text.replace('\r\n', '\n').replace('\r', '\n').split('\n')
|
||||
|
||||
def _process_raw_file(self, fh):
|
||||
found = False
|
||||
text = fh.read().replace('\r\n', '\n').replace('\r', '\n')
|
||||
for line in text.split('\n'):
|
||||
line = line.strip()
|
||||
if line:
|
||||
self._process_trace_line(line)
|
||||
if not line:
|
||||
continue
|
||||
if 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
|
||||
self.unresponsive_count += 1
|
||||
continue
|
||||
parts = line.split()
|
||||
# We only want numerical data, ignore textual data.
|
||||
try:
|
||||
parts = list(map(int, parts))
|
||||
except ValueError:
|
||||
continue
|
||||
found = True
|
||||
self._process_trace_parts(parts)
|
||||
if not found:
|
||||
logger.warning('Could not find expected SurfaceFlinger output.')
|
||||
|
||||
def _process_trace_line(self, line):
|
||||
parts = line.split()
|
||||
def _process_trace_parts(self, parts):
|
||||
if len(parts) == 3:
|
||||
frame = SurfaceFlingerFrame(*list(map(int, parts)))
|
||||
frame = SurfaceFlingerFrame(*parts)
|
||||
if not frame.frame_ready_time:
|
||||
return # "null" frame
|
||||
if frame.frame_ready_time <= self.last_ready_time:
|
||||
return # duplicate frame
|
||||
if (frame.frame_ready_time - frame.desired_present_time) > self.drop_threshold:
|
||||
logger.debug('Dropping bogus frame {}.'.format(line))
|
||||
logger.debug('Dropping bogus frame {}.'.format(' '.join(map(str, parts))))
|
||||
return # bogus data
|
||||
self.last_ready_time = frame.frame_ready_time
|
||||
self.frames.append(frame)
|
||||
elif len(parts) == 1:
|
||||
self.refresh_period = int(parts[0])
|
||||
self.refresh_period = parts[0]
|
||||
self.drop_threshold = self.refresh_period * 1000
|
||||
elif 'SurfaceFlinger appears to be unresponsive, dumping anyways' in line:
|
||||
self.unresponsive_count += 1
|
||||
else:
|
||||
logger.warning('Unexpected SurfaceFlinger dump output: {}'.format(line))
|
||||
msg = 'Unexpected SurfaceFlinger dump output: {}'.format(' '.join(map(str, parts)))
|
||||
logger.warning(msg)
|
||||
|
||||
|
||||
def read_gfxinfo_columns(target):
|
||||
File diff suppressed because it is too large
@@ -15,8 +15,23 @@

import os
import sys
from collections import namedtuple
from subprocess import Popen, PIPE


VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])

version = VersionTuple(1, 3, 0, '')


def get_devlib_version():
    version_string = '{}.{}.{}'.format(
        version.major, version.minor, version.revision)
    if version.dev:
        version_string += '.{}'.format(version.dev)
    return version_string


def get_commit():
    p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__),
              stdout=PIPE, stderr=PIPE)
doc/collectors.rst: new file, 153 lines
@@ -0,0 +1,153 @@
|
||||
.. _collector:

Collectors
==========

The ``Collector`` API provides a consistent way of collecting arbitrary data from
a target. Data is collected via an instance of a class derived from
:class:`CollectorBase`.


Example
|
||||
-------
|
||||
|
||||
The following example shows how to use a collector to read the logcat output
|
||||
from an Android target.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
# import and instantiate the Target and the collector
|
||||
# (note: this assumes exactly one android target connected
|
||||
# to the host machine).
|
||||
In [1]: from devlib import AndroidTarget, LogcatCollector
|
||||
|
||||
In [2]: t = AndroidTarget()
|
||||
|
||||
# Set up the collector on the Target.
|
||||
|
||||
In [3]: collector = LogcatCollector(t)
|
||||
|
||||
# Configure the output file path for the collector to use.
|
||||
In [4]: collector.set_output('adb_log.txt')
|
||||
|
||||
# Reset the Collector to perform any required configuration or preparation.
|
||||
In [5]: collector.reset()
|
||||
|
||||
# Start Collecting
|
||||
In [6]: collector.start()
|
||||
|
||||
# Wait for some output to be generated
|
||||
In [7]: sleep(10)
|
||||
|
||||
# Stop Collecting
|
||||
In [8]: collector.stop()
|
||||
|
||||
# Retrieve the collected data
|
||||
In [9]: output = collector.get_data()
|
||||
|
||||
# Display the returned ``CollectorOutput`` Object.
|
||||
In [10]: output
|
||||
Out[10]: [<adb_log.txt (file)>]
|
||||
|
||||
In [11]: log_file = output[0]
|
||||
|
||||
# Get the path kind of the returned CollectorOutputEntry.
|
||||
In [12]: log_file.path_kind
|
||||
Out[12]: 'file'
|
||||
|
||||
# Get the path of the returned CollectorOutputEntry.
|
||||
In [13]: log_file.path
|
||||
Out[13]: 'adb_log.txt'
|
||||
|
||||
# Find the full path to the log file.
|
||||
In [14]: os.path.join(os.getcwd(), log_file)
|
||||
Out[14]: '/tmp/adb_log.txt'
|
||||
|
||||
|
||||
API
|
||||
---
|
||||
.. collector:
|
||||
|
||||
.. module:: devlib.collector
|
||||
|
||||
|
||||
CollectorBase
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
.. class:: CollectorBase(target, \*\*kwargs)
|
||||
|
||||
A ``CollectorBase`` is the base class and API that should be
implemented to allow collecting various data from a target, e.g. traces,
logs etc.
|
||||
|
||||
.. method:: Collector.setup(\*args, \*\*kwargs)
|
||||
|
||||
This will set up the collector on the target. Parameters this method takes
|
||||
are particular to subclasses (see documentation for specific collectors
|
||||
below). The actions performed by this method are also
|
||||
collector-specific. Usually these will be things like installing
|
||||
executables, starting services, deploying assets, etc. Typically, this method
|
||||
needs to be invoked at most once per reboot of the target (unless
|
||||
``teardown()`` has been called), but see documentation for the collector
|
||||
you're interested in.
|
||||
|
||||
.. method:: CollectorBase.reset()
|
||||
|
||||
This can be used to configure a collector for collection. This must be invoked
|
||||
before ``start()`` is called to begin collection.
|
||||
|
||||
.. method:: CollectorBase.start()
|
||||
|
||||
Starts collecting from the target.
|
||||
|
||||
.. method:: CollectorBase.stop()
|
||||
|
||||
Stops collecting from target. Must be called after
|
||||
:func:`start()`.
|
||||
|
||||
|
||||
.. method:: CollectorBase.set_output(output_path)
|
||||
|
||||
Configure the output path for the particular collector. This will be either
|
||||
a directory or file path which will be used when storing the data. Please see
|
||||
the individual Collector documentation for more information.
|
||||
|
||||
|
||||
.. method:: CollectorBase.get_data()
|
||||
|
||||
The collected data will be returned via the previously specified output_path.
This method will return a ``CollectorOutput`` object, a list subclass
containing individual ``CollectorOutputEntry`` objects with details
about each output entry.
|
||||
|
||||
|
||||
CollectorOutputEntry
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
This object is designed to allow for the output of a collector to be processed
|
||||
generically. The object behaves as a regular string containing the path to the
underlying output and can be used directly in ``os.path`` operations.
|
||||
|
||||
.. attribute:: CollectorOutputEntry.path
|
||||
|
||||
The file path for the corresponding output item.
|
||||
|
||||
.. attribute:: CollectorOutputEntry.path_kind
|
||||
|
||||
The type of output that is specified in the ``path`` attribute. Currently valid
|
||||
kinds are: ``file`` and ``directory``.
|
||||
|
||||
.. method:: CollectorOutputEntry.__init__(path, path_kind)
|
||||
|
||||
Initialises a ``CollectorOutputEntry`` object with the desired file path and
|
||||
kind of file path specified.
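As an illustration, the entries of the ``CollectorOutput`` returned by
``get_data()`` in the logcat example above can be processed generically (a
minimal sketch; it assumes the ``output`` object from that example):

.. code-block:: python

    import os

    # ``output`` is the CollectorOutput returned by collector.get_data() above
    for entry in output:
        # an entry carries the path to its output and works with os.path directly
        if entry.path_kind == 'file':
            print(entry.path, os.path.getsize(entry.path))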
|
||||
|
||||
|
||||
.. collectors:
|
||||
|
||||
Available Collectors
|
||||
---------------------
|
||||
|
||||
This section lists collectors that are currently part of devlib.
|
||||
|
||||
.. todo:: Add collectors
|
@@ -3,16 +3,17 @@ Connection
|
||||
|
||||
A :class:`Connection` abstracts an actual physical connection to a device. The
|
||||
first connection is created when :func:`Target.connect` method is called. If a
|
||||
:class:`Target` is used in a multi-threaded environment, it will maintain a
|
||||
connection for each thread in which it is invoked. This allows the same target
|
||||
object to be used in parallel in multiple threads.
|
||||
:class:`~devlib.target.Target` is used in a multi-threaded environment, it will
|
||||
maintain a connection for each thread in which it is invoked. This allows
|
||||
the same target object to be used in parallel in multiple threads.
|
||||
|
||||
:class:`Connection`\ s will be automatically created and managed by
|
||||
:class:`Target`\ s, so there is usually no reason to create one manually.
|
||||
Instead, configuration for a :class:`Connection` is passed as
|
||||
`connection_settings` parameter when creating a :class:`Target`. The connection
|
||||
to be used target is also specified on instantiation by `conn_cls` parameter,
|
||||
though all concrete :class:`Target` implementations will set an appropriate
|
||||
:class:`~devlib.target.Target`\ s, so there is usually no reason to create one
|
||||
manually. Instead, configuration for a :class:`Connection` is passed as
|
||||
`connection_settings` parameter when creating a
|
||||
:class:`~devlib.target.Target`. The connection to be used by the target is also
|
||||
specified on instantiation by `conn_cls` parameter, though all concrete
|
||||
:class:`~devlib.target.Target` implementations will set an appropriate
|
||||
default, so there is typically no need to specify this explicitly.
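For example, a connection can be configured implicitly through the target (a
sketch; the host address and credentials are placeholders, and ``conn_cls`` is
shown explicitly even though ``LinuxTarget`` already defaults to
``SshConnection``):

.. code-block:: python

    from devlib import LinuxTarget
    from devlib.utils.ssh import SshConnection

    # connection_settings are forwarded to the conn_cls constructor
    t = LinuxTarget(connection_settings={'host': '192.168.0.100',
                                         'username': 'root',
                                         'password': 'root'},
                    conn_cls=SshConnection)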
|
||||
|
||||
:class:`Connection` classes are not a part of an inheritance hierarchy, i.e.
|
||||
@@ -20,25 +21,25 @@ they do not derive from a common base. Instead, a :class:`Connection` is any
|
||||
class that implements the following methods.
|
||||
|
||||
|
||||
.. method:: push(self, source, dest, timeout=None)
|
||||
.. method:: push(self, sources, dest, timeout=None)
|
||||
|
||||
Transfer a file from the host machine to the connected device.
|
||||
Transfer a list of files from the host machine to the connected device.
|
||||
|
||||
:param source: path of to the file on the host
|
||||
:param dest: path of to the file on the connected device.
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
:param sources: list of paths on the host
|
||||
:param dest: path to the file or folder on the connected device.
|
||||
:param timeout: timeout (in seconds) for the transfer of each file; if the
|
||||
transfer does not complete within this period, an exception will be
|
||||
raised.
|
||||
|
||||
.. method:: pull(self, source, dest, timeout=None)
|
||||
.. method:: pull(self, sources, dest, timeout=None)
|
||||
|
||||
Transfer a file, or files matching a glob pattern, from the connected device
|
||||
to the host machine.
|
||||
Transfer a list of files from the connected device to the host machine.
|
||||
|
||||
:param source: path of to the file on the connected device. If ``dest`` is a
|
||||
directory, may be a glob pattern.
|
||||
:param dest: path of to the file on the host
|
||||
:param timeout: timeout (in seconds) for the transfer; if the transfer does
|
||||
not complete within this period, an exception will be raised.
|
||||
:param sources: list of paths on the connected device.
|
||||
:param dest: path to the file or folder on the host
|
||||
:param timeout: timeout (in seconds) for the transfer for each file; if the
|
||||
transfer does not complete within this period, an exception will be
|
||||
raised.
|
||||
|
||||
.. method:: execute(self, command, timeout=None, check_exit_code=False, as_root=False, strip_colors=True, will_succeed=False)
|
||||
|
||||
@@ -58,7 +59,7 @@ class that implements the following methods.
|
||||
:param will_succeed: The command is assumed to always succeed, unless there is
|
||||
an issue in the environment like the loss of network connectivity. That
|
||||
will make the method always raise an instance of a subclass of
|
||||
:class:`DevlibTransientError' when the command fails, instead of a
|
||||
:class:`DevlibTransientError` when the command fails, instead of a
|
||||
:class:`DevlibStableError`.
|
||||
|
||||
.. method:: background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False)
|
||||
@@ -76,7 +77,7 @@ class that implements the following methods.
|
||||
|
||||
.. note:: This **will block the connection** until the command completes.
|
||||
|
||||
.. note:: The above methods are directly wrapped by :class:`Target` methods,
|
||||
.. note:: The above methods are directly wrapped by :class:`~devlib.target.Target` methods,
|
||||
however note that some of the defaults are different.
|
||||
|
||||
.. method:: cancel_running_command(self)
|
||||
@@ -100,7 +101,12 @@ class that implements the following methods.
|
||||
Connection Types
|
||||
----------------
|
||||
|
||||
.. class:: AdbConnection(device=None, timeout=None)
|
||||
|
||||
.. module:: devlib.utils.android
|
||||
|
||||
.. class:: AdbConnection(device=None, timeout=None, adb_server=None, adb_as_root=False, connection_attempts=MAX_ATTEMPTS,\
|
||||
poll_transfers=False, start_transfer_poll_delay=30, total_transfer_timeout=3600,\
|
||||
transfer_poll_period=30)
|
||||
|
||||
A connection to an android device via ``adb`` (Android Debug Bridge).
|
||||
``adb`` is part of the Android SDK (though stand-alone versions are also
|
||||
@@ -113,10 +119,37 @@ Connection Types
|
||||
:param timeout: Connection timeout in seconds. If a connection to the device
|
||||
is not established within this period, :class:`HostError`
|
||||
is raised.
|
||||
:param adb_server: Allows specifying the address of the adb server to use.
|
||||
:param adb_as_root: Specify whether the adb server should be restarted in root mode.
|
||||
:param connection_attempts: Specify how many connection attempts, 10 seconds
|
||||
apart, should be made to connect to the device.
|
||||
Defaults to 5.
|
||||
:param poll_transfers: Specify whether file transfers should be polled. Polling
|
||||
monitors the progress of file transfers and periodically
|
||||
checks whether they have stalled, attempting to cancel
|
||||
the transfers prematurely if so.
|
||||
:param start_transfer_poll_delay: If transfers are polled, specify the length of
|
||||
time after a transfer has started before polling
|
||||
should start.
|
||||
:param total_transfer_timeout: If transfers are polled, specify the total amount of time
|
||||
to elapse before the transfer is cancelled, regardless
|
||||
of its activity.
|
||||
:param transfer_poll_period: If transfers are polled, specify the period at which
|
||||
the transfers are sampled for activity. Too small values
|
||||
may cause the destination size to appear the same over
|
||||
one or more sample periods, causing improper transfer
|
||||
cancellation.
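As a rough illustration of the parameters above (the device serial and adb
server address are placeholders):

.. code-block:: python

    from devlib.utils.android import AdbConnection

    # connect to a specific device through a non-default adb server,
    # with transfer polling enabled
    conn = AdbConnection(device='0123456789abcdef',
                         adb_server='127.0.0.1',
                         poll_transfers=True)
    print(conn.execute('getprop ro.build.version.release'))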
|
||||
|
||||
|
||||
.. class:: SshConnection(host, username, password=None, keyfile=None, port=None,\
|
||||
timeout=None, password_prompt=None)
|
||||
|
||||
.. module:: devlib.utils.ssh
|
||||
|
||||
.. class:: SshConnection(host, username, password=None, keyfile=None, port=22,\
|
||||
timeout=None, platform=None, \
|
||||
sudo_cmd="sudo -- sh -c {}", strict_host_check=True, \
|
||||
use_scp=False, poll_transfers=False,
|
||||
start_transfer_poll_delay=30, total_transfer_timeout=3600,\
|
||||
transfer_poll_period=30)
|
||||
|
||||
A connection to a device on the network over SSH.
|
||||
|
||||
@@ -124,6 +157,9 @@ Connection Types
|
||||
:param username: username for SSH login
|
||||
:param password: password for the SSH connection
|
||||
|
||||
.. note:: To connect to a system without a password, this
parameter should be set to an empty string, otherwise
|
||||
ssh key authentication will be attempted.
|
||||
.. note:: In order to use password-based authentication, the
|
||||
``sshpass`` utility must be installed on the
|
||||
system.
|
||||
@@ -138,10 +174,26 @@ Connection Types
|
||||
:param timeout: Timeout for the connection in seconds. If a connection
|
||||
cannot be established within this time, an error will be
|
||||
raised.
|
||||
:param password_prompt: A string with the password prompt used by
|
||||
``sshpass``. Set this if your version of ``sshpass``
|
||||
uses something other than ``"[sudo] password"``.
|
||||
|
||||
:param platform: Specify the platform to be used. The generic :class:`~devlib.platform.Platform`
|
||||
class is used by default.
|
||||
:param sudo_cmd: Specify the format of the command used to grant sudo access.
|
||||
:param strict_host_check: Specify the ssh connection parameter ``StrictHostKeyChecking``. Defaults to ``True``.
|
||||
:param use_scp: Use SCP for file transfers, defaults to SFTP.
|
||||
:param poll_transfers: Specify whether file transfers should be polled. Polling
|
||||
monitors the progress of file transfers and periodically
|
||||
checks whether they have stalled, attempting to cancel
|
||||
the transfers prematurely if so.
|
||||
:param start_transfer_poll_delay: If transfers are polled, specify the length of
|
||||
time after a transfer has started before polling
|
||||
should start.
|
||||
:param total_transfer_timeout: If transfers are polled, specify the total amount of time
|
||||
to elapse before the transfer is cancelled, regardless
|
||||
of its activity.
|
||||
:param transfer_poll_period: If transfers are polled, specify the period at which
|
||||
the transfers are sampled for activity. Too small values
|
||||
may cause the destination size to appear the same over
|
||||
one or more sample periods, causing improper transfer
|
||||
cancellation.
|
||||
|
||||
.. class:: TelnetConnection(host, username, password=None, port=None,\
|
||||
timeout=None, password_prompt=None,\
|
||||
@@ -174,6 +226,7 @@ Connection Types
|
||||
connection to reduce the possibility of clashes).
|
||||
This parameter is ignored for SSH connections.
|
||||
|
||||
.. module:: devlib.host
|
||||
|
||||
.. class:: LocalConnection(keep_password=True, unrooted=False, password=None)
|
||||
|
||||
@@ -189,6 +242,9 @@ Connection Types
|
||||
prompting for it.
|
||||
|
||||
|
||||
.. module:: devlib.utils.ssh
|
||||
:noindex:
|
||||
|
||||
.. class:: Gem5Connection(platform, host=None, username=None, password=None,\
|
||||
timeout=None, password_prompt=None,\
|
||||
original_prompt=None)
|
||||
@@ -197,7 +253,7 @@ Connection Types
|
||||
|
||||
.. note:: Some of the following input parameters are optional and will be ignored during
|
||||
initialisation. They were kept to keep the analogy with a :class:`TelnetConnection`
|
||||
(i.e. ``host``, `username``, ``password``, ``port``,
|
||||
(i.e. ``host``, ``username``, ``password``, ``port``,
|
||||
``password_prompt`` and ``original_promp``)
|
||||
|
||||
|
||||
@@ -207,7 +263,7 @@ Connection Types
|
||||
will be ignored, the gem5 simulation needs to be
|
||||
on the same host the user is currently on, so if
|
||||
the host given as input parameter is not the
|
||||
same as the actual host, a ``TargetStableError``
|
||||
same as the actual host, a :class:`TargetStableError`
|
||||
will be raised to prevent confusion.
|
||||
|
||||
:param username: Username in the simulated system
|
||||
@@ -233,14 +289,14 @@ The only methods discussed below are those that will be overwritten by the
|
||||
|
||||
A connection to a gem5 simulation that emulates a Linux system.
|
||||
|
||||
.. method:: _login_to_device(self)
|
||||
.. method:: _login_to_device(self)
|
||||
|
||||
Login to the gem5 simulated system.
|
||||
Login to the gem5 simulated system.
|
||||
|
||||
.. class:: AndroidGem5Connection
|
||||
|
||||
A connection to a gem5 simulation that emulates an Android system.
|
||||
|
||||
.. method:: _wait_for_boot(self)
|
||||
.. method:: _wait_for_boot(self)
|
||||
|
||||
Wait for the gem5 simulated system to have booted and finished the booting animation.
|
||||
Wait for the gem5 simulated system to have booted and finished the booting animation.
|
||||
|
@@ -1,7 +1,6 @@
|
||||
Derived Measurements
|
||||
=====================
|
||||
|
||||
|
||||
The ``DerivedMeasurements`` API provides a consistent way of performing post
|
||||
processing on a provided :class:`MeasurementCsv` file.
|
||||
|
||||
@@ -35,6 +34,8 @@ API
|
||||
Derived Measurements
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. module:: devlib.derived
|
||||
|
||||
.. class:: DerivedMeasurements
|
||||
|
||||
The ``DerivedMeasurements`` class provides an API for post-processing
|
||||
@@ -102,17 +103,20 @@ Available Derived Measurements
|
||||
Energy
|
||||
~~~~~~
|
||||
|
||||
.. module:: devlib.derived.energy
|
||||
|
||||
.. class:: DerivedEnergyMeasurements
|
||||
|
||||
The ``DerivedEnergyMeasurements`` class is used to calculate average power and
|
||||
cumulative energy for each site if the required data is present.
|
||||
The ``DerivedEnergyMeasurements`` class is used to calculate average power
|
||||
and cumulative energy for each site if the required data is present.
|
||||
|
||||
The calculation of cumulative energy can occur in 3 ways. If a
|
||||
``site`` contains ``energy`` results, the first and last measurements are extracted
|
||||
and the delta calculated. If not, a ``timestamp`` channel will be used to calculate
|
||||
the energy from the power channel, failing back to using the sample rate attribute
|
||||
of the :class:`MeasurementCsv` file if timestamps are not available. If neither
|
||||
timestamps or a sample rate are available then an error will be raised.
|
||||
The calculation of cumulative energy can occur in 3 ways. If a ``site``
|
||||
contains ``energy`` results, the first and last measurements are extracted
|
||||
and the delta calculated. If not, a ``timestamp`` channel will be used to
|
||||
calculate the energy from the power channel, falling back to using the sample
|
||||
rate attribute of the :class:`MeasurementCsv` file if timestamps are not
|
||||
available. If neither timestamps nor a sample rate are available then an error
|
||||
will be raised.
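A minimal usage sketch of the :meth:`process` method documented below, assuming
``measurement_csv`` is a :class:`MeasurementCsv` previously produced by an
energy-capable instrument:

.. code-block:: python

    from devlib.derived.energy import DerivedEnergyMeasurements

    # returns a list of derived metrics (average power, cumulative energy, ...);
    # each derived metric exposes at least a name and a value
    for metric in DerivedEnergyMeasurements.process(measurement_csv):
        print(metric.name, metric.value)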
|
||||
|
||||
|
||||
.. method:: DerivedEnergyMeasurements.process(measurement_csv)
|
||||
@@ -128,6 +132,8 @@ Energy
|
||||
FPS / Rendering
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. module:: devlib.derived.fps
|
||||
|
||||
.. class:: DerivedGfxInfoStats(drop_threshold=5, suffix='-fps', filename=None, outdir=None)
|
||||
|
||||
Produces FPS (frames-per-second) and other derived statistics from
|
||||
|
@@ -3,6 +3,8 @@
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
.. module:: devlib
|
||||
|
||||
Welcome to devlib documentation
|
||||
===============================
|
||||
|
||||
@@ -19,6 +21,7 @@ Contents:
|
||||
target
|
||||
modules
|
||||
instrumentation
|
||||
collectors
|
||||
derived_measurements
|
||||
platform
|
||||
connection
|
||||
|
@@ -1,11 +1,13 @@
|
||||
.. _instrumentation:
|
||||
|
||||
Instrumentation
|
||||
===============
|
||||
|
||||
The ``Instrument`` API provide a consistent way of collecting measurements from
|
||||
a target. Measurements are collected via an instance of a class derived from
|
||||
:class:`Instrument`. An ``Instrument`` allows collection of measurement from one
|
||||
or more channels. An ``Instrument`` may support ``INSTANTANEOUS`` or
|
||||
``CONTINUOUS`` collection, or both.
|
||||
:class:`~devlib.instrument.Instrument`. An ``Instrument`` allows collection of
|
||||
measurement from one or more channels. An ``Instrument`` may support
|
||||
``INSTANTANEOUS`` or ``CONTINUOUS`` collection, or both.
|
||||
|
||||
Example
|
||||
-------
|
||||
@@ -48,6 +50,8 @@ Android target.
|
||||
API
|
||||
---
|
||||
|
||||
.. module:: devlib.instrument
|
||||
|
||||
Instrument
|
||||
~~~~~~~~~~
|
||||
|
||||
@@ -120,14 +124,16 @@ Instrument
|
||||
Take a single measurement from ``active_channels``. Returns a list of
|
||||
:class:`Measurement` objects (one for each active channel).
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``INSTANTANEOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.start()
|
||||
|
||||
Starts collecting measurements from ``active_channels``.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.stop()
|
||||
@@ -135,7 +141,8 @@ Instrument
|
||||
Stops collecting measurements from ``active_channels``. Must be called after
|
||||
:func:`start()`.
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_data(outfile)
|
||||
@@ -146,9 +153,9 @@ Instrument
|
||||
``<site>_<kind>`` (see :class:`InstrumentChannel`). The order of the columns
|
||||
will be the same as the order of channels in ``Instrument.active_channels``.
|
||||
|
||||
If reporting timestamps, one channel must have a ``site`` named ``"timestamp"``
|
||||
and a ``kind`` of a :class:`MeasurmentType` of an appropriate time unit which will
|
||||
be used, if appropriate, during any post processing.
|
||||
If reporting timestamps, one channel must have a ``site`` named
|
||||
``"timestamp"`` and a ``kind`` of a :class:`MeasurementType` of an appropriate
|
||||
time unit which will be used, if appropriate, during any post processing.
|
||||
|
||||
.. note:: Currently supported time units are seconds, milliseconds and
|
||||
microseconds, other units can also be used if an appropriate
|
||||
@@ -158,21 +165,34 @@ Instrument
|
||||
that can be used to stream :class:`Measurement`\ s lists (similar to what is
|
||||
returned by ``take_measurement()``).
|
||||
|
||||
.. note:: This method is only implemented by :class:`Instrument`\ s that
|
||||
.. note:: This method is only implemented by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
|
||||
|
||||
.. method:: Instrument.get_raw()
|
||||
|
||||
Returns a list of paths to files containing raw output from the underlying
|
||||
source(s) that is used to produce the data CSV. If now raw output is
|
||||
source(s) that is used to produce the data CSV. If no raw output is
|
||||
generated or saved, an empty list will be returned. The format of the
|
||||
contents of the raw files is entirely source-dependent.
|
||||
|
||||
.. note:: This method is not guaranteed to return valid filepaths after the
|
||||
:meth:`teardown` method has been invoked as the raw files may have
|
||||
been deleted. Please ensure that copies are created manually
|
||||
prior to calling :meth:`teardown` if the files are to be retained.
|
||||
|
||||
.. method:: Instrument.teardown()
|
||||
|
||||
Performs any required clean up of the instrument. This usually includes
|
||||
removing temporary and raw files (if ``keep_raw`` is set to ``False`` on relevant
|
||||
instruments), stopping services etc.
|
||||
|
||||
.. attribute:: Instrument.sample_rate_hz
|
||||
|
||||
Sample rate of the instrument in Hz. Assumed to be the same for all channels.
|
||||
|
||||
.. note:: This attribute is only provided by :class:`Instrument`\ s that
|
||||
.. note:: This attribute is only provided by
|
||||
:class:`~devlib.instrument.Instrument`\ s that
|
||||
support ``CONTINUOUS`` measurement.
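Putting the methods above together, a typical ``CONTINUOUS`` collection run
looks roughly like the following (a sketch; the instrument class and its
``resistor_values`` argument are just one example, other instruments take
different constructor parameters):

.. code-block:: python

    from devlib import AndroidTarget, EnergyProbeInstrument

    target = AndroidTarget()
    instrument = EnergyProbeInstrument(target, resistor_values=[0.02])
    instrument.setup()            # one-time set up on the target
    instrument.reset()            # activate channels before collection
    instrument.start()
    # ... run the workload of interest ...
    instrument.stop()
    csv = instrument.get_data('energy.csv')   # MeasurementCsv written to this path
    instrument.teardown()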
|
||||
|
||||
Instrument Channel
|
||||
@@ -181,8 +201,8 @@ Instrument Channel
|
||||
.. class:: InstrumentChannel(name, site, measurement_type, \*\*attrs)
|
||||
|
||||
An :class:`InstrumentChannel` describes a single type of measurement that may
|
||||
be collected by an :class:`Instrument`. A channel is primarily defined by a
|
||||
``site`` and a ``measurement_type``.
|
||||
be collected by an :class:`~devlib.instrument.Instrument`. A channel is
|
||||
primarily defined by a ``site`` and a ``measurement_type``.
|
||||
|
||||
A ``site`` indicates where on the target a measurement is collected from
|
||||
(e.g. a voltage rail or location of a sensor).
|
||||
@@ -400,7 +420,7 @@ For reference, the software stack on the host is roughly given by:
|
||||
|
||||
Ethernet was the only IIO Interface used and tested during the development of
|
||||
this instrument. However,
|
||||
`USB seems to be supported<https://gitlab.com/baylibre-acme/ACME/issues/2>`_.
|
||||
`USB seems to be supported <https://gitlab.com/baylibre-acme/ACME/issues/2>`_.
|
||||
The IIO library also provides "Local" and "XML" connections but these are to be
|
||||
used when the IIO devices are directly connected to the host *i.e.* in our
|
||||
case, if we were to run Python and devlib on the BBB. These are also untested.
|
||||
@@ -475,12 +495,13 @@ voltage (see previous figure), samples are retrieved at a frequency of
|
||||
|
||||
where :math:`T_X` is the integration time for the :math:`X` voltage.
|
||||
|
||||
As described below (:meth:`BaylibreAcmeInstrument.reset`), the integration
|
||||
times for the bus and shunt voltage can be set separately which allows a
|
||||
tradeoff of accuracy between signals. This is particularly useful as the shunt
|
||||
voltage returned by the INA226 has a higher resolution than the bus voltage
|
||||
(2.5 μV and 1.25 mV LSB, respectively) and therefore would benefit more from a
|
||||
longer integration time.
|
||||
As described below (:meth:`BaylibreAcmeInstrument.reset
|
||||
<devlib.instrument.baylibre_acme.BaylibreAcmeInstrument.reset>`), the
|
||||
integration times for the bus and shunt voltage can be set separately which
|
||||
allows a tradeoff of accuracy between signals. This is particularly useful as
|
||||
the shunt voltage returned by the INA226 has a higher resolution than the bus
|
||||
voltage (2.5 μV and 1.25 mV LSB, respectively) and therefore would benefit more
|
||||
from a longer integration time.
|
||||
|
||||
As an illustration, consider the following sampled sine wave and notice how
|
||||
increasing the integration time (of the bus voltage in this case) "smoothes"
|
||||
@@ -588,8 +609,9 @@ Buffer-based transactions
|
||||
|
||||
Samples made available by the INA226 are retrieved by the BBB and stored in a
|
||||
buffer which is sent back to the host once it is full (see
|
||||
``buffer_samples_count`` in :meth:`BaylibreAcmeInstrument.setup` for setting
|
||||
its size). Therefore, the larger the buffer is, the longer it takes to be
|
||||
``buffer_samples_count`` in :meth:`BaylibreAcmeInstrument.setup
|
||||
<devlib.instrument.baylibre_acme.BaylibreAcmeInstrument.setup>` for setting its
|
||||
size). Therefore, the larger the buffer is, the longer it takes to be
|
||||
transmitted back but the less often it has to be transmitted. To illustrate
|
||||
this, consider the following graphs showing the time difference between
|
||||
successive samples in a retrieved signal when the size of the buffer changes:
|
||||
@@ -611,6 +633,8 @@ given by `libiio (the Linux IIO interface)`_ however only the network-based one
|
||||
has been tested. For the other classes, please refer to the official IIO
|
||||
documentation for the meaning of their constructor parameters.
|
||||
|
||||
.. module:: devlib.instrument.baylibre_acme
|
||||
|
||||
.. class:: BaylibreAcmeInstrument(target=None, iio_context=None, use_base_iio_context=False, probe_names=None)
|
||||
|
||||
Base class wrapper for the ACME instrument which itself is a wrapper for the
|
||||
|
@@ -1,11 +1,13 @@
|
||||
.. module:: devlib.module
|
||||
|
||||
.. _modules:
|
||||
|
||||
Modules
|
||||
=======
|
||||
|
||||
Modules add additional functionality to the core :class:`Target` interface.
|
||||
Usually, it is support for specific subsystems on the target. Modules are
|
||||
instantiated as attributes of the :class:`Target` instance.
|
||||
Modules add additional functionality to the core :class:`~devlib.target.Target`
|
||||
interface. Usually, it is support for specific subsystems on the target. Modules
|
||||
are instantiated as attributes of the :class:`~devlib.target.Target` instance.
|
||||
|
||||
hotplug
|
||||
-------
|
||||
@@ -28,6 +30,8 @@ interface to this subsystem
|
||||
# Make sure all cpus are online
|
||||
target.hotplug.online_all()
|
||||
|
||||
.. module:: devlib.module.cpufreq
|
||||
|
||||
cpufreq
|
||||
-------
|
||||
|
||||
@@ -132,6 +136,9 @@ policies (governors). The ``devlib`` module exposes the following interface
|
||||
``1`` or ``"cpu1"``).
|
||||
:param frequency: Frequency to set.
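For example, a typical interaction with the module might look like this (a
sketch; ``list_frequencies`` is assumed to be available alongside the methods
shown above):

.. code-block:: python

    # pin cpu0 to its highest available frequency
    target.cpufreq.set_governor(0, 'userspace')
    freqs = target.cpufreq.list_frequencies(0)
    target.cpufreq.set_frequency(0, freqs[-1])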
|
||||
|
||||
|
||||
.. module:: devlib.module.cpuidle
|
||||
|
||||
cpuidle
|
||||
-------
|
||||
|
||||
@@ -167,11 +174,15 @@ cpuidle
|
||||
You can also call ``enable()`` or ``disable()`` on :class:`CpuidleState` objects
|
||||
returned by get_state(s).
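For instance, idle states can be listed and toggled like this (a sketch;
``enable_all`` is assumed to be available on the module):

.. code-block:: python

    # inspect and manipulate idle states on cpu0
    for state in target.cpuidle.get_states(cpu=0):
        print(state.name, state.desc)
        state.disable()               # disable this state on cpu0
    target.cpuidle.enable_all(cpu=0)  # re-enable everything afterwards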
|
||||
|
||||
.. module:: devlib.module.cgroups
|
||||
|
||||
cgroups
|
||||
-------
|
||||
|
||||
TODO
|
||||
|
||||
.. module:: devlib.module.hwmon
|
||||
|
||||
hwmon
|
||||
-----
|
||||
|
||||
@@ -187,8 +198,8 @@ Modules implement discrete, optional pieces of functionality ("optional" in the
|
||||
sense that the functionality may or may not be present on the target device, or
|
||||
that it may or may not be necessary for a particular application).
|
||||
|
||||
Every module (ultimately) derives from :class:`Module` class. A module must
|
||||
define the following class attributes:
|
||||
Every module (ultimately) derives from :class:`devlib.module.Module` class. A
|
||||
module must define the following class attributes:
|
||||
|
||||
:name: A unique name for the module. This cannot clash with any of the existing
|
||||
names and must be a valid Python identifier, but is otherwise free-form.
|
||||
@@ -204,14 +215,16 @@ define the following class attributes:
|
||||
which case the module's ``name`` will be treated as its
|
||||
``kind`` as well.
|
||||
|
||||
:stage: This defines when the module will be installed into a :class:`Target`.
|
||||
Currently, the following values are allowed:
|
||||
:stage: This defines when the module will be installed into a
|
||||
:class:`~devlib.target.Target`. Currently, the following values are
|
||||
allowed:
|
||||
|
||||
:connected: The module is installed after a connection to the target has
|
||||
been established. This is the default.
|
||||
:early: The module will be installed when a :class:`Target` is first
|
||||
created. This should be used for modules that do not rely on a
|
||||
live connection to the target.
|
||||
:early: The module will be installed when a
|
||||
:class:`~devlib.target.Target` is first created. This should be
|
||||
used for modules that do not rely on a live connection to the
|
||||
target.
|
||||
:setup: The module will be installed after initial setup of the device
|
||||
has been performed. This allows the module to utilize assets
|
||||
deployed during the setup stage for example 'Busybox'.
|
||||
@@ -220,8 +233,8 @@ Additionally, a module must implement a static (or class) method :func:`probe`:
|
||||
|
||||
.. method:: Module.probe(target)
|
||||
|
||||
This method takes a :class:`Target` instance and returns ``True`` if this
|
||||
module is supported by that target, or ``False`` otherwise.
|
||||
This method takes a :class:`~devlib.target.Target` instance and returns
|
||||
``True`` if this module is supported by that target, or ``False`` otherwise.
|
||||
|
||||
.. note:: If the module ``stage`` is ``"early"``, this method cannot assume
|
||||
that a connection has been established (i.e. it can only access
|
||||
@@ -231,9 +244,9 @@ Installation and invocation
|
||||
***************************
|
||||
|
||||
The default installation method will create an instance of a module (the
|
||||
:class:`Target` instance being the sole argument) and assign it to the target
|
||||
instance attribute named after the module's ``kind`` (or ``name`` if ``kind`` is
|
||||
``None``).
|
||||
:class:`~devlib.target.Target` instance being the sole argument) and assign it
|
||||
to the target instance attribute named after the module's ``kind`` (or
|
||||
``name`` if ``kind`` is ``None``).
|
||||
|
||||
It is possible to change the installation procedure for a module by overriding
|
||||
the default :func:`install` method. The method must have the following
|
||||
@@ -322,7 +335,7 @@ FlashModule
|
||||
|
||||
"flash"
|
||||
|
||||
.. method:: __call__(image_bundle=None, images=None, boot_config=None)
|
||||
.. method:: __call__(image_bundle=None, images=None, boot_config=None, connect=True)
|
||||
|
||||
Must be implemented by derived classes.
|
||||
|
||||
@@ -338,15 +351,17 @@ FlashModule
|
||||
:param boot_config: Some platforms require specifying boot arguments at the
|
||||
time of flashing the images, rather than during each
|
||||
reboot. For other platforms, this will be ignored.
|
||||
:param connect: Specify whether to try and connect to the target after flashing.
|
||||
|
||||
|
||||
Module Registration
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Modules are specified on :class:`Target` or :class:`Platform` creation by name.
|
||||
In order to find the class associated with the name, the module needs to be
|
||||
registered with ``devlib``. This is accomplished by passing the module class
|
||||
into :func:`register_module` method once it is defined.
|
||||
Modules are specified on :class:`~devlib.target.Target` or
|
||||
:class:`~devlib.platform.Platform` creation by name. In order to find the class
|
||||
associated with the name, the module needs to be registered with ``devlib``.
|
||||
This is accomplished by passing the module class into :func:`register_module`
|
||||
method once it is defined.
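For illustration, a self-contained (hypothetical) module and its registration
could look like this:

.. code-block:: python

    from devlib.module import Module, register_module

    class LedModule(Module):          # hypothetical example module
        name = 'led'
        stage = 'connected'

        @staticmethod
        def probe(target):
            # only load if the sysfs LED interface is present on the target
            return target.file_exists('/sys/class/leds')

        def set_brightness(self, led, value):
            path = '/sys/class/leds/{}/brightness'.format(led)
            self.target.write_value(path, value)

    register_module(LedModule)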
|
||||
|
||||
.. note:: If you're writing a module to be included as part of ``devlib`` code
|
||||
base, you can place the file with the module class under
|
||||
|
@@ -1,26 +1,26 @@
|
||||
Overview
|
||||
========
|
||||
|
||||
A :class:`Target` instance serves as the main interface to the target device.
|
||||
A :class:`~devlib.target.Target` instance serves as the main interface to the target device.
|
||||
There are currently four target interfaces:
|
||||
|
||||
- :class:`LinuxTarget` for interacting with Linux devices over SSH.
|
||||
- :class:`AndroidTarget` for interacting with Android devices over adb.
|
||||
- :class:`ChromeOsTarget`: for interacting with ChromeOS devices over SSH, and
|
||||
their Android containers over adb.
|
||||
- :class:`LocalLinuxTarget`: for interacting with the local Linux host.
|
||||
- :class:`~devlib.target.LinuxTarget` for interacting with Linux devices over SSH.
|
||||
- :class:`~devlib.target.AndroidTarget` for interacting with Android devices over adb.
|
||||
- :class:`~devlib.target.ChromeOsTarget`: for interacting with ChromeOS devices
|
||||
over SSH, and their Android containers over adb.
|
||||
- :class:`~devlib.target.LocalLinuxTarget`: for interacting with the local Linux host.
|
||||
|
||||
They all work in more-or-less the same way, with the major difference being in
|
||||
how connection settings are specified; though there may also be a few APIs
|
||||
specific to a particular target type (e.g. :class:`AndroidTarget` exposes
|
||||
methods for working with logcat).
|
||||
specific to a particular target type (e.g. :class:`~devlib.target.AndroidTarget`
|
||||
exposes methods for working with logcat).
|
||||
|
||||
|
||||
Acquiring a Target
|
||||
------------------
|
||||
|
||||
To create an interface to your device, you just need to instantiate one of the
|
||||
:class:`Target` derivatives listed above, and pass it the right
|
||||
:class:`~devlib.target.Target` derivatives listed above, and pass it the right
|
||||
``connection_settings``. Code snippet below gives a typical example of
|
||||
instantiating each of the three target types.
|
||||
|
||||
@@ -47,21 +47,22 @@ instantiating each of the three target types.
|
||||
t3 = AndroidTarget(connection_settings={'device': '0123456789abcde'})
|
||||
|
||||
Instantiating a target may take a second or two as the remote device will be
|
||||
queried to initialize :class:`Target`'s internal state. If you would like to
|
||||
create a :class:`Target` instance but not immediately connect to the remote
|
||||
device, you can pass ``connect=False`` parameter. If you do that, you would have
|
||||
to then explicitly call ``t.connect()`` before you can interact with the device.
|
||||
queried to initialize :class:`~devlib.target.Target`'s internal state. If you
|
||||
would like to create a :class:`~devlib.target.Target` instance but not
|
||||
immediately connect to the remote device, you can pass ``connect=False``
|
||||
parameter. If you do that, you would have to then explicitly call
|
||||
``t.connect()`` before you can interact with the device.
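For example, deferring the connection (the timeout value is arbitrary):

.. code:: python

    t = AndroidTarget(connect=False)
    # ... later, once the device is ready ...
    t.connect(timeout=30)
    print(t.execute('uname -a'))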
|
||||
|
||||
There are a few additional parameters you can pass in instantiation besides
|
||||
``connection_settings``, but they are usually unnecessary. Please see
|
||||
:class:`Target` API documentation for more details.
|
||||
:class:`~devlib.target.Target` API documentation for more details.
|
||||
|
||||
Target Interface
|
||||
----------------
|
||||
|
||||
This is a quick overview of the basic interface to the device. See
|
||||
:class:`Target` API documentation for the full list of supported methods and
|
||||
more detailed documentation.
|
||||
:class:`~devlib.target.Target` API documentation for the full list of supported
|
||||
methods and more detailed documentation.
|
||||
|
||||
One-time Setup
|
||||
~~~~~~~~~~~~~~
|
||||
@@ -167,15 +168,16 @@ Process Control
|
||||
# PsEntry records.
|
||||
entries = t.ps()
|
||||
# e.g. print virtual memory sizes of all running sshd processes:
|
||||
print ', '.join(str(e.vsize) for e in entries if e.name == 'sshd')
|
||||
print(', '.join(str(e.vsize) for e in entries if e.name == 'sshd'))
|
||||
|
||||
|
||||
More...
|
||||
~~~~~~~
|
||||
|
||||
As mentioned previously, the above is not intended to be exhaustive
|
||||
documentation of the :class:`Target` interface. Please refer to the API
|
||||
documentation for the full list of attributes and methods and their parameters.
|
||||
documentation of the :class:`~devlib.target.Target` interface. Please refer to
|
||||
the API documentation for the full list of attributes and methods and their
|
||||
parameters.
|
||||
|
||||
Super User Privileges
|
||||
---------------------
|
||||
@@ -239,18 +241,20 @@ complete. Retrying it or bailing out is therefore a responsibility of the caller
|
||||
|
||||
The hierarchy is as follows:
|
||||
|
||||
.. module:: devlib.exception
|
||||
|
||||
- :class:`DevlibError`
|
||||
|
||||
|
||||
- :class:`WorkerThreadError`
|
||||
- :class:`HostError`
|
||||
- :class:`TargetError`
|
||||
|
||||
|
||||
- :class:`TargetStableError`
|
||||
- :class:`TargetTransientError`
|
||||
- :class:`TargetNotRespondingError`
|
||||
|
||||
|
||||
- :class:`DevlibStableError`
|
||||
|
||||
|
||||
- :class:`TargetStableError`
|
||||
|
||||
- :class:`DevlibTransientError`
|
||||
@@ -288,7 +292,7 @@ Modules
|
||||
Additional functionality is exposed via modules. Modules are initialized as
|
||||
attributes of a target instance. By default, ``hotplug``, ``cpufreq``,
|
||||
``cpuidle``, ``cgroups`` and ``hwmon`` will attempt to load on target; additional
|
||||
modules may be specified when creating a :class:`Target` instance.
|
||||
modules may be specified when creating a :class:`~devlib.target.Target` instance.
|
||||
|
||||
A module will probe the target for support before attempting to load. So if the
|
||||
underlying platform does not support particular functionality (e.g. the kernel
|
||||
@@ -307,12 +311,22 @@ has been successfully installed on a target, you can use ``has()`` method, e.g.
|
||||
|
||||
Please see the modules documentation for more detail.
|
||||
|
||||
Instruments and Collectors
|
||||
--------------------------
|
||||
|
||||
Measurement and Trace
|
||||
---------------------
|
||||
You can retrieve multiple types of data from a target. There are two categories
|
||||
of classes that allow for this:
|
||||
|
||||
You can collected traces (currently, just ftrace) using
|
||||
:class:`TraceCollector`\ s. For example
|
||||
|
||||
- An :class:`Instrument` which may be used to collect measurements (such as power) from
|
||||
targets that support it. Please see the
|
||||
:ref:`instruments documentation <Instrumentation>` for more details.
|
||||
|
||||
- A :class:`Collector` may be used to collect arbitrary data from a ``Target``, varying
|
||||
from screenshots to trace data. Please see the
|
||||
:ref:`collectors documentation <collector>` for more details.
|
||||
|
||||
An example workflow using :class:`FTraceCollector` is as follows:
|
||||
|
||||
.. code:: python
|
||||
|
||||
@@ -326,23 +340,19 @@ You can collected traces (currently, just ftrace) using
|
||||
# As a context manager, clear ftrace buffer using trace.reset(),
|
||||
# start trace collection using trace.start(), then stop it Using
|
||||
# trace.stop(). Using a context manager brings the guarantee that
|
||||
# tracing will stop even if an exception occurs, including
|
||||
# tracing will stop even if an exception occurs, including
|
||||
# KeyboardInterrupt (ctr-C) and SystemExit (sys.exit)
|
||||
with trace:
|
||||
# Perform the operations you want to trace here...
|
||||
import time; time.sleep(5)
|
||||
|
||||
# extract the trace file from the target into a local file
|
||||
trace.get_trace('/tmp/trace.bin')
|
||||
trace.get_data('/tmp/trace.bin')
|
||||
|
||||
# View trace file using Kernelshark (must be installed on the host).
|
||||
trace.view('/tmp/trace.bin')
|
||||
|
||||
# Convert binary trace into text format. This would normally be done
|
||||
# automatically during get_trace(), unless autoreport is set to False during
|
||||
# automatically during get_data(), unless autoreport is set to False during
|
||||
# instantiation of the trace collector.
|
||||
trace.report('/tmp/trace.bin', '/tmp/trace.txt')
|
||||
|
||||
In a similar way, :class:`Instrument` instances may be used to collect
|
||||
measurements (such as power) from targets that support it. Please see
|
||||
instruments documentation for more details.
|
||||
|
@@ -1,14 +1,17 @@
|
||||
.. module:: devlib.platform
|
||||
|
||||
.. _platform:
|
||||
|
||||
Platform
|
||||
========
|
||||
|
||||
:class:`Platform`\ s describe the system underlying the OS. They encapsulate
|
||||
hardware- and firmware-specific details. In most cases, the generic
|
||||
:class:`Platform` class, which gets used if a platform is not explicitly
|
||||
specified on :class:`Target` creation, will be sufficient. It will automatically
|
||||
query as much platform information (such CPU topology, hardware model, etc) if
|
||||
it was not specified explicitly by the user.
|
||||
:class:`~devlib.platform.Platform`\ s describe the system underlying the OS.
|
||||
They encapsulate hardware- and firmware-specific details. In most cases, the
|
||||
generic :class:`~devlib.platform.Platform` class, which gets used if a
|
||||
platform is not explicitly specified on :class:`~devlib.target.Target`
|
||||
creation, will be sufficient. It will automatically query as much platform
|
||||
information (such as CPU topology, hardware model, etc.) as it can, if it was not specified
|
||||
explicitly by the user.
|
||||
|
||||
|
||||
.. class:: Platform(name=None, core_names=None, core_clusters=None,\
|
||||
@@ -31,6 +34,7 @@ it was not specified explicitly by the user.
|
||||
platform (e.g. for handling flashing, rebooting, etc). These
|
||||
would be added to the Target's modules. (See :ref:`modules`\ ).
|
||||
|
||||
.. module:: devlib.platform.arm
|
||||
|
||||
Versatile Express
|
||||
-----------------
|
||||
@@ -38,8 +42,8 @@ Versatile Express
|
||||
The generic platform may be extended to support hardware- or
|
||||
infrastructure-specific functionality. Platforms exist for ARM
|
||||
VersatileExpress-based :class:`Juno` and :class:`TC2` development boards. In
|
||||
addition to the standard :class:`Platform` parameters above, these platforms
|
||||
support additional configuration:
|
||||
addition to the standard :class:`~devlib.platform.Platform` parameters above,
|
||||
these platforms support additional configuration:
|
||||
|
||||
|
||||
.. class:: VersatileExpressPlatform
|
||||
@@ -116,43 +120,53 @@ support additional configuration:
|
||||
Gem5 Simulation Platform
|
||||
------------------------
|
||||
|
||||
By initialising a Gem5SimulationPlatform, devlib will start a gem5 simulation (based upon the
|
||||
arguments the user provided) and then connect to it using :class:`Gem5Connection`.
|
||||
Using the methods discussed above, some methods of the :class:`Target` will be altered
|
||||
slightly to better suit gem5.
|
||||
By initialising a Gem5SimulationPlatform, devlib will start a gem5 simulation
|
||||
(based upon the arguments the user provided) and then connect to it using
|
||||
:class:`~devlib.utils.ssh.Gem5Connection`. Using the methods discussed above,
|
||||
some methods of the :class:`~devlib.target.Target` will be altered slightly to
|
||||
better suit gem5.
|
||||
|
||||
.. module:: devlib.platform.gem5
|
||||
|
||||
.. class:: Gem5SimulationPlatform(name, host_output_dir, gem5_bin, gem5_args, gem5_virtio, gem5_telnet_port=None)
|
||||
|
||||
During initialisation the gem5 simulation will be kicked off (based upon the arguments
|
||||
provided by the user) and the telnet port used by the gem5 simulation will be intercepted
|
||||
and stored for use by the :class:`Gem5Connection`.
|
||||
During initialisation the gem5 simulation will be kicked off (based upon the
|
||||
arguments provided by the user) and the telnet port used by the gem5
|
||||
simulation will be intercepted and stored for use by the
|
||||
:class:`~devlib.utils.ssh.Gem5Connection`.
|
||||
|
||||
:param name: Platform name
|
||||
|
||||
:param host_output_dir: Path on the host where the gem5 outputs will be placed (e.g. stats file)
|
||||
:param host_output_dir: Path on the host where the gem5 outputs will be
|
||||
placed (e.g. stats file)
|
||||
|
||||
:param gem5_bin: gem5 binary
|
||||
|
||||
:param gem5_args: Arguments to be passed onto gem5 such as config file etc.
|
||||
|
||||
:param gem5_virtio: Arguments to be passed onto gem5 in terms of the virtIO device used
|
||||
to transfer files between the host and the gem5 simulated system.
|
||||
:param gem5_virtio: Arguments to be passed onto gem5 in terms of the virtIO
|
||||
device used to transfer files between the host and the gem5 simulated
|
||||
system.
|
||||
|
||||
:param gem5_telnet_port: Not yet in use as it would be used in future implementations
|
||||
of devlib in which the user could use the platform to pick
|
||||
up an existing and running simulation.
|
||||
:param gem5_telnet_port: Not yet in use as it would be used in future
|
||||
implementations of devlib in which the user could
|
||||
use the platform to pick up an existing and running
|
||||
simulation.
|
||||
|
||||
|
||||
.. method:: Gem5SimulationPlatform.init_target_connection([target])
|
||||
|
||||
Based upon the OS defined in the :class:`Target`, the type of :class:`Gem5Connection`
|
||||
will be set (:class:`AndroidGem5Connection` or :class:`AndroidGem5Connection`).
|
||||
Based upon the OS defined in the :class:`~devlib.target.Target`, the type of
|
||||
:class:`~devlib.utils.ssh.Gem5Connection` will be set
|
||||
(:class:`~devlib.utils.ssh.LinuxGem5Connection` or
|
||||
:class:`~devlib.utils.ssh.AndroidGem5Connection`).
|
||||
|
||||
.. method:: Gem5SimulationPlatform.update_from_target([target])
|
||||
|
||||
This method provides specific setup procedures for a gem5 simulation. First of all, the m5
|
||||
binary will be installed on the guest (if it is not present). Secondly, three methods
|
||||
in the :class:`Target` will be monkey-patched:
|
||||
This method provides specific setup procedures for a gem5 simulation. First
|
||||
of all, the m5 binary will be installed on the guest (if it is not present).
|
||||
Secondly, three methods in the :class:`~devlib.target.Target` will be
|
||||
monkey-patched:
|
||||
|
||||
- **reboot**: this is not supported in gem5
|
||||
- **reset**: this is not supported in gem5
|
||||
@@ -160,7 +174,7 @@ slightly to better suit gem5.
|
||||
monkey-patched method will first try to
|
||||
transfer the existing screencaps.
|
||||
In case that does not work, it will fall back
|
||||
to the original :class:`Target` implementation
|
||||
to the original :class:`~devlib.target.Target` implementation
|
||||
of :func:`capture_screen`.
|
||||
|
||||
Finally, it will call the parent implementation of :func:`update_from_target`.
|
||||
|
doc/target.rst | 280
@@ -1,57 +1,62 @@
|
||||
.. module:: devlib.target
|
||||
|
||||
Target
|
||||
======
|
||||
|
||||
|
||||
.. class:: Target(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=None)
|
||||
|
||||
:class:`Target` is the primary interface to the remote device. All interactions
|
||||
with the device are performed via a :class:`Target` instance, either
|
||||
directly, or via its modules or a wrapper interface (such as an
|
||||
:class:`Instrument`).
|
||||
:class:`~devlib.target.Target` is the primary interface to the remote
|
||||
device. All interactions with the device are performed via a
|
||||
:class:`~devlib.target.Target` instance, either directly, or via its
|
||||
modules or a wrapper interface (such as an
|
||||
:class:`~devlib.instrument.Instrument`).
|
||||
|
||||
:param connection_settings: A ``dict`` that specifies how to connect to the remote
|
||||
device. Its contents depend on the specific :class:`Target` type (used see
|
||||
:param connection_settings: A ``dict`` that specifies how to connect to the
|
||||
remote device. Its contents depend on the specific
|
||||
:class:`~devlib.target.Target` type (see
|
||||
:ref:`connection-types`\ ).
|
||||
|
||||
:param platform: A :class:`Target` defines interactions at Operating System level. A
|
||||
:class:`Platform` describes the underlying hardware (such as CPUs
|
||||
available). If a :class:`Platform` instance is not specified on
|
||||
:class:`Target` creation, one will be created automatically and it will
|
||||
dynamically probe the device to discover as much about the underlying
|
||||
hardware as it can. See also :ref:`platform`\ .
|
||||
:param platform: A :class:`~devlib.target.Target` defines interactions at
|
||||
Operating System level. A :class:`~devlib.platform.Platform` describes
|
||||
the underlying hardware (such as CPUs available). If a
|
||||
:class:`~devlib.platform.Platform` instance is not specified on
|
||||
:class:`~devlib.target.Target` creation, one will be created
|
||||
automatically and it will dynamically probe the device to discover
|
||||
as much about the underlying hardware as it can. See also
|
||||
:ref:`platform`\ .
|
||||
|
||||
:param working_directory: This is primary location for on-target file system
|
||||
interactions performed by ``devlib``. This location *must* be readable and
|
||||
writable directly (i.e. without sudo) by the connection's user account.
|
||||
It may or may not allow execution. This location will be created,
|
||||
if necessary, during ``setup()``.
|
||||
interactions performed by ``devlib``. This location *must* be readable
|
||||
and writable directly (i.e. without sudo) by the connection's user
|
||||
account. It may or may not allow execution. This location will be
|
||||
created, if necessary, during :meth:`setup()`.
|
||||
|
||||
If not explicitly specified, this will be set to a default value
|
||||
depending on the type of :class:`Target`
|
||||
depending on the type of :class:`~devlib.target.Target`
|
||||
|
||||
:param executables_directory: This is the location to which ``devlib`` will
|
||||
install executable binaries (either during ``setup()`` or via an
|
||||
explicit ``install()`` call). This location *must* support execution
|
||||
install executable binaries (either during :meth:`setup()` or via an
|
||||
explicit :meth:`install()` call). This location *must* support execution
|
||||
(obviously). It should also be possible to write to this location,
|
||||
possibly with elevated privileges (i.e. on a rooted Linux target, it
|
||||
should be possible to write here with sudo, but not necessarily directly
|
||||
by the connection's account). This location will be created,
|
||||
if necessary, during ``setup()``.
|
||||
by the connection's account). This location will be created, if
|
||||
necessary, during :meth:`setup()`.
|
||||
|
||||
This location does *not* need to be the same as the system's executables
location. In fact, to prevent devlib from overwriting the system's defaults,
it is better if this is a separate location, if possible.
|
||||
|
||||
If not explicitly specified, this will be set to a default value
|
||||
depending on the type of :class:`Target`
|
||||
depending on the type of :class:`~devlib.target.Target`
|
||||
|
||||
:param connect: Specifies whether a connection should be established to the
|
||||
target. If this is set to ``False``, then ``connect()`` must be
|
||||
explicitly called later on before the :class:`Target` instance can be
|
||||
used.
|
||||
target. If this is set to ``False``, then :meth:`connect()` must be
|
||||
explicitly called later on before the :class:`~devlib.target.Target`
|
||||
instance can be used.
|
||||
|
||||
:param modules: a list of additional modules to be installed. Some modules will
|
||||
try to install by default (if supported by the underlying target).
|
||||
:param modules: a list of additional modules to be installed. Some modules
|
||||
will try to install by default (if supported by the underlying target).
|
||||
Current default modules are ``hotplug``, ``cpufreq``, ``cpuidle``,
|
||||
``cgroups``, and ``hwmon`` (See :ref:`modules`\ ).
|
||||
|
||||
@@ -59,40 +64,40 @@ Target
|
||||
|
||||
:param load_default_modules: If set to ``False``, default modules listed
|
||||
above will *not* attempt to load. This may be used to either speed up
|
||||
target instantiation (probing for initializing modules takes a bit of time)
|
||||
or if there is an issue with one of the modules on a particular device
|
||||
(the rest of the modules will then have to be explicitly specified in
|
||||
the ``modules``).
|
||||
target instantiation (probing for initializing modules takes a bit of
|
||||
time) or if there is an issue with one of the modules on a particular
|
||||
device (the rest of the modules will then have to be explicitly
|
||||
specified in the ``modules``).
|
||||
|
||||
:param shell_prompt: This is a regular expression that matches the shell
|
||||
prompt on the target. This may be used by some modules that establish
|
||||
auxiliary connections to a target over UART.
|
||||
|
||||
:param conn_cls: This is the type of connection that will be used to communicate
|
||||
with the device.
|
||||
:param conn_cls: This is the type of connection that will be used to
|
||||
communicate with the device.
|
||||
|
||||
.. attribute:: Target.core_names
|
||||
|
||||
This is a list containing names of CPU cores on the target, in the order in
|
||||
which they are indexed by the kernel. This is obtained via the underlying
|
||||
:class:`Platform`.
|
||||
:class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.core_clusters
|
||||
|
||||
Some devices feature heterogeneous core configurations (such as ARM
|
||||
big.LITTLE). This is a list that maps CPUs onto underlying clusters.
|
||||
(Usually, but not always, clusters correspond to groups of CPUs with the same
|
||||
name). This is obtained via the underlying :class:`Platform`.
|
||||
name). This is obtained via the underlying :class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.big_core
|
||||
|
||||
This is the name of the cores that are the "big"s in an ARM big.LITTLE
|
||||
configuration. This is obtained via the underlying :class:`Platform`.
|
||||
configuration. This is obtained via the underlying :class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.little_core
|
||||
|
||||
This is the name of the cores that are the "little"s in an ARM big.LITTLE
|
||||
configuration. This is obtained via the underlying :class:`Platform`.
|
||||
configuration. This is obtained via the underlying :class:`~devlib.platform.Platform`.
|
||||
|
||||
.. attribute:: Target.is_connected
|
||||
|
||||
@@ -152,11 +157,11 @@ Target

The underlying connection object. This will be ``None`` if an active
connection does not exist (e.g. if ``connect=False`` was passed on
initialization and :meth:`connect()` has not been called).

.. note:: a :class:`~devlib.target.Target` will automatically create a
connection per thread. This will always be set to the connection
for the current thread.
.. method:: Target.connect([timeout])

@@ -176,19 +181,20 @@ Target

being executed.

This should *not* be used to establish an initial connection; use
:meth:`connect()` instead.

.. note:: :class:`~devlib.target.Target` will automatically create a connection
per thread, so you don't normally need to use this explicitly in
threaded code. This is generally useful if you want to perform a
blocking operation (e.g. using :meth:`background()`) while at the same
time doing something else in the same host-side thread.
.. method:: Target.setup([executables])

This will perform an initial one-time set up of a device for devlib
interaction. This involves deployment of tools relied on by the
:class:`~devlib.target.Target`, creation of working locations on the device,
etc.

Usually, it is enough to call this method once per new device, as its effects
will persist across reboots. However, it is safe to call this method multiple
@@ -212,27 +218,45 @@ Target

operations during the reboot process to detect if the reboot has failed and
the device has hung.

.. method:: Target.push(source, dest [, as_root, timeout, globbing])

Transfer a file from the host machine to the target device (see the example
below).

If transfer polling is supported (ADB connections and SSH connections),
``poll_transfers`` is set in the connection, and a timeout is not specified,
the push will be polled for activity. Inactive transfers will be
cancelled. (See :ref:`connection-types` for more information on polling).

:param source: path on the host
:param dest: path on the target
:param as_root: whether root is required. Defaults to ``False``.
:param timeout: timeout (in seconds) for the transfer; if the transfer does
not complete within this period, an exception will be raised.
:param globbing: If ``True``, the ``source`` is interpreted as a globbing
pattern instead of being taken as-is. If the pattern has multiple
matches, ``dest`` must be a folder (or will be created as such if it
does not exist yet).
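A brief usage sketch; the paths are illustrative and ``target`` is assumed to
be an already-connected :class:`~devlib.target.Target`.

.. code-block:: python

    # Push a single file into the target's working directory.
    target.push('/tmp/config.txt', target.working_directory)

    # Push several result files at once with a globbing pattern; the
    # destination directory is created if it does not exist yet.
    target.push('/tmp/results/*.csv',
                target.path.join(target.working_directory, 'results'),
                globbing=True)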
.. method:: Target.pull(source, dest [, as_root, timeout, globbing])

Transfer a file from the target device to the host machine (see the example
below).

If transfer polling is supported (ADB connections and SSH connections),
``poll_transfers`` is set in the connection, and a timeout is not specified,
the pull will be polled for activity. Inactive transfers will be
cancelled. (See :ref:`connection-types` for more information on polling).

:param source: path on the target
:param dest: path on the host
:param as_root: whether root is required. Defaults to ``False``.
:param timeout: timeout (in seconds) for the transfer; if the transfer does
not complete within this period, an exception will be raised.
:param globbing: If ``True``, the ``source`` is interpreted as a globbing
pattern instead of being taken as-is. If the pattern has multiple
matches, ``dest`` must be a folder (or will be created as such if it
does not exist yet).
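And a corresponding sketch for pulling files back to the host; the paths are
again illustrative.

.. code-block:: python

    # Pull a single file that requires root to read.
    target.pull('/sys/kernel/debug/tracing/trace', '/tmp/trace.txt',
                as_root=True)

    # Pull every JSON result produced on the target into a host directory.
    target.pull(target.path.join(target.working_directory, '*.json'),
                '/tmp/results',
                globbing=True)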
.. method:: Target.execute(command [, timeout [, check_exit_code [, as_root [, strip_colors [, will_succeed [, force_locale]]]]]])

Execute the specified command on the target device and return its output.

@@ -252,6 +276,9 @@ Target

will make the method always raise an instance of a subclass of
:class:`DevlibTransientError` when the command fails, instead of a
:class:`DevlibStableError`.
:param force_locale: Prepend ``LC_ALL=<force_locale>`` in front of the
command to get predictable output that can be more safely parsed. If
``None``, no locale is prepended. (See the example below.)
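A short sketch of ``execute`` usage; the commands and the parsing are
illustrative only.

.. code-block:: python

    # Force the C locale so the output format does not depend on the
    # target's language settings, then parse it.
    out = target.execute('cat /proc/meminfo', force_locale='C')
    mem_free_kb = int(out.splitlines()[1].split()[1])

    # A command that is allowed to fail: will_succeed=True makes a failure
    # raise a transient (retryable) error instead of a stable one.
    target.execute('dmesg -c', as_root=True, will_succeed=True)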
.. method:: Target.background(command [, stdout [, stderr [, as_root]]])

@@ -278,31 +305,31 @@ Target

a string.
:param in_directory: execute the binary in the specified directory. This must
be an absolute path.
:param on_cpus: taskset the binary to these CPUs. This may be a single
``int`` (in which case, it will be interpreted as the mask), a list of
``ints``, in which case this will be interpreted as the list of cpus,
or a string, which will be interpreted as a comma-separated list of cpu
ranges, e.g. ``"0,4-7"``.
:param as_root: Specify whether the command should be run as root.
:param timeout: If this is specified and invocation does not terminate within this number
of seconds, an exception will be raised.
.. method:: Target.background_invoke(binary [, args [, in_directory [, on_cpus [, as_root ]]]])

Execute the specified binary on target (must already be installed) as a
background task, under the specified conditions, and return the
:class:`subprocess.Popen` instance for the command (see the example below).

:param binary: binary to execute. Must be present and executable on the device.
:param args: arguments to be passed to the binary. This can be either a list or
a string.
:param in_directory: execute the binary in the specified directory. This must
be an absolute path.
:param on_cpus: taskset the binary to these CPUs. This may be a single
``int`` (in which case, it will be interpreted as the mask), a list of
``ints``, in which case this will be interpreted as the list of cpus,
or a string, which will be interpreted as a comma-separated list of cpu
ranges, e.g. ``"0,4-7"``.
:param as_root: Specify whether the command should be run as root.
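A sketch of running a pre-installed binary in the background pinned to a CPU
range; the binary name and its arguments are placeholders.

.. code-block:: python

    # Run a hypothetical workload on CPU 0 and CPUs 4-7 in the background,
    # then wait for it to finish from the host side.
    popen = target.background_invoke('my_workload',
                                     args='--duration 10',
                                     on_cpus='0,4-7')
    popen.wait()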
.. method:: Target.kick_off(command [, as_root])

@@ -346,7 +373,19 @@ Target

some sysfs entries silently failing to set the written value without
returning an error code.

.. method:: Target.revertable_write_value(path, value [, verify])

Same as :meth:`Target.write_value`, but as a context manager that will write
back the previous value on exit.

.. method:: Target.batch_revertable_write_value(kwargs_list)

Calls :meth:`Target.revertable_write_value` for each keyword-argument
dictionary in the given list. This is a convenience method to update
multiple files at once, leaving them in their original state on exit. If one
write fails, all the already-performed writes will be reverted as well
(see the example below).
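A sketch of temporarily overriding sysfs/procfs values for the duration of a
measurement; the paths, values, and the ``run_benchmark()`` helper are
illustrative.

.. code-block:: python

    # Pin the cpufreq governor while measuring; the original value is
    # written back when the with-block exits, even on error.
    with target.revertable_write_value(
            '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor',
            'performance'):
        run_benchmark()  # hypothetical helper

    # Override several files at once; all writes are reverted on exit.
    with target.batch_revertable_write_value([
        {'path': '/proc/sys/kernel/sched_latency_ns', 'value': 20000000},
        {'path': '/proc/sys/kernel/perf_event_paranoid', 'value': -1},
    ]):
        run_benchmark()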
.. method:: Target.read_tree_values(path, depth=1, dictcls=dict [, tar [, decode_unicode [, strip_null_char]]])

Read values of all sysfs (or similar) file nodes under ``path``, traversing
up to the maximum depth ``depth``.

@@ -358,11 +397,20 @@ Target

value is a dict-like object with a key for every entry under ``path``
mapping onto its value or further dict-like objects as appropriate.

Although the default behaviour should suit most users, it is possible to
encounter issues when reading binary files, or files with colons in their
name, for example. In such cases, the ``tar`` parameter can be set to force a
full archive of the tree using tar, hence providing more robust behaviour.
This can, however, slow down the read process significantly (see the example
below).

:param path: sysfs path to scan
:param depth: maximum depth to descend
:param dictcls: a dict-like type to be used for each level of the hierarchy.
:param tar: the files will be read using tar rather than grep
:param decode_unicode: decode the content of tar-ed files as utf-8
:param strip_null_char: remove null chars from utf-8 decoded files
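A sketch of reading a small sysfs subtree; the path is illustrative.

.. code-block:: python

    # Read every cpufreq attribute for cpu0, up to two levels deep; tar=True
    # makes the read robust against binary content or unusual file names.
    values = target.read_tree_values('/sys/devices/system/cpu/cpu0/cpufreq',
                                     depth=2, tar=True)
    print(values['scaling_governor'])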
.. method:: Target.read_tree_values_flat(path, depth=1)

Read values of all sysfs (or similar) file nodes under ``path``, traversing
up to the maximum depth ``depth``.

@@ -406,6 +454,10 @@ Target

Return a list of :class:`PsEntry` instances for all running processes on the
system.
.. method:: Target.makedirs(self, path)

Create a directory at the given path and all its ancestors if needed.

.. method:: Target.file_exists(self, filepath)

Returns ``True`` if the specified path exists on the target and ``False``

@@ -521,15 +573,43 @@ Target

:returns: ``True`` if internet seems available, ``False`` otherwise.

.. method:: Target.install_module(mod, **params)

:param mod: The module name or object to be installed to the target.
:param params: Keyword arguments used to instantiate the module.

Installs an additional module to the target after the initial setup has been
performed (see the example below).
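A sketch of installing a module after construction; the module and the call
made on it are illustrative.

.. code-block:: python

    # Install the cpufreq module on an already-connected target and use it.
    target.install_module('cpufreq')
    print(target.cpufreq.list_governors(0))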
Linux Target
------------

.. class:: LinuxTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=SshConnection, is_container=False)

:class:`LinuxTarget` is a subclass of :class:`~devlib.target.Target`
with customisations specific to a device running Linux.


Local Linux Target
------------------

.. class:: LocalLinuxTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=SshConnection, is_container=False)

:class:`LocalLinuxTarget` is a subclass of
:class:`~devlib.target.LinuxTarget` with customisations specific to using
the host machine running Linux as the target (see the example below).

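A minimal sketch of targeting the host machine itself, useful for trying out
instrumentation without a board attached; default settings are assumed.

.. code-block:: python

    from devlib import LocalLinuxTarget

    # The host is the target; no remote connection is involved.
    target = LocalLinuxTarget()
    print(target.kernel_version)
    print(target.read_value('/proc/sys/kernel/hostname'))
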
Android Target
--------------

.. class:: AndroidTarget(connection_settings=None, platform=None, working_directory=None, executables_directory=None, connect=True, modules=None, load_default_modules=True, shell_prompt=DEFAULT_SHELL_PROMPT, conn_cls=AdbConnection, package_data_directory="/data/data")

:class:`AndroidTarget` is a subclass of :class:`~devlib.target.Target` with
additional features specific to a device running Android.

:param package_data_directory: This is the location of the data stored for
installed Android packages on the device.
.. method:: AndroidTarget.set_rotation(rotation)

@@ -602,18 +682,58 @@ Android Target

Returns ``True`` if the target's auto brightness is currently
enabled and ``False`` otherwise.

.. method:: AndroidTarget.set_stay_on_never()

Sets the stay-on mode to ``0``, where the screen will turn off
as standard after the timeout.

.. method:: AndroidTarget.set_stay_on_while_powered()

Sets the stay-on mode to ``7``, where the screen will stay on
while the device is charging.

.. method:: AndroidTarget.set_stay_on_mode(mode)

Sets the stay-on mode to the specified number between ``0`` and
``7`` (inclusive).

.. method:: AndroidTarget.get_stay_on_mode()

Returns an integer between ``0`` and ``7`` representing the current
stay-on mode of the device.

.. method:: AndroidTarget.ensure_screen_is_off(verify=True)

Checks if the device's screen is on and if so turns it off.
If ``verify`` is set to ``True`` then a ``TargetStableError``
will be raised if the display cannot be turned off, e.g. if an
always-on mode is enabled.

.. method:: AndroidTarget.ensure_screen_is_on(verify=True)

Checks if the device's screen is off and if so turns it on.
If ``verify`` is set to ``True`` then a ``TargetStableError``
will be raised if the display cannot be turned on.

.. method:: AndroidTarget.ensure_screen_is_on_and_stays(verify=True, mode=7)

Calls ``AndroidTarget.ensure_screen_is_on(verify)``, then additionally
sets the screen stay-on mode to ``mode`` (see the example below).
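A sketch of preparing the display around a UI measurement; the
``run_ui_test()`` helper is illustrative.

.. code-block:: python

    # Keep the screen on while the test runs, then restore normal behaviour.
    target.ensure_screen_is_on_and_stays(verify=True, mode=7)
    try:
        run_ui_test()  # hypothetical test entry point
    finally:
        target.set_stay_on_never()
        target.ensure_screen_is_off()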
.. method:: AndroidTarget.is_screen_on()

Returns ``True`` if the target's screen is currently on and ``False``
otherwise. If the display is in a "Doze" mode or similar always-on state,
this will return ``True``.

.. method:: AndroidTarget.wait_for_device(timeout=30)

Returns when the device becomes available within the given timeout,
otherwise raises a ``TimeoutError``.

.. method:: AndroidTarget.reboot_bootloader(timeout=30)

Attempts to reboot the target into its bootloader.

.. method:: AndroidTarget.homescreen()
@@ -646,9 +766,9 @@ ChromeOS Target

:class:`ChromeOsTarget` if the device supports Android, otherwise only the
:class:`LinuxTarget` methods will be available.

:param working_directory: This is the location of the working directory to
be used for the Linux target container. If not specified, it will default to
``"/mnt/stateful_partition/devlib-target"``.

:param android_working_directory: This is the location of the working
directory to be used for the Android container. If not specified, it will

@@ -656,7 +776,7 @@ ChromeOS Target

:param android_executables_directory: This is the location of the
executables directory to be used for the Android container. If not
specified, it will default to a ``bin`` subdirectory in the
``android_working_directory``.

:param package_data_directory: This is the location of the data stored
setup.py
@@ -41,23 +41,13 @@ except OSError:
    pass


vh_path = os.path.join(devlib_dir, 'utils', 'version.py')
# This can be loaded directly, as it does not have any devlib imports.
version_helper = imp.load_source('version_helper', vh_path)
__version__ = version_helper.get_devlib_version()
commit = version_helper.get_commit()
if commit:
    __version__ = '{}+{}'.format(__version__, commit)


packages = []
@@ -92,24 +82,29 @@ params = dict(
        'python-dateutil',  # converting between UTC and local time.
        'pexpect>=3.3',  # Send/receive to/from device
        'pyserial',  # Serial port interface
        'paramiko',  # SSH connection
        'scp',  # SSH connection file transfers
        'wrapt',  # Basis for construction of decorator functions
        'future',  # Python 2-3 compatibility
        'enum34;python_version<"3.4"',  # Enums for Python < 3.4
        'contextlib2;python_version<"3.0"',  # Python 3 contextlib backport for Python 2
        'numpy<=1.16.4; python_version<"3"',
        'numpy; python_version>="3"',
        'pandas<=0.24.2; python_version<"3"',
        'pandas; python_version>"3"',
    ],
    extras_require={
        'daq': ['daqpower>=2'],
        'doc': ['sphinx'],
        'monsoon': ['python-gflags'],
        'acme': ['pandas', 'numpy'],
    },
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
)
src/get_clock_boottime/Makefile (new file)
@@ -0,0 +1,6 @@
CFLAGS=-Wall --pedantic-errors -O2 -static

all: get_clock_boottime

get_clock_boottime: get_clock_boottime.c
	$(CC) $(CFLAGS) $^ -o $@
src/get_clock_boottime/get_clock_boottime.c (new file)
@@ -0,0 +1,18 @@
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void) {
    int ret;
    struct timespec tp;

    ret = clock_gettime(CLOCK_BOOTTIME, &tp);
    if (ret) {
        perror("clock_gettime()");
        return EXIT_FAILURE;
    }

    /* Zero-pad the nanosecond field so it reads as a decimal fraction. */
    printf("%ld.%09ld\n", tp.tv_sec, tp.tv_nsec);

    return EXIT_SUCCESS;
}