mirror of https://github.com/ARM-software/devlib.git synced 2025-09-22 20:01:53 +01:00

13 Commits

Author SHA1 Message Date
Marc Bonnici
e6c52c49ff version: Bump revision number 2021-07-23 15:42:13 +01:00
Marc Bonnici
6825130e48 connection: Use busybox implementation of kill
Some target implementations of kill do not support killing
process groups, so use the busybox implementation for greater
portability.
2021-07-23 12:35:51 +01:00
Douglas Raillard
80c0e37d11 utils/misc: Use an RLock in tls_property
Allow reentrancy of the lock to fix a deadlock that can occur if
self._get_tls() is called while holding the lock.
2021-07-21 16:44:49 +01:00
Douglas Raillard
f523afda95 target: Fix deadlock in Target.clear_logcat()
Ensure that only one clear_logcat() call is active at a time, and simply
ignore reentrant calls.
2021-07-21 16:44:49 +01:00
Douglas Raillard
b64ec714a0 utils/misc: Use RLock for check_output_lock
Using a threading.Lock leads to a deadlock in some circumstances.
2021-07-21 16:44:49 +01:00
Valentin Schneider
6249c06b44 modules/sched: Add awareness of new debug directory root
Scheduler debug information is being unified under /sys/kernel/debug/sched
for Linux v5.13. Plug in awareness for the new path while still trying the
old one(s) for backwards compatibility.
2021-07-12 15:16:59 +01:00
Marc Bonnici
3af3463c3c utils/ssh: Fix paramiko streams
Ensure that we use the input stream for reading.
2021-06-29 13:44:14 +01:00
Marc Bonnici
7065847f77 utils/ssh: Fix paramiko stdin
Ensure that we open the stdin stream for writing instead of
read-only.
2021-06-29 13:44:14 +01:00
douglas-raillard-arm
79783fa09a target: Create new connection for reentrant calls
When Target.conn property is required while the current connection is
already in use, provide a fresh connection to avoid deadlocks. This is
enabled by the @call_conn decorator that is used on all Target methods
that use self.conn directly.
2021-06-03 17:24:50 +01:00
douglas-raillard-arm
796536d67d hotplug: Verify hotplug.online_all()
Check that all CPUs are effectively online after a call to
target.hotplug.online_all(), as hotplug issues are common and failure to
bring back up a CPU can be quite problematic.
2021-06-03 17:24:43 +01:00
douglas-raillard-arm
b9374d530e ssh: Raise explicit exception when SFTP is not available
When SFTP is not available on OpenSSH, paramiko will raise a generic
exception:

    paramiko.ssh_exception.SSHException: EOF during negotiation

In order to make it easier to debug, raise a TargetStableError telling
the user to enable SFTP on their server. On OpenSSH, this means
installing the sftp subsystem and enabling it in sshd_config.
2021-05-11 09:39:53 +01:00
Javi Merino
34e51e7230 collector/perf: raise an error if report_options or report_sample_options are specified when not using perf/simpleperf record 2021-04-27 10:40:06 +01:00
Marc Bonnici
fa595e1a3d version: Dev version bump 2021-04-19 11:02:53 +01:00
8 changed files with 179 additions and 37 deletions

View File

@@ -126,6 +126,10 @@ class PerfCollector(CollectorBase):
             self.command = command
         else:
             raise ValueError('Unsupported perf command, must be stat or record')
+        if report_options and (command != 'record'):
+            raise ValueError('report_options specified, but command is not record')
+        if report_sample_options and (command != 'record'):
+            raise ValueError('report_sample_options specified, but command is not record')
         self.binary = self.target.get_installed(self.perf_type)
         if self.force_install or not self.binary:
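The new guards make a misconfigured collector fail fast at construction time. A hedged usage sketch (the target setup and any constructor arguments not shown in this hunk are assumptions):

from devlib import LocalLinuxTarget
from devlib.collector.perf import PerfCollector

target = LocalLinuxTarget()
# report_options only makes sense for 'perf record', so this now raises
# ValueError at construction instead of being silently ignored:
collector = PerfCollector(target, command='stat', report_options='--sort comm')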

View File

@@ -33,8 +33,8 @@ from devlib.utils.misc import InitCheckpoint
 _KILL_TIMEOUT = 3
-def _kill_pgid_cmd(pgid, sig):
-    return 'kill -{} -{}'.format(sig.value, pgid)
+def _kill_pgid_cmd(pgid, sig, busybox):
+    return '{} kill -{} -{}'.format(busybox, sig.value, pgid)
 class ConnectionBase(InitCheckpoint):
@@ -258,7 +258,7 @@ class ParamikoBackgroundCommand(BackgroundCommand):
             return
         # Use -PGID to target a process group rather than just the process
         # itself
-        cmd = _kill_pgid_cmd(self.pid, sig)
+        cmd = _kill_pgid_cmd(self.pid, sig, self.conn.busybox)
         self.conn.execute(cmd, as_root=self.as_root)
     @property
@@ -322,7 +322,7 @@ class AdbBackgroundCommand(BackgroundCommand):
     def send_signal(self, sig):
         self.conn.execute(
-            _kill_pgid_cmd(self.pid, sig),
+            _kill_pgid_cmd(self.pid, sig, self.conn.busybox),
             as_root=self.as_root,
         )
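The negated PID is what addresses the whole process group, and it is exactly that form which some minimal target kill implementations reject; busybox's applet handles it. A standalone illustration (the busybox path is a placeholder):

import signal

def kill_pgid_cmd(pgid, sig, busybox):
    # 'kill -<SIG> -<PGID>' signals every process in the group, not a single PID
    return '{} kill -{} -{}'.format(busybox, sig.value, pgid)

print(kill_pgid_cmd(1234, signal.SIGTERM, '/data/local/tmp/bin/busybox'))
# -> /data/local/tmp/bin/busybox kill -15 -1234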

View File

@@ -14,6 +14,7 @@
 #
 from devlib.module import Module
+from devlib.exception import TargetTransientError
 class HotplugModule(Module):
@@ -39,9 +40,13 @@ class HotplugModule(Module):
         return [cpu for cpu in range(self.target.number_of_cpus)
                 if self.target.file_exists(self._cpu_path(self.target, cpu))]
-    def online_all(self):
+    def online_all(self, verify=True):
         self.target._execute_util('hotplug_online_all', # pylint: disable=protected-access
                                   as_root=self.target.is_rooted)
+        if verify:
+            offline = set(self.target.list_offline_cpus())
+            if offline:
+                raise TargetTransientError('The following CPUs failed to come back online: {}'.format(offline))
     def online(self, *args):
         for cpu in args:
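A hedged usage sketch of the new verification (the retry policy is illustrative and not part of devlib):

from devlib.exception import TargetTransientError

def online_all_with_retries(target, retries=3):
    # online_all() now verifies by default and raises TargetTransientError
    # when a CPU stays offline, which makes a simple retry loop possible.
    for attempt in range(retries):
        try:
            target.hotplug.online_all()
            return
        except TargetTransientError:
            if attempt == retries - 1:
                raise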

View File

@@ -21,7 +21,7 @@ from past.builtins import basestring
 from devlib.module import Module
 from devlib.utils.misc import memoized
 from devlib.utils.types import boolean
+from devlib.exception import TargetStableError
 class SchedProcFSNode(object):
     """
@@ -303,19 +303,33 @@ class SchedDomain(SchedProcFSNode):
         self.flags = flags
+def _select_path(target, paths, name):
+    for p in paths:
+        if target.file_exists(p):
+            return p
+    raise TargetStableError('No {} found. Tried: {}'.format(name, ', '.join(paths)))
 class SchedProcFSData(SchedProcFSNode):
     """
     Root class for creating & storing SchedProcFSNode instances
     """
     _read_depth = 6
-    sched_domain_root = '/proc/sys/kernel/sched_domain'
+    @classmethod
+    def get_data_root(cls, target):
+        # Location differs depending on kernel version
+        paths = ['/sys/kernel/debug/sched/domains/', '/proc/sys/kernel/sched_domain']
+        return _select_path(target, paths, "sched_domain debug directory")
     @staticmethod
     def available(target):
-        path = SchedProcFSData.sched_domain_root
-        cpus = target.list_directory(path) if target.file_exists(path) else []
+        try:
+            path = SchedProcFSData.get_data_root(target)
+        except TargetStableError:
+            return False
+        cpus = target.list_directory(path)
         if not cpus:
             return False
@@ -329,7 +343,7 @@ class SchedProcFSData(SchedProcFSNode):
     def __init__(self, target, path=None):
         if path is None:
-            path = self.sched_domain_root
+            path = SchedProcFSData.get_data_root(target)
         procfs = target.read_tree_values(path, depth=self._read_depth)
         super(SchedProcFSData, self).__init__(procfs)
@@ -362,6 +376,15 @@ class SchedModule(Module):
         return schedproc or debug or dmips
+    def __init__(self, target):
+        super().__init__(target)
+    @classmethod
+    def get_sched_features_path(cls, target):
+        # Location differs depending on kernel version
+        paths = ['/sys/kernel/debug/sched/features', '/sys/kernel/debug/sched_features']
+        return _select_path(target, paths, "sched_features file")
     def get_kernel_attributes(self, matching=None, check_exit_code=True):
         """
         Get the value of scheduler attributes.
@@ -418,12 +441,12 @@ class SchedModule(Module):
     def target_has_debug(cls, target):
         if target.config.get('SCHED_DEBUG') != 'y':
             return False
-        return target.file_exists('/sys/kernel/debug/sched_features')
-    @property
-    @memoized
-    def has_debug(self):
-        return self.target_has_debug(self.target)
+        try:
+            cls.get_sched_features_path(target)
+            return True
+        except TargetStableError:
+            return False
     def get_features(self):
         """
@@ -431,9 +454,7 @@ class SchedModule(Module):
         :returns: a dictionary of features and their "is enabled" status
         """
-        if not self.has_debug:
-            raise RuntimeError("sched_features not available")
-        feats = self.target.read_value('/sys/kernel/debug/sched_features')
+        feats = self.target.read_value(self.get_sched_features_path(self.target))
         features = {}
         for feat in feats.split():
             value = True
@@ -453,13 +474,11 @@ class SchedModule(Module):
         :raise ValueError: if the specified enable value is not bool
         :raise RuntimeError: if the specified feature cannot be set
         """
-        if not self.has_debug:
-            raise RuntimeError("sched_features not available")
         feature = feature.upper()
         feat_value = feature
         if not boolean(enable):
             feat_value = 'NO_' + feat_value
-        self.target.write_value('/sys/kernel/debug/sched_features',
+        self.target.write_value(self.get_sched_features_path(self.target),
                                 feat_value, verify=False)
         if not verify:
             return
@@ -471,10 +490,10 @@ class SchedModule(Module):
     def get_cpu_sd_info(self, cpu):
         """
-        :returns: An object view of /proc/sys/kernel/sched_domain/cpu<cpu>/*
+        :returns: An object view of the sched_domain debug directory of 'cpu'
         """
         path = self.target.path.join(
-            SchedProcFSData.sched_domain_root,
+            SchedProcFSData.get_data_root(self.target),
             "cpu{}".format(cpu)
         )
@@ -482,7 +501,7 @@ class SchedModule(Module):
     def get_sd_info(self):
         """
-        :returns: An object view of /proc/sys/kernel/sched_domain/*
+        :returns: An object view of the entire sched_domain debug directory
         """
         return SchedProcFSData(self.target)
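A hedged usage sketch of the module after this change, assuming a connected, rooted devlib Target with SCHED_DEBUG=y (access via target.sched follows devlib's usual module naming):

sched = target.sched
# devlib now probes /sys/kernel/debug/sched/... first (v5.13+) and falls back
# to /proc/sys/kernel/sched_domain on older kernels.
features = sched.get_features()          # e.g. {'GENTLE_FAIR_SLEEPERS': True, ...}
cpu0_domains = sched.get_cpu_sd_info(0)  # per-CPU view, old or new layout
all_domains = sched.get_sd_info()        # whole sched_domain debug directory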

View File

@@ -76,6 +76,48 @@ GOOGLE_DNS_SERVER_ADDRESS = '8.8.8.8'
 installed_package_info = namedtuple('installed_package_info', 'apk_path package')
+def call_conn(f):
+    """
+    Decorator to be used on all :class:`devlib.target.Target` methods that
+    directly use a method of ``self.conn``.
+    This ensures that if a call to any of the decorated method occurs while
+    executing, a new connection will be created in order to avoid possible
+    deadlocks. This can happen if e.g. a target's method is called from
+    ``__del__``, which could be executed by the garbage collector, interrupting
+    another call to a method of the connection instance.
+    .. note:: This decorator could be applied directly to all methods with a
+        metaclass or ``__init_subclass__`` but it could create issues when
+        passing target methods as callbacks to connections' methods.
+    """
+    @functools.wraps(f)
+    def wrapper(self, *args, **kwargs):
+        reentered = self.conn.is_in_use
+        disconnect = False
+        try:
+            # If the connection was already in use we need to use a different
+            # instance to avoid reentrancy deadlocks. This can happen even in
+            # single threaded code via __del__ implementations that can be
+            # called at any point.
+            if reentered:
+                # Shallow copy so we can use another connection instance
+                _self = copy.copy(self)
+                _self.conn = _self.get_connection()
+                assert self.conn is not _self.conn
+                disconnect = True
+            else:
+                _self = self
+            return f(_self, *args, **kwargs)
+        finally:
+            if disconnect:
+                _self.disconnect()
+    return wrapper
 class Target(object):
     path = None
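To make the docstring's scenario concrete, here is a toy illustration (not devlib code) of how a re-entrant call can arise even in single-threaded scripts:

class TempDir:
    """Deletes its on-target directory when garbage collected."""
    def __init__(self, target, path):
        self.target = target
        self.path = path

    def __del__(self):
        # The garbage collector may run this in the middle of another
        # target.execute() call; without @call_conn both calls would
        # contend for the same, non-reentrant connection.
        self.target.execute('rm -rf {}'.format(self.path))

With @call_conn applied to execute(), the nested call sees conn.is_in_use, shallow-copies the target, runs on a fresh connection and disconnects it afterwards.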
@@ -294,6 +336,14 @@ class Target(object):
         if connect:
             self.connect()
+    def __copy__(self):
+        new = self.__class__.__new__(self.__class__)
+        new.__dict__ = self.__dict__.copy()
+        # Avoid sharing the connection instance with the original target, so
+        # that each target can live its own independent life
+        del new.__dict__['_conn']
+        return new
     # connection and initialization
     def connect(self, timeout=None, check_boot_completed=True):
@@ -433,6 +483,7 @@ class Target(object):
         dst_mkdir(dest)
+    @call_conn
     def push(self, source, dest, as_root=False, timeout=None, globbing=False): # pylint: disable=arguments-differ
         sources = glob.glob(source) if globbing else [source]
         self._prepare_xfer('push', sources, dest)
@@ -488,6 +539,7 @@
         return paths
+    @call_conn
     def pull(self, source, dest, as_root=False, timeout=None, globbing=False): # pylint: disable=arguments-differ
         if globbing:
             sources = self._expand_glob(source, as_root=as_root)
@@ -557,6 +609,7 @@
         return command
+    @call_conn
     def execute(self, command, timeout=None, check_exit_code=True,
                 as_root=False, strip_colors=True, will_succeed=False,
                 force_locale='C'):
@@ -566,6 +619,7 @@
                                  check_exit_code=check_exit_code, as_root=as_root,
                                  strip_colors=strip_colors, will_succeed=will_succeed)
+    @call_conn
     def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False,
                    force_locale='C', timeout=None):
         command = self._prepare_cmd(command, force_locale)
@@ -691,6 +745,7 @@
             pass
         self.conn.connected_as_root = None
+    @call_conn
     def check_responsive(self, explode=True):
         try:
             self.conn.execute('ls /', timeout=5)
@@ -1005,6 +1060,7 @@
         os.remove(shutils_ofile)
         os.rmdir(tmp_dir)
+    @call_conn
     def _execute_util(self, command, timeout=None, check_exit_code=True, as_root=False):
         command = '{} {}'.format(self.shutils, command)
         return self.conn.execute(command, timeout, check_exit_code, as_root)
@@ -1169,6 +1225,7 @@
     def wait_boot_complete(self, timeout=10):
         pass
+    @call_conn
     def kick_off(self, command, as_root=False):
         command = 'sh -c {} 1>/dev/null 2>/dev/null &'.format(quote(command))
         return self.conn.execute(command, as_root=as_root)
@@ -1702,18 +1759,24 @@ class AndroidTarget(Target):
             self.remove(dev_path)
     def clear_logcat(self):
-        with self.clear_logcat_lock:
-            if isinstance(self.conn, AdbConnection):
-                adb_command(self.adb_name, 'logcat -c', timeout=30, adb_server=self.adb_server)
-            else:
-                self.execute('logcat -c', timeout=30)
+        locked = self.clear_logcat_lock.acquire(blocking=False)
+        if locked:
+            try:
+                if isinstance(self.conn, AdbConnection):
+                    adb_command(self.adb_name, 'logcat -c', timeout=30, adb_server=self.adb_server)
+                else:
+                    self.execute('logcat -c', timeout=30)
+            finally:
+                self.clear_logcat_lock.release()
     def get_logcat_monitor(self, regexps=None):
         return LogcatMonitor(self, regexps)
+    @call_conn
     def wait_for_device(self, timeout=30):
         self.conn.wait_for_device()
+    @call_conn
     def reboot_bootloader(self, timeout=30):
         self.conn.reboot_bootloader()
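The clear_logcat() change swaps a blocking "with lock:" for a non-blocking acquire, so a re-entrant call becomes a no-op instead of deadlocking. A minimal sketch of the idiom outside devlib:

import threading

_lock = threading.Lock()

def clear_once():
    # A second caller, including a re-entrant one on the same thread,
    # skips the work instead of waiting on the lock forever.
    if not _lock.acquire(blocking=False):
        return
    try:
        print('clearing...')
    finally:
        _lock.release()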

View File

@@ -37,6 +37,7 @@ import string
 import subprocess
 import sys
 import threading
+import types
 import wrapt
 import warnings
@@ -152,7 +153,7 @@ def preexec_function():
 check_output_logger = logging.getLogger('check_output')
 # Popen is not thread safe. If two threads attempt to call it at the same time,
 # one may lock up. See https://bugs.python.org/issue12739.
-check_output_lock = threading.Lock()
+check_output_lock = threading.RLock()
 def get_subprocess(command, **kwargs):
@@ -783,7 +784,7 @@ class tls_property:
     def __init__(self, factory):
         self.factory = factory
         # Lock accesses to shared WeakKeyDictionary and WeakSet
-        self.lock = threading.Lock()
+        self.lock = threading.RLock()
     def __get__(self, instance, owner=None):
         return _BoundTLSProperty(self, instance, owner)
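Both hunks above address the same failure mode: the lock being re-acquired by the thread that already holds it. A minimal standalone demonstration of why an RLock fixes this:

import threading

lock = threading.RLock()   # with threading.Lock() this example hangs forever

def outer():
    with lock:
        inner()            # re-enters the lock on the same thread

def inner():
    with lock:             # an RLock just increments its recursion count
        pass

outer()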
@@ -883,10 +884,14 @@ class _BoundTLSProperty:
 class InitCheckpointMeta(type):
     """
-    Metaclass providing an ``initialized`` boolean attributes on instances.
+    Metaclass providing an ``initialized`` and ``is_in_use`` boolean attributes
+    on instances.
     ``initialized`` is set to ``True`` once the ``__init__`` constructor has
     returned. It will deal cleanly with nested calls to ``super().__init__``.
+    ``is_in_use`` is set to ``True`` when an instance method is being called.
+    This allows to detect reentrance.
     """
     def __new__(metacls, name, bases, dct, **kwargs):
         cls = super().__new__(metacls, name, bases, dct, **kwargs)
@@ -895,6 +900,7 @@ class InitCheckpointMeta(type):
         @wraps(init_f)
         def init_wrapper(self, *args, **kwargs):
             self.initialized = False
+            self.is_in_use = False
             # Track the nesting of super()__init__ to set initialized=True only
             # when the outer level is finished
@@ -918,6 +924,45 @@ class InitCheckpointMeta(type):
         cls.__init__ = init_wrapper
+        # Set the is_in_use attribute to allow external code to detect if the
+        # methods are about to be re-entered.
+        def make_wrapper(f):
+            if f is None:
+                return None
+            @wraps(f)
+            def wrapper(self, *args, **kwargs):
+                f_ = f.__get__(self, self.__class__)
+                initial_state = self.is_in_use
+                try:
+                    self.is_in_use = True
+                    return f_(*args, **kwargs)
+                finally:
+                    self.is_in_use = initial_state
+            return wrapper
+        # This will not decorate methods defined in base classes, but we cannot
+        # use inspect.getmembers() as it uses __get__ to bind the attributes to
+        # the class, making staticmethod indistinguishible from instance
+        # methods.
+        for name, attr in cls.__dict__.items():
+            # Only wrap the methods (exposed as functions), not things like
+            # classmethod or staticmethod
+            if (
+                name not in ('__init__', '__new__') and
+                isinstance(attr, types.FunctionType)
+            ):
+                setattr(cls, name, make_wrapper(attr))
+            elif isinstance(attr, property):
+                prop = property(
+                    fget=make_wrapper(attr.fget),
+                    fset=make_wrapper(attr.fset),
+                    fdel=make_wrapper(attr.fdel),
+                    doc=attr.__doc__,
+                )
+                setattr(cls, name, prop)
         return cls
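This is the flag that Target's new @call_conn decorator inspects. A small sketch of what it exposes, assuming a subclass of devlib.utils.misc.InitCheckpoint (which is built on this metaclass):

from devlib.utils.misc import InitCheckpoint

class FakeConnection(InitCheckpoint):
    def execute(self, cmd):
        # While this runs, self.is_in_use is True, so any code re-entered
        # from here (e.g. via __del__) can detect the situation.
        assert self.is_in_use
        return 'ok: ' + cmd

conn = FakeConnection()
assert conn.is_in_use is False
conn.execute('ls /')
assert conn.is_in_use is False   # restored once the call returns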

View File

@@ -466,7 +466,13 @@ class SshConnection(SshConnectionBase):
         return self.transfer_mgr.progress_cb if self.transfer_mgr is not None else None
     def _get_sftp(self, timeout):
-        sftp = self.client.open_sftp()
+        try:
+            sftp = self.client.open_sftp()
+        except paramiko.ssh_exception.SSHException as e:
+            if 'EOF during negotiation' in str(e):
+                raise TargetStableError('The SSH server does not support SFTP. Please install and enable appropriate module.') from e
+            else:
+                raise
         sftp.get_channel().settimeout(timeout)
         return sftp
@@ -654,7 +660,7 @@ class SshConnection(SshConnectionBase):
         # Read are not buffered so we will always get the data as soon as
         # they arrive
         return (
-            channel.makefile_stdin(),
+            channel.makefile_stdin('w', 0),
             channel.makefile(),
             channel.makefile_stderr(),
         )
@@ -685,11 +691,11 @@ class SshConnection(SshConnectionBase):
             w = os.fdopen(w, 'wb')
         # Turn a file descriptor into a file-like object
         elif isinstance(stream_out, int) and stream_out >= 0:
-            r = os.fdopen(stream_out, 'rb')
+            r = os.fdopen(stream_in, 'rb')
             w = os.fdopen(stream_out, 'wb')
         # file-like object
         else:
-            r = stream_out
+            r = stream_in
             w = stream_out
         return (r, w)
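Both paramiko fixes come down to requesting the right stream in the right mode from the channel. A hedged sketch of the underlying paramiko calls (host, credentials and command are placeholders, and this is not devlib's exact code path):

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect('192.168.1.42', username='root', password='root')

channel = client.get_transport().open_session()
channel.exec_command('cat > /tmp/stdin_test')
stdin = channel.makefile_stdin('w', 0)   # writable and unbuffered, not the default read-only view
stdout = channel.makefile()              # read side: the command's stdout
stdin.write('hello\n')
stdin.close()
client.close()

The SFTP hunk, meanwhile, turns paramiko's opaque "EOF during negotiation" into a TargetStableError telling the user to enable the sftp subsystem in sshd_config.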

View File

@@ -21,7 +21,7 @@ from subprocess import Popen, PIPE
 VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])
-version = VersionTuple(1, 3, 1, '')
+version = VersionTuple(1, 3, 2, '')
 def get_devlib_version():