Mirror of https://github.com/ARM-software/devlib.git (synced 2025-09-22 20:01:53 +01:00)
Compare commits
50 Commits
Commits in the compared range (SHA1 only):

e6c52c49ff, 6825130e48, 80c0e37d11, f523afda95, b64ec714a0, 6249c06b44, 3af3463c3c, 7065847f77, 79783fa09a, 796536d67d, b9374d530e, 34e51e7230, fa595e1a3d, 78938cf243, 4941a7183a, 3ab9d23a4a, 5cf18a7b3c, 5bfeae08f4, a87a1df0fb, 3bf763688e, a5cced85ce, 9f55ae7603, e7bafd6e5b, ca84124fae, 1f41853341, 82a2f7d8b6, 2a633b783a, b6d1863e77, bbc891341c, d14df074ee, 81f9ee2c50, 09d0a0f500, fe2fe3ae04, 4859e818fb, 5d342044a2, d953377ff3, 4f2d9fa66d, 4e44863777, 6cabad14d0, 31f7c1e8f9, 3bc98f855b, d2b80ccaf9, 552040f390, 0d259be01b, 792101819a, 3b8317d42e, e3da419e5b, e251b158b2, c0a5765da5, b32f15bbdb
Binary files changed (contents not shown):

- devlib/bin/ppc64le/busybox (executable file → normal file)
- devlib/bin/x86/busybox (executable file)
devlib/collector/dmesg.py:

```diff
@@ -20,6 +20,8 @@ from datetime import timedelta
 from devlib.collector import (CollectorBase, CollectorOutput,
                               CollectorOutputEntry)
+from devlib.target import KernelConfigTristate
+from devlib.exception import TargetStableError


 class KernelLogEntry(object):
@@ -152,6 +154,10 @@ class DmesgCollector(CollectorBase):

     def __init__(self, target, level=LOG_LEVELS[-1], facility='kern'):
         super(DmesgCollector, self).__init__(target)

+        if not target.is_rooted:
+            raise TargetStableError('Cannot collect dmesg on non-rooted target')
+
         self.output_path = None

         if level not in self.LOG_LEVELS:
@@ -167,6 +173,8 @@ class DmesgCollector(CollectorBase):
         self.basic_dmesg = '--force-prefix' not in \
             self.target.execute('dmesg -h', check_exit_code=False)
         self.facility = facility
+        self.needs_root = bool(target.config.typed_config.get(
+            'CONFIG_SECURITY_DMESG_RESTRICT', KernelConfigTristate.NO))
         self.reset()

     @property
@@ -178,7 +186,7 @@ class DmesgCollector(CollectorBase):

     def start(self):
         self.reset()
-        # Empty the dmesg ring buffer
+        # Empty the dmesg ring buffer. This requires root in all cases
         self.target.execute('dmesg -c', as_root=True)

     def stop(self):
@@ -195,7 +203,7 @@ class DmesgCollector(CollectorBase):
             facility=self.facility,
         )

-        self.dmesg_out = self.target.execute(cmd)
+        self.dmesg_out = self.target.execute(cmd, as_root=self.needs_root)

     def set_output(self, output_path):
         self.output_path = output_path
```
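A minimal usage sketch of the collector after these changes, assuming `target` is an already connected, rooted devlib Target and following the reset/start/stop/set_output/get_data collector interface; the workload call is a placeholder:

```python
from devlib.collector.dmesg import DmesgCollector

collector = DmesgCollector(target, level='warn')  # __init__ now raises TargetStableError if not rooted

collector.reset()
collector.start()                 # 'dmesg -c' to empty the ring buffer, always as root
run_workload()                    # placeholder for the code under test
collector.stop()                  # reads the log; as_root only if CONFIG_SECURITY_DMESG_RESTRICT is set

collector.set_output('dmesg.log')
output = collector.get_data()
```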
devlib/collector/perf.py:

```diff
@@ -24,8 +24,9 @@ from devlib.collector import (CollectorBase, CollectorOutput,
 from devlib.utils.misc import ensure_file_directory_exists as _f


-PERF_COMMAND_TEMPLATE = '{binary} {command} {options} {events} sleep 1000 > {outfile} 2>&1 '
+PERF_STAT_COMMAND_TEMPLATE = '{binary} {command} {options} {events} {sleep_cmd} > {outfile} 2>&1 '
 PERF_REPORT_COMMAND_TEMPLATE= '{binary} report {options} -i {datafile} > {outfile} 2>&1 '
+PERF_REPORT_SAMPLE_COMMAND_TEMPLATE= '{binary} report-sample {options} -i {datafile} > {outfile} '
 PERF_RECORD_COMMAND_TEMPLATE= '{binary} record {options} {events} -o {outfile}'

 PERF_DEFAULT_EVENTS = [
@@ -90,12 +91,16 @@ class PerfCollector(CollectorBase):
                  events=None,
                  optionstring=None,
                  report_options=None,
+                 run_report_sample=False,
+                 report_sample_options=None,
                  labels=None,
                  force_install=False):
         super(PerfCollector, self).__init__(target)
         self.force_install = force_install
         self.labels = labels
         self.report_options = report_options
+        self.run_report_sample = run_report_sample
+        self.report_sample_options = report_sample_options
         self.output_path = None

         # Validate parameters
@@ -121,6 +126,10 @@ class PerfCollector(CollectorBase):
             self.command = command
         else:
             raise ValueError('Unsupported perf command, must be stat or record')
+        if report_options and (command != 'record'):
+            raise ValueError('report_options specified, but command is not record')
+        if report_sample_options and (command != 'record'):
+            raise ValueError('report_sample_options specified, but command is not record')

         self.binary = self.target.get_installed(self.perf_type)
         if self.force_install or not self.binary:
@@ -138,17 +147,20 @@ class PerfCollector(CollectorBase):
             self.target.remove(filepath)
             filepath = self._get_target_file(label, 'rpt')
             self.target.remove(filepath)
+            filepath = self._get_target_file(label, 'rptsamples')
+            self.target.remove(filepath)

     def start(self):
         for command in self.commands:
-            self.target.kick_off(command)
+            self.target.background(command, as_root=self.target.is_rooted)

     def stop(self):
         self.target.killall(self.perf_type, signal='SIGINT',
                             as_root=self.target.is_rooted)
-        # perf doesn't transmit the signal to its sleep call so handled here:
-        self.target.killall('sleep', as_root=self.target.is_rooted)
-        # NB: we hope that no other "important" sleep is on-going
+        if self.perf_type == "perf" and self.command == "stat":
+            # perf doesn't transmit the signal to its sleep call so handled here:
+            self.target.killall('sleep', as_root=self.target.is_rooted)
+            # NB: we hope that no other "important" sleep is on-going

     def set_output(self, output_path):
         self.output_path = output_path
@@ -164,6 +176,9 @@ class PerfCollector(CollectorBase):
                 self._wait_for_data_file_write(label, self.output_path)
                 path = self._pull_target_file_to_host(label, 'rpt', self.output_path)
                 output.append(CollectorOutputEntry(path, 'file'))
+                if self.run_report_sample:
+                    report_samples_path = self._pull_target_file_to_host(label, 'rptsamples', self.output_path)
+                    output.append(CollectorOutputEntry(report_samples_path, 'file'))
             else:
                 path = self._pull_target_file_to_host(label, 'out', self.output_path)
                 output.append(CollectorOutputEntry(path, 'file'))
@@ -188,10 +203,12 @@ class PerfCollector(CollectorBase):

     def _build_perf_stat_command(self, options, events, label):
         event_string = ' '.join(['-e {}'.format(e) for e in events])
-        command = PERF_COMMAND_TEMPLATE.format(binary = self.binary,
+        sleep_cmd = 'sleep 1000' if self.perf_type == 'perf' else ''
+        command = PERF_STAT_COMMAND_TEMPLATE.format(binary = self.binary,
                                                command = self.command,
                                                options = options or '',
                                                events = event_string,
+                                               sleep_cmd = sleep_cmd,
                                                outfile = self._get_target_file(label, 'out'))
         return command

@@ -202,6 +219,13 @@ class PerfCollector(CollectorBase):
                                                  outfile=self._get_target_file(label, 'rpt'))
         return command

+    def _build_perf_report_sample_command(self, label):
+        command = PERF_REPORT_SAMPLE_COMMAND_TEMPLATE.format(binary=self.binary,
+                                                             options=self.report_sample_options or '',
+                                                             datafile=self._get_target_file(label, 'data'),
+                                                             outfile=self._get_target_file(label, 'rptsamples'))
+        return command
+
     def _build_perf_record_command(self, options, label):
         event_string = ' '.join(['-e {}'.format(e) for e in self.events])
         command = PERF_RECORD_COMMAND_TEMPLATE.format(binary=self.binary,
@@ -234,6 +258,9 @@ class PerfCollector(CollectorBase):
                 data_file_finished_writing = True
         report_command = self._build_perf_report_command(self.report_options, label)
         self.target.execute(report_command)
+        if self.run_report_sample:
+            report_sample_command = self._build_perf_report_sample_command(label)
+            self.target.execute(report_sample_command)

     def _validate_events(self, events):
         available_events_string = self.target.execute('{} list | {} cat'.format(self.perf_type, self.target.busybox))
```
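A hedged usage sketch of the new report-sample support, assuming `target` is a connected devlib Target with a perf binary available; event names are illustrative and the workload call is a placeholder:

```python
from devlib.collector.perf import PerfCollector

collector = PerfCollector(target,
                          command='record',            # report-sample is only valid for 'record'
                          events=['cycles', 'instructions'],
                          run_report_sample=True)      # also generate a 'perf report-sample' dump

collector.reset()
collector.start()                  # commands now run via target.background() rather than kick_off()
run_workload()                     # placeholder
collector.stop()                   # only 'perf stat' runs also kill the helper 'sleep'

collector.set_output('results/')
output = collector.get_data()      # includes the extra .rptsamples file for each label
```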
devlib/connection.py:

```diff
@@ -33,8 +33,8 @@ from devlib.utils.misc import InitCheckpoint
 _KILL_TIMEOUT = 3


-def _kill_pgid_cmd(pgid, sig):
-    return 'kill -{} -{}'.format(sig.value, pgid)
+def _kill_pgid_cmd(pgid, sig, busybox):
+    return '{} kill -{} -{}'.format(busybox, sig.value, pgid)


 class ConnectionBase(InitCheckpoint):
@@ -105,12 +105,20 @@ class BackgroundCommand(ABC):
         """
         self.send_signal(signal.SIGKILL)

-    @abstractmethod
     def cancel(self, kill_timeout=_KILL_TIMEOUT):
         """
         Try to gracefully terminate the process by sending ``SIGTERM``, then
         waiting for ``kill_timeout`` to send ``SIGKILL``.
         """
+        if self.poll() is None:
+            self._cancel(kill_timeout=kill_timeout)
+
+    @abstractmethod
+    def _cancel(self, kill_timeout):
+        """
+        Method to override in subclasses to implement :meth:`cancel`.
+        """
+        pass

     @abstractmethod
     def wait(self):
@@ -209,11 +217,11 @@ class PopenBackgroundCommand(BackgroundCommand):
     def poll(self):
         return self.popen.poll()

-    def cancel(self, kill_timeout=_KILL_TIMEOUT):
+    def _cancel(self, kill_timeout):
         popen = self.popen
         os.killpg(os.getpgid(popen.pid), signal.SIGTERM)
         try:
-            popen.wait(timeout=_KILL_TIMEOUT)
+            popen.wait(timeout=kill_timeout)
         except subprocess.TimeoutExpired:
             os.killpg(os.getpgid(popen.pid), signal.SIGKILL)

@@ -250,7 +258,7 @@ class ParamikoBackgroundCommand(BackgroundCommand):
             return
         # Use -PGID to target a process group rather than just the process
         # itself
-        cmd = _kill_pgid_cmd(self.pid, sig)
+        cmd = _kill_pgid_cmd(self.pid, sig, self.conn.busybox)
         self.conn.execute(cmd, as_root=self.as_root)

     @property
@@ -266,7 +274,7 @@ class ParamikoBackgroundCommand(BackgroundCommand):
         else:
             return None

-    def cancel(self, kill_timeout=_KILL_TIMEOUT):
+    def _cancel(self, kill_timeout):
         self.send_signal(signal.SIGTERM)
         # Check if the command terminated quickly
         time.sleep(10e-3)
@@ -314,7 +322,7 @@ class AdbBackgroundCommand(BackgroundCommand):

     def send_signal(self, sig):
         self.conn.execute(
-            _kill_pgid_cmd(self.pid, sig),
+            _kill_pgid_cmd(self.pid, sig, self.conn.busybox),
             as_root=self.as_root,
         )

@@ -340,10 +348,10 @@ class AdbBackgroundCommand(BackgroundCommand):
     def poll(self):
         return self.adb_popen.poll()

-    def cancel(self, kill_timeout=_KILL_TIMEOUT):
+    def _cancel(self, kill_timeout):
         self.send_signal(signal.SIGTERM)
         try:
-            self.adb_popen.wait(timeout=_KILL_TIMEOUT)
+            self.adb_popen.wait(timeout=kill_timeout)
         except subprocess.TimeoutExpired:
             self.send_signal(signal.SIGKILL)
             self.adb_popen.kill()
@@ -436,7 +444,7 @@ class TransferManagerBase(ABC):
         self.transfer_started.clear()
         self.transfer_completed.clear()
         self.transfer_aborted.clear()

     def _monitor(self):
         start_t = monotonic()
         self.transfer_completed.wait(self.start_transfer_poll_delay)
@@ -458,6 +466,7 @@ class PopenTransferManager(TransferManagerBase):
         if self.transfer:
             self.transfer.cancel()
             self.transfer = None
+            self.last_sample = None

     def isactive(self):
         size_fn = self._push_dest_size if self.direction == 'push' else self._pull_dest_size
@@ -469,8 +478,9 @@ class PopenTransferManager(TransferManagerBase):

     def set_transfer_and_wait(self, popen_bg_cmd):
         self.transfer = popen_bg_cmd
+        self.last_sample = None
         ret = self.transfer.wait()

         if ret and not self.transfer_aborted.is_set():
             raise subprocess.CalledProcessError(ret, self.transfer.popen.args)
         elif self.transfer_aborted.is_set():
@@ -520,4 +530,4 @@ class SSHTransferManager(TransferManagerBase):
                 self.to_transfer = args[1]
             elif len(args) == 2:  # For SFTPClient callbacks
                 self.transferred = args[0]
                 self.to_transfer = args[1]
```
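In practice the split means callers keep using cancel() while subclasses only implement _cancel(); a minimal usage sketch, assuming `target` is a connected devlib Target and the command is illustrative:

```python
bg = target.background('sleep 60')   # returns a BackgroundCommand (Popen/Paramiko/Adb flavour)

# cancel() is now a no-op if the command has already exited; otherwise it sends SIGTERM
# (via 'busybox kill -<sig> -<pgid>' on the target for remote connections) and escalates
# to SIGKILL once kill_timeout seconds have passed.
bg.cancel(kill_timeout=5)
```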
devlib/host.py:

```diff
@@ -102,7 +102,7 @@ class LocalConnection(ConnectionBase):
             if self.unrooted:
                 raise TargetStableError('unrooted')
             password = self._get_password()
-            command = "echo {} | sudo -p ' ' -S -- sh -c {}".format(quote(password), quote(command))
+            command = "echo {} | sudo -k -p ' ' -S -- sh -c {}".format(quote(password), quote(command))
         ignore = None if check_exit_code else 'all'
         try:
             stdout, stderr = check_output(command, shell=True, timeout=timeout, ignore=ignore)
@@ -127,7 +127,7 @@ class LocalConnection(ConnectionBase):
             password = self._get_password()
             # The sudo prompt will add a space on stderr, but we cannot filter
             # it out here
-            command = "echo {} | sudo -p ' ' -S -- sh -c {}".format(quote(password), quote(command))
+            command = "echo {} | sudo -k -p ' ' -S -- sh -c {}".format(quote(password), quote(command))

         # Make sure to get a new PGID so PopenBackgroundCommand() can kill
         # all sub processes that could be started without troubles.
```
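For reference, a small sketch of how the sudo command line is put together after this change (it mirrors the string in the diff, not the full LocalConnection code). The added `-k` makes sudo discard any cached credentials so it always reads the password piped on stdin:

```python
from shlex import quote

def sudo_wrap(command, password):
    # -k: invalidate cached credentials, -S: read the password from stdin,
    # -p ' ': keep the prompt down to a single space on stderr.
    return "echo {} | sudo -k -p ' ' -S -- sh -c {}".format(quote(password), quote(command))

print(sudo_wrap('ls /root', 'hunter2'))
```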
devlib/module/cpufreq.py:

```diff
@@ -301,7 +301,7 @@ class CpufreqModule(Module):
         except ValueError:
             raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))

-    def get_frequency(self, cpu):
+    def get_frequency(self, cpu, cpuinfo=False):
         """
         Returns the current frequency currently set for the specified CPU.

@@ -309,12 +309,18 @@ class CpufreqModule(Module):
         try to read the current frequency and the following exception will be
         raised ::

+        :param cpuinfo: Read the value in the cpuinfo interface that reflects
+                        the actual running frequency.
+
         :raises: TargetStableError if for some reason the frequency could not be read.

         """
         if isinstance(cpu, int):
             cpu = 'cpu{}'.format(cpu)
-        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
+
+        sysfile = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(
+            cpu,
+            'cpuinfo_cur_freq' if cpuinfo else 'scaling_cur_freq')
         return self.target.read_int(sysfile)

     def set_frequency(self, cpu, frequency, exact=True):
@@ -350,6 +356,10 @@ class CpufreqModule(Module):
                 raise TargetStableError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
             sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
             self.target.write_value(sysfile, value, verify=False)
+            cpuinfo = self.get_frequency(cpu, cpuinfo=True)
+            if cpuinfo != value:
+                self.logger.warning(
+                    'The cpufreq value has not been applied properly cpuinfo={} request={}'.format(cpuinfo, value))
         except ValueError:
             raise ValueError('Frequency must be an integer; got: "{}"'.format(frequency))
```
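A short sketch of the new cpuinfo path, assuming `target` is a connected devlib Target with the cpufreq module loaded and a userspace governor; the frequency value is illustrative and must be one the CPU actually supports:

```python
target.cpufreq.set_governor(0, 'userspace')
target.cpufreq.set_frequency(0, 1400000)     # now logs a warning if cpuinfo_cur_freq disagrees

requested = target.cpufreq.get_frequency(0)                 # scaling_cur_freq: what was requested
running = target.cpufreq.get_frequency(0, cpuinfo=True)     # cpuinfo_cur_freq: what the hardware reports
print(requested, running)
```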
devlib/module/hotplug.py:

```diff
@@ -14,6 +14,7 @@
 #

 from devlib.module import Module
+from devlib.exception import TargetTransientError


 class HotplugModule(Module):
@@ -39,9 +40,13 @@ class HotplugModule(Module):
         return [cpu for cpu in range(self.target.number_of_cpus)
                 if self.target.file_exists(self._cpu_path(self.target, cpu))]

-    def online_all(self):
+    def online_all(self, verify=True):
         self.target._execute_util('hotplug_online_all',  # pylint: disable=protected-access
                                   as_root=self.target.is_rooted)
+        if verify:
+            offline = set(self.target.list_offline_cpus())
+            if offline:
+                raise TargetTransientError('The following CPUs failed to come back online: {}'.format(offline))

     def online(self, *args):
         for cpu in args:
@@ -57,3 +62,23 @@ class HotplugModule(Module):
             return
         value = 1 if online else 0
         self.target.write_value(path, value)
+
+    def _get_path(self, path):
+        return self.target.path.join(self.base_path,
+                                     path)
+
+    def fail(self, cpu, state):
+        path = self._get_path('cpu{}/hotplug/fail'.format(cpu))
+        return self.target.write_value(path, state)
+
+    def get_state(self, cpu):
+        path = self._get_path('cpu{}/hotplug/state'.format(cpu))
+        return self.target.read_value(path)
+
+    def get_states(self):
+        path = self._get_path('hotplug/states')
+        states_string = self.target.read_value(path)
+        return dict(
+            map(str.strip, string.split(':', 1))
+            for string in states_string.strip().splitlines()
+        )
```
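A usage sketch of the extended hotplug module, assuming `target` is a connected, rooted devlib Target whose kernel exposes the cpuN/hotplug/* sysfs interface needed by the new helpers; CPU numbers are illustrative:

```python
target.hotplug.offline(2, 3)
target.hotplug.online_all()            # verify=True by default: raises TargetTransientError
                                       # if any CPU failed to come back online

print(target.hotplug.get_state(2))     # raw value of cpu2/hotplug/state
print(target.hotplug.get_states())     # dict built from the hotplug/states listing
target.hotplug.fail(2, -1)             # write cpu2/hotplug/fail (kernel hotplug fault injection)
```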
devlib/module/sched.py:

```diff
@@ -15,14 +15,13 @@

 import logging
 import re
-from enum import Enum

 from past.builtins import basestring

 from devlib.module import Module
 from devlib.utils.misc import memoized
 from devlib.utils.types import boolean
+from devlib.exception import TargetStableError


 class SchedProcFSNode(object):
     """
@@ -147,43 +146,44 @@ class SchedProcFSNode(object):
             self._dyn_attrs[key] = self._build_node(key, nodes[key])


-class DocInt(int):
-
-    # See https://stackoverflow.com/a/50473952/5096023
-    def __new__(cls, value, doc):
-        new = super(DocInt, cls).__new__(cls, value)
-        new.__doc__ = doc
-        return new
-
-
-class SchedDomainFlag(DocInt, Enum):
+class _SchedDomainFlag:
     """
-    Represents a sched domain flag
+    Backward-compatible emulation of the former :class:`enum.Enum` that will
+    work on recent kernels with dynamic sched domain flags name and no value
+    exposed.
     """
-    # pylint: disable=bad-whitespace
-    # Domain flags obtained from include/linux/sched/topology.h on v4.17
-    # https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux/+/v4.17/include/linux/sched/topology.h#20
-    SD_LOAD_BALANCE = 0x0001, "Do load balancing on this domain"
-    SD_BALANCE_NEWIDLE = 0x0002, "Balance when about to become idle"
-    SD_BALANCE_EXEC = 0x0004, "Balance on exec"
-    SD_BALANCE_FORK = 0x0008, "Balance on fork, clone"
-    SD_BALANCE_WAKE = 0x0010, "Balance on wakeup"
-    SD_WAKE_AFFINE = 0x0020, "Wake task to waking CPU"
-    SD_ASYM_CPUCAPACITY = 0x0040, "Groups have different max cpu capacities"
-    SD_SHARE_CPUCAPACITY = 0x0080, "Domain members share cpu capacity"
-    SD_SHARE_POWERDOMAIN = 0x0100, "Domain members share power domain"
-    SD_SHARE_PKG_RESOURCES = 0x0200, "Domain members share cpu pkg resources"
-    SD_SERIALIZE = 0x0400, "Only a single load balancing instance"
-    SD_ASYM_PACKING = 0x0800, "Place busy groups earlier in the domain"
-    SD_PREFER_SIBLING = 0x1000, "Prefer to place tasks in a sibling domain"
-    SD_OVERLAP = 0x2000, "Sched_domains of this level overlap"
-    SD_NUMA = 0x4000, "Cross-node balancing"
-    # Only defined in Android
-    # https://android.googlesource.com/kernel/common/+/android-4.14/include/linux/sched/topology.h#29
-    SD_SHARE_CAP_STATES = 0x8000, "(Android only) Domain members share capacity state"
-
-    @classmethod
-    def check_version(cls, target, logger):
+    _INSTANCES = {}
+    """
+    Dictionary storing the instances so that they can be compared with ``is``
+    operator.
+    """
+
+    def __new__(cls, name, value, doc=None):
+        self = super().__new__(cls)
+        self.name = name
+        self._value = value
+        self.__doc__ = doc
+        return cls._INSTANCES.setdefault(self, self)
+
+    def __eq__(self, other):
+        # We *have to* check for "value" as well, otherwise it will be
+        # impossible to keep in the same set 2 instances with differing values.
+        return self.name == other.name and self._value == other._value
+
+    def __hash__(self):
+        return hash((self.name, self._value))
+
+    @property
+    def value(self):
+        value = self._value
+        if value is None:
+            raise AttributeError('The kernel does not expose the sched domain flag values')
+        else:
+            return value
+
+    @staticmethod
+    def check_version(target, logger):
         """
         Check the target and see if its kernel version matches our view of the world
         """
@@ -197,38 +197,139 @@ class SchedDomainFlag(DocInt, Enum):
                 "but target is running v{}".format(ref_parts, parts)
             )

     def __str__(self):
         return self.name

+    def __repr__(self):
+        return '<SchedDomainFlag: {}>'.format(self.name)
+
+
+class _SchedDomainFlagMeta(type):
+    """
+    Metaclass of :class:`SchedDomainFlag`.
+
+    Provides some level of emulation of :class:`enum.Enum` behavior for
+    backward compatibility.
+    """
+    @property
+    def _flags(self):
+        return [
+            attr
+            for name, attr in self.__dict__.items()
+            if name.startswith('SD_')
+        ]
+
+    def __getitem__(self, i):
+        return self._flags[i]
+
+    def __len__(self):
+        return len(self._flags)
+
+    # These would be provided by collections.abc.Sequence, but using it on a
+    # metaclass seems to have issues around __init_subclass__
+    def __iter__(self):
+        return iter(self._flags)
+
+    def __reversed__(self):
+        return reversed(self._flags)
+
+    def __contains__(self, x):
+        return x in self._flags
+
+    @property
+    def __members__(self):
+        return {flag.name: flag for flag in self._flags}
+
+
+class SchedDomainFlag(_SchedDomainFlag, metaclass=_SchedDomainFlagMeta):
+    """
+    Represents a sched domain flag.
+
+    .. note:: ``SD_*`` class attributes are deprecated, new code should never
+        test a given flag against one of these attributes with ``is`` (.e.g ``x
+        is SchedDomainFlag.SD_LOAD_BALANCE``. This is because the
+        ``SD_LOAD_BALANCE`` flag exists in two flavors that are not equal: one
+        with a value (the class attribute) and one without (dynamically created
+        when parsing flags for new kernels). Old code ran on old kernels should
+        work fine though.
+    """
+    # pylint: disable=bad-whitespace
+    # Domain flags obtained from include/linux/sched/topology.h on v4.17
+    # https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux/+/v4.17/include/linux/sched/topology.h#20
+    SD_LOAD_BALANCE = _SchedDomainFlag("SD_LOAD_BALANCE", 0x0001, "Do load balancing on this domain")
+    SD_BALANCE_NEWIDLE = _SchedDomainFlag("SD_BALANCE_NEWIDLE", 0x0002, "Balance when about to become idle")
+    SD_BALANCE_EXEC = _SchedDomainFlag("SD_BALANCE_EXEC", 0x0004, "Balance on exec")
+    SD_BALANCE_FORK = _SchedDomainFlag("SD_BALANCE_FORK", 0x0008, "Balance on fork, clone")
+    SD_BALANCE_WAKE = _SchedDomainFlag("SD_BALANCE_WAKE", 0x0010, "Balance on wakeup")
+    SD_WAKE_AFFINE = _SchedDomainFlag("SD_WAKE_AFFINE", 0x0020, "Wake task to waking CPU")
+    SD_ASYM_CPUCAPACITY = _SchedDomainFlag("SD_ASYM_CPUCAPACITY", 0x0040, "Groups have different max cpu capacities")
+    SD_SHARE_CPUCAPACITY = _SchedDomainFlag("SD_SHARE_CPUCAPACITY", 0x0080, "Domain members share cpu capacity")
+    SD_SHARE_POWERDOMAIN = _SchedDomainFlag("SD_SHARE_POWERDOMAIN", 0x0100, "Domain members share power domain")
+    SD_SHARE_PKG_RESOURCES = _SchedDomainFlag("SD_SHARE_PKG_RESOURCES", 0x0200, "Domain members share cpu pkg resources")
+    SD_SERIALIZE = _SchedDomainFlag("SD_SERIALIZE", 0x0400, "Only a single load balancing instance")
+    SD_ASYM_PACKING = _SchedDomainFlag("SD_ASYM_PACKING", 0x0800, "Place busy groups earlier in the domain")
+    SD_PREFER_SIBLING = _SchedDomainFlag("SD_PREFER_SIBLING", 0x1000, "Prefer to place tasks in a sibling domain")
+    SD_OVERLAP = _SchedDomainFlag("SD_OVERLAP", 0x2000, "Sched_domains of this level overlap")
+    SD_NUMA = _SchedDomainFlag("SD_NUMA", 0x4000, "Cross-node balancing")
+    # Only defined in Android
+    # https://android.googlesource.com/kernel/common/+/android-4.14/include/linux/sched/topology.h#29
+    SD_SHARE_CAP_STATES = _SchedDomainFlag("SD_SHARE_CAP_STATES", 0x8000, "(Android only) Domain members share capacity state")
+

 class SchedDomain(SchedProcFSNode):
     """
     Represents a sched domain as seen through procfs
     """
     def __init__(self, nodes):
-        super(SchedDomain, self).__init__(nodes)
+        super().__init__(nodes)

-        obj_flags = set()
-        for flag in list(SchedDomainFlag):
-            if self.flags & flag.value == flag.value:
-                obj_flags.add(flag)
+        flags = self.flags
+        # Recent kernels now have a space-separated list of flags instead of a
+        # packed bitfield
+        if isinstance(flags, str):
+            flags = {
+                _SchedDomainFlag(name=name, value=None)
+                for name in flags.split()
+            }
+        else:
+            def has_flag(flags, flag):
+                return flags & flag.value == flag.value

-        self.flags = obj_flags
+            flags = {
+                flag
+                for flag in SchedDomainFlag
+                if has_flag(flags, flag)
+            }
+
+        self.flags = flags
+
+
+def _select_path(target, paths, name):
+    for p in paths:
+        if target.file_exists(p):
+            return p
+
+    raise TargetStableError('No {} found. Tried: {}'.format(name, ', '.join(paths)))
+

 class SchedProcFSData(SchedProcFSNode):
     """
     Root class for creating & storing SchedProcFSNode instances
     """
     _read_depth = 6
-    sched_domain_root = '/proc/sys/kernel/sched_domain'
+
+    @classmethod
+    def get_data_root(cls, target):
+        # Location differs depending on kernel version
+        paths = ['/sys/kernel/debug/sched/domains/', '/proc/sys/kernel/sched_domain']
+        return _select_path(target, paths, "sched_domain debug directory")

     @staticmethod
     def available(target):
-        path = SchedProcFSData.sched_domain_root
-        cpus = target.list_directory(path) if target.file_exists(path) else []
+        try:
+            path = SchedProcFSData.get_data_root(target)
+        except TargetStableError:
+            return False
+
+        cpus = target.list_directory(path)
         if not cpus:
             return False
@@ -242,7 +343,7 @@ class SchedProcFSData(SchedProcFSNode):

     def __init__(self, target, path=None):
         if path is None:
-            path = self.sched_domain_root
+            path = SchedProcFSData.get_data_root(target)

         procfs = target.read_tree_values(path, depth=self._read_depth)
         super(SchedProcFSData, self).__init__(procfs)
@@ -275,6 +376,15 @@ class SchedModule(Module):

         return schedproc or debug or dmips

+    def __init__(self, target):
+        super().__init__(target)
+
+    @classmethod
+    def get_sched_features_path(cls, target):
+        # Location differs depending on kernel version
+        paths = ['/sys/kernel/debug/sched/features', '/sys/kernel/debug/sched_features']
+        return _select_path(target, paths, "sched_features file")
+
     def get_kernel_attributes(self, matching=None, check_exit_code=True):
         """
         Get the value of scheduler attributes.
@@ -331,12 +441,12 @@ class SchedModule(Module):
     def target_has_debug(cls, target):
         if target.config.get('SCHED_DEBUG') != 'y':
             return False
-        return target.file_exists('/sys/kernel/debug/sched_features')

-    @property
-    @memoized
-    def has_debug(self):
-        return self.target_has_debug(self.target)
+        try:
+            cls.get_sched_features_path(target)
+            return True
+        except TargetStableError:
+            return False

     def get_features(self):
         """
@@ -344,9 +454,7 @@ class SchedModule(Module):

         :returns: a dictionary of features and their "is enabled" status
         """
-        if not self.has_debug:
-            raise RuntimeError("sched_features not available")
-        feats = self.target.read_value('/sys/kernel/debug/sched_features')
+        feats = self.target.read_value(self.get_sched_features_path(self.target))
         features = {}
         for feat in feats.split():
             value = True
@@ -366,13 +474,11 @@ class SchedModule(Module):
         :raise ValueError: if the specified enable value is not bool
         :raise RuntimeError: if the specified feature cannot be set
         """
-        if not self.has_debug:
-            raise RuntimeError("sched_features not available")
         feature = feature.upper()
         feat_value = feature
         if not boolean(enable):
             feat_value = 'NO_' + feat_value
-        self.target.write_value('/sys/kernel/debug/sched_features',
+        self.target.write_value(self.get_sched_features_path(self.target),
                                 feat_value, verify=False)
         if not verify:
             return
@@ -384,10 +490,10 @@ class SchedModule(Module):

     def get_cpu_sd_info(self, cpu):
         """
-        :returns: An object view of /proc/sys/kernel/sched_domain/cpu<cpu>/*
+        :returns: An object view of the sched_domain debug directory of 'cpu'
         """
         path = self.target.path.join(
-            SchedProcFSData.sched_domain_root,
+            SchedProcFSData.get_data_root(self.target),
             "cpu{}".format(cpu)
         )
@@ -395,7 +501,7 @@ class SchedModule(Module):

     def get_sd_info(self):
         """
-        :returns: An object view of /proc/sys/kernel/sched_domain/*
+        :returns: An object view of the entire sched_domain debug directory
         """
         return SchedProcFSData(self.target)
```
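A sketch of the two flag representations that now coexist, using only the classes defined in the diff above (the value shown comes from the v4.17 table):

```python
legacy = SchedDomainFlag.SD_BALANCE_EXEC                  # class attribute: carries a value (0x0004)
dynamic = _SchedDomainFlag("SD_BALANCE_EXEC", None)       # parsed from a new kernel: name only

print(legacy == dynamic)            # False: __eq__ compares both name and value
print(legacy.value)                 # 4
try:
    dynamic.value
except AttributeError:
    print('kernel did not expose flag values')

# Name-based checks keep working across both flavours:
print('SD_BALANCE_EXEC' in {flag.name for flag in SchedDomainFlag})   # True
```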
devlib/target.py (106 changed lines):

```diff
@@ -76,6 +76,48 @@ GOOGLE_DNS_SERVER_ADDRESS = '8.8.8.8'

 installed_package_info = namedtuple('installed_package_info', 'apk_path package')


+def call_conn(f):
+    """
+    Decorator to be used on all :class:`devlib.target.Target` methods that
+    directly use a method of ``self.conn``.
+
+    This ensures that if a call to any of the decorated method occurs while
+    executing, a new connection will be created in order to avoid possible
+    deadlocks. This can happen if e.g. a target's method is called from
+    ``__del__``, which could be executed by the garbage collector, interrupting
+    another call to a method of the connection instance.
+
+    .. note:: This decorator could be applied directly to all methods with a
+        metaclass or ``__init_subclass__`` but it could create issues when
+        passing target methods as callbacks to connections' methods.
+    """
+
+    @functools.wraps(f)
+    def wrapper(self, *args, **kwargs):
+        reentered = self.conn.is_in_use
+        disconnect = False
+        try:
+            # If the connection was already in use we need to use a different
+            # instance to avoid reentrancy deadlocks. This can happen even in
+            # single threaded code via __del__ implementations that can be
+            # called at any point.
+            if reentered:
+                # Shallow copy so we can use another connection instance
+                _self = copy.copy(self)
+                _self.conn = _self.get_connection()
+                assert self.conn is not _self.conn
+                disconnect = True
+            else:
+                _self = self
+            return f(_self, *args, **kwargs)
+        finally:
+            if disconnect:
+                _self.disconnect()
+
+    return wrapper
+
+
 class Target(object):

     path = None
@@ -135,6 +177,14 @@ class Target(object):
     def kernel_version(self):
         return KernelVersion(self.execute('{} uname -r -v'.format(quote(self.busybox))).strip())

+    @property
+    def hostid(self):
+        return int(self.execute('{} hostid'.format(self.busybox)).strip(), 16)
+
+    @property
+    def hostname(self):
+        return self.execute('{} hostname'.format(self.busybox)).strip()
+
     @property
     def os_version(self):  # pylint: disable=no-self-use
         return {}
@@ -286,6 +336,14 @@ class Target(object):
         if connect:
             self.connect()

+    def __copy__(self):
+        new = self.__class__.__new__(self.__class__)
+        new.__dict__ = self.__dict__.copy()
+        # Avoid sharing the connection instance with the original target, so
+        # that each target can live its own independent life
+        del new.__dict__['_conn']
+        return new
+
     # connection and initialization

     def connect(self, timeout=None, check_boot_completed=True):
@@ -425,6 +483,7 @@ class Target(object):
                 dst_mkdir(dest)

+    @call_conn
     def push(self, source, dest, as_root=False, timeout=None, globbing=False):  # pylint: disable=arguments-differ
         sources = glob.glob(source) if globbing else [source]
         self._prepare_xfer('push', sources, dest)
@@ -480,6 +539,7 @@ class Target(object):

         return paths

+    @call_conn
     def pull(self, source, dest, as_root=False, timeout=None, globbing=False):  # pylint: disable=arguments-differ
         if globbing:
             sources = self._expand_glob(source, as_root=as_root)
@@ -536,10 +596,7 @@ class Target(object):

     # execution

-    def execute(self, command, timeout=None, check_exit_code=True,
-                as_root=False, strip_colors=True, will_succeed=False,
-                force_locale='C'):
-
+    def _prepare_cmd(self, command, force_locale):
         # Force the locale if necessary for more predictable output
         if force_locale:
             # Use an explicit export so that the command is allowed to be any
@@ -550,12 +607,28 @@ class Target(object):
         if self.executables_directory:
             command = "export PATH={}:$PATH && {}".format(quote(self.executables_directory), command)

+        return command
+
+    @call_conn
+    def execute(self, command, timeout=None, check_exit_code=True,
+                as_root=False, strip_colors=True, will_succeed=False,
+                force_locale='C'):
+
+        command = self._prepare_cmd(command, force_locale)
         return self.conn.execute(command, timeout=timeout,
                                  check_exit_code=check_exit_code, as_root=as_root,
                                  strip_colors=strip_colors, will_succeed=will_succeed)

-    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
-        return self.conn.background(command, stdout, stderr, as_root)
+    @call_conn
+    def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False,
+                   force_locale='C', timeout=None):
+        command = self._prepare_cmd(command, force_locale)
+        bg_cmd = self.conn.background(command, stdout, stderr, as_root)
+        if timeout is not None:
+            timer = threading.Timer(timeout, function=bg_cmd.cancel)
+            timer.daemon = True
+            timer.start()
+        return bg_cmd

     def invoke(self, binary, args=None, in_directory=None, on_cpus=None,
                redirect_stderr=False, as_root=False, timeout=30):
```
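The call_conn decorator above matters mostly for implicit calls such as `__del__`. A hedged sketch of the failure mode it guards against, plus the new `background(timeout=...)` behaviour, assuming `target` is a connected devlib Target and the commands are illustrative:

```python
# Hypothetical user code: __del__ may fire from the garbage collector while another
# target method is still using self.conn. call_conn detects this via conn.is_in_use
# and routes the nested call through a temporary copy of the target with its own
# connection (see Target.__copy__ above).
class Scratch:
    def __init__(self, target):
        self.target = target
        target.execute('mkdir -p /tmp/scratch')

    def __del__(self):
        self.target.execute('rm -rf /tmp/scratch')   # safe even if it interrupts another call

# background() now also accepts a timeout: the command is cancelled from a daemon timer thread.
bg = target.background('yes > /dev/null', timeout=10)
bg.wait()
```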
```diff
@@ -672,6 +745,7 @@ class Target(object):
             pass
         self.conn.connected_as_root = None

+    @call_conn
     def check_responsive(self, explode=True):
         try:
             self.conn.execute('ls /', timeout=5)
@@ -986,6 +1060,7 @@ class Target(object):
         os.remove(shutils_ofile)
         os.rmdir(tmp_dir)

+    @call_conn
     def _execute_util(self, command, timeout=None, check_exit_code=True, as_root=False):
         command = '{} {}'.format(self.shutils, command)
         return self.conn.execute(command, timeout, check_exit_code, as_root)
@@ -1150,6 +1225,7 @@ class LinuxTarget(Target):
     def wait_boot_complete(self, timeout=10):
         pass

+    @call_conn
     def kick_off(self, command, as_root=False):
         command = 'sh -c {} 1>/dev/null 2>/dev/null &'.format(quote(command))
         return self.conn.execute(command, as_root=as_root)
@@ -1667,7 +1743,7 @@ class AndroidTarget(Target):
             self.remove(on_device_executable, as_root=self.needs_su)

     def dump_logcat(self, filepath, filter=None, logcat_format=None, append=False,
-                    timeout=30):  # pylint: disable=redefined-builtin
+                    timeout=60):  # pylint: disable=redefined-builtin
         op = '>>' if append else '>'
         filtstr = ' -s {}'.format(quote(filter)) if filter else ''
         formatstr = ' -v {}'.format(quote(logcat_format)) if logcat_format else ''
@@ -1683,18 +1759,24 @@ class AndroidTarget(Target):
             self.remove(dev_path)

     def clear_logcat(self):
-        with self.clear_logcat_lock:
-            if isinstance(self.conn, AdbConnection):
-                adb_command(self.adb_name, 'logcat -c', timeout=30, adb_server=self.adb_server)
-            else:
-                self.execute('logcat -c', timeout=30)
+        locked = self.clear_logcat_lock.acquire(blocking=False)
+        if locked:
+            try:
+                if isinstance(self.conn, AdbConnection):
+                    adb_command(self.adb_name, 'logcat -c', timeout=30, adb_server=self.adb_server)
+                else:
+                    self.execute('logcat -c', timeout=30)
+            finally:
+                self.clear_logcat_lock.release()

     def get_logcat_monitor(self, regexps=None):
         return LogcatMonitor(self, regexps)

+    @call_conn
     def wait_for_device(self, timeout=30):
         self.conn.wait_for_device()

+    @call_conn
     def reboot_bootloader(self, timeout=30):
         self.conn.reboot_bootloader()
```
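A sketch of the new clear_logcat() behaviour under concurrency, assuming `android_target` is a connected devlib AndroidTarget and the thread count is illustrative: callers that fail to take the non-blocking lock simply return, since another thread is already clearing the log.

```python
import threading

def clear(t):
    t.clear_logcat()   # at most one 'logcat -c' runs; losers of the race skip it instead of queueing

threads = [threading.Thread(target=clear, args=(android_target,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```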
devlib/utils/android.py:

```diff
@@ -20,18 +20,20 @@ Utility functions for working with Android devices through adb.
 """
 # pylint: disable=E1103
 import glob
-import os
-import re
-import sys
-import time
 import logging
-import tempfile
-import subprocess
-from collections import defaultdict
+import os
 import pexpect
-import xml.etree.ElementTree
-import zipfile
+import re
+import subprocess
+import sys
+import tempfile
+import time
 import uuid
+import zipfile
+
+from collections import defaultdict
+from io import StringIO
+from lxml import etree

 try:
     from shlex import quote
@@ -227,7 +229,10 @@ class ApkInfo(object):
             command = [dexdump, '-l', 'xml', extracted]
             dump = self._run(command)

-            xml_tree = xml.etree.ElementTree.fromstring(dump)
+            # Dexdump from build tools v30.0.X does not seem to produce
+            # valid xml from certain APKs so ignore errors and attempt to recover.
+            parser = etree.XMLParser(encoding='utf-8', recover=True)
+            xml_tree = etree.parse(StringIO(dump), parser)

             package = next((i for i in xml_tree.iter('package')
                             if i.attrib['name'] == self.package), None)
@@ -577,7 +582,7 @@ def adb_background_shell(conn, command,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          as_root=False):
-    """Runs the sepcified command in a subprocess, returning the the Popen object."""
+    """Runs the specified command in a subprocess, returning the the Popen object."""
    device = conn.device
    adb_server = conn.adb_server

@@ -598,7 +603,7 @@ def adb_background_shell(conn, command,
     p = subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)

     # Out of band PID lookup, to avoid conflicting needs with stdout redirection
-    find_pid = 'ps -A -o pid,args | grep {}'.format(quote(uuid_var))
+    find_pid = '{} ps -A -o pid,args | grep {}'.format(conn.busybox, quote(uuid_var))
     ps_out = conn.execute(find_pid)
     pids = [
         int(line.strip().split(' ', 1)[0])
@@ -734,11 +739,13 @@ def _discover_aapt(env):
         aapt2_path = ''
         versions = os.listdir(env.build_tools)
         for version in reversed(sorted(versions)):
-            if not aapt2_path and not os.path.isfile(aapt2_path):
+            if not os.path.isfile(aapt2_path):
                 aapt2_path = os.path.join(env.build_tools, version, 'aapt2')
-            if not aapt_path and not os.path.isfile(aapt_path):
+            if not os.path.isfile(aapt_path):
                 aapt_path = os.path.join(env.build_tools, version, 'aapt')
                 aapt_version = 1
+            # Use latest available version for aapt/appt2 but ensure at least one is valid.
+            if os.path.isfile(aapt2_path) or os.path.isfile(aapt_path):
                 break

     # Use aapt2 only if present and we have a suitable version
```
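A self-contained sketch of the recovering parse introduced above; lxml and its `recover=True` option are real, the malformed snippet is made up, and devlib additionally passes `encoding='utf-8'` to the parser:

```python
from io import StringIO
from lxml import etree

broken = '<manifest><package name="com.example.app"></manifest>'  # unclosed <package> element
parser = etree.XMLParser(recover=True)
tree = etree.parse(StringIO(broken), parser)

print(tree.getroot().tag)                        # 'manifest', despite the malformed input
print(next(tree.iter('package')).get('name'))    # 'com.example.app'
```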
@@ -37,6 +37,7 @@ import string
|
|||||||
import subprocess
|
import subprocess
|
||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
|
import types
|
||||||
import wrapt
|
import wrapt
|
||||||
import warnings
|
import warnings
|
||||||
|
|
||||||
@@ -152,7 +153,7 @@ def preexec_function():
 check_output_logger = logging.getLogger('check_output')
 # Popen is not thread safe. If two threads attempt to call it at the same time,
 # one may lock up. See https://bugs.python.org/issue12739.
-check_output_lock = threading.Lock()
+check_output_lock = threading.RLock()


 def get_subprocess(command, **kwargs):
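This hunk and the tls_property one below both swap a plain Lock for an RLock. The difference matters when code holding the lock calls back into itself on the same thread: a Lock deadlocks, an RLock simply counts the re-acquisition. A small self-contained illustration:

    import threading

    lock = threading.RLock()

    def inner():
        # With threading.Lock() this second acquisition by the same thread
        # would block forever; RLock just increments a recursion counter.
        with lock:
            return 'ok'

    def outer():
        with lock:
            return inner()

    print(outer())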
@@ -783,7 +784,7 @@ class tls_property:
     def __init__(self, factory):
         self.factory = factory
         # Lock accesses to shared WeakKeyDictionary and WeakSet
-        self.lock = threading.Lock()
+        self.lock = threading.RLock()

     def __get__(self, instance, owner=None):
         return _BoundTLSProperty(self, instance, owner)
@@ -883,10 +884,14 @@ class _BoundTLSProperty:

 class InitCheckpointMeta(type):
     """
-    Metaclass providing an ``initialized`` boolean attributes on instances.
+    Metaclass providing an ``initialized`` and ``is_in_use`` boolean attributes
+    on instances.

     ``initialized`` is set to ``True`` once the ``__init__`` constructor has
     returned. It will deal cleanly with nested calls to ``super().__init__``.
+
+    ``is_in_use`` is set to ``True`` when an instance method is being called.
+    This allows to detect reentrance.
     """
     def __new__(metacls, name, bases, dct, **kwargs):
         cls = super().__new__(metacls, name, bases, dct, **kwargs)
@@ -895,6 +900,7 @@ class InitCheckpointMeta(type):
         @wraps(init_f)
         def init_wrapper(self, *args, **kwargs):
             self.initialized = False
+            self.is_in_use = False

             # Track the nesting of super()__init__ to set initialized=True only
             # when the outer level is finished
@@ -918,6 +924,45 @@ class InitCheckpointMeta(type):

         cls.__init__ = init_wrapper

+        # Set the is_in_use attribute to allow external code to detect if the
+        # methods are about to be re-entered.
+        def make_wrapper(f):
+            if f is None:
+                return None
+
+            @wraps(f)
+            def wrapper(self, *args, **kwargs):
+                f_ = f.__get__(self, self.__class__)
+                initial_state = self.is_in_use
+                try:
+                    self.is_in_use = True
+                    return f_(*args, **kwargs)
+                finally:
+                    self.is_in_use = initial_state
+
+            return wrapper
+
+        # This will not decorate methods defined in base classes, but we cannot
+        # use inspect.getmembers() as it uses __get__ to bind the attributes to
+        # the class, making staticmethod indistinguishible from instance
+        # methods.
+        for name, attr in cls.__dict__.items():
+            # Only wrap the methods (exposed as functions), not things like
+            # classmethod or staticmethod
+            if (
+                name not in ('__init__', '__new__') and
+                isinstance(attr, types.FunctionType)
+            ):
+                setattr(cls, name, make_wrapper(attr))
+            elif isinstance(attr, property):
+                prop = property(
+                    fget=make_wrapper(attr.fget),
+                    fset=make_wrapper(attr.fset),
+                    fdel=make_wrapper(attr.fdel),
+                    doc=attr.__doc__,
+                )
+                setattr(cls, name, prop)
+
         return cls

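A standalone sketch of the same wrapping pattern, deliberately not importing devlib (class and method names are illustrative): every plain method is wrapped so that `is_in_use` is `True` while any of them runs, which is what lets external code detect reentrance.

    import types
    from functools import wraps

    class ReentranceMeta(type):
        def __new__(metacls, name, bases, dct, **kwargs):
            cls = super().__new__(metacls, name, bases, dct, **kwargs)

            def make_wrapper(f):
                @wraps(f)
                def wrapper(self, *args, **kwargs):
                    initial = getattr(self, 'is_in_use', False)
                    self.is_in_use = True
                    try:
                        return f(self, *args, **kwargs)
                    finally:
                        self.is_in_use = initial
                return wrapper

            for attr_name, attr in list(cls.__dict__.items()):
                if attr_name not in ('__init__', '__new__') and isinstance(attr, types.FunctionType):
                    setattr(cls, attr_name, make_wrapper(attr))
            return cls

    class Connection(metaclass=ReentranceMeta):
        is_in_use = False

        def execute(self, cmd):
            assert self.is_in_use      # set by the wrapper while we run
            return cmd

    conn = Connection()
    conn.execute('true')
    assert conn.is_in_use is False     # restored once the call returns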
@@ -59,7 +59,7 @@ from devlib.connection import (ConnectionBase, ParamikoBackgroundCommand, PopenB
                                SSHTransferManager)


-DEFAULT_SSH_SUDO_COMMAND = "sudo -p ' ' -S -- sh -c {}"
+DEFAULT_SSH_SUDO_COMMAND = "sudo -k -p ' ' -S -- sh -c {}"


 ssh = None
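Adding `-k` makes sudo discard any cached credentials, so the password fed on stdin (`-S`) is always consulted rather than being skipped while a previous timestamp is still valid. A sketch of how such a template is typically filled with a quoted command (the helper below is illustrative, not the devlib call site):

    from shlex import quote

    DEFAULT_SSH_SUDO_COMMAND = "sudo -k -p ' ' -S -- sh -c {}"

    def as_root(command):
        # -k: ignore cached credentials, -S: read the password from stdin,
        # -p ' ': use a minimal prompt that is easy to detect and strip.
        return DEFAULT_SSH_SUDO_COMMAND.format(quote(command))

    print(as_root('cat /sys/kernel/debug/sched_features'))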
@@ -466,7 +466,13 @@ class SshConnection(SshConnectionBase):
         return self.transfer_mgr.progress_cb if self.transfer_mgr is not None else None

     def _get_sftp(self, timeout):
-        sftp = self.client.open_sftp()
+        try:
+            sftp = self.client.open_sftp()
+        except paramiko.ssh_exception.SSHException as e:
+            if 'EOF during negotiation' in str(e):
+                raise TargetStableError('The SSH server does not support SFTP. Please install and enable appropriate module.') from e
+            else:
+                raise
         sftp.get_channel().settimeout(timeout)
         return sftp
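paramiko reports a server built without the SFTP subsystem as a generic SSHException whose message contains 'EOF during negotiation'; the hunk converts that into a clearer error. A hedged sketch of the same detection used for an SCP fallback instead, assuming the third-party `scp` package (which the `use_scp` path relies on); the helper name is illustrative:

    import paramiko
    from scp import SCPClient

    def open_transfer_channel(client):
        # Prefer SFTP; fall back to SCP when the server has no SFTP subsystem.
        try:
            return client.open_sftp()
        except paramiko.ssh_exception.SSHException as e:
            if 'EOF during negotiation' in str(e):
                return SCPClient(client.get_transport())
            raise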
@@ -654,7 +660,7 @@ class SshConnection(SshConnectionBase):
         # Read are not buffered so we will always get the data as soon as
         # they arrive
         return (
-            channel.makefile_stdin(),
+            channel.makefile_stdin('w', 0),
             channel.makefile(),
             channel.makefile_stderr(),
         )
@@ -685,11 +691,11 @@ class SshConnection(SshConnectionBase):
                 w = os.fdopen(w, 'wb')
             # Turn a file descriptor into a file-like object
             elif isinstance(stream_out, int) and stream_out >= 0:
-                r = os.fdopen(stream_out, 'rb')
+                r = os.fdopen(stream_in, 'rb')
                 w = os.fdopen(stream_out, 'wb')
             # file-like object
             else:
-                r = stream_out
+                r = stream_in
                 w = stream_out

             return (r, w)
@@ -21,7 +21,7 @@ from subprocess import Popen, PIPE

 VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev'])

-version = VersionTuple(1, 3, 0, '')
+version = VersionTuple(1, 3, 2, '')


 def get_devlib_version():
@@ -33,8 +33,11 @@ def get_devlib_version():


 def get_commit():
-    p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__),
-              stdout=PIPE, stderr=PIPE)
+    try:
+        p = Popen(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__),
+                  stdout=PIPE, stderr=PIPE)
+    except FileNotFoundError:
+        return None
     std, _ = p.communicate()
     p.wait()
     if p.returncode:
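With the guard above, get_commit() now returns None when the `git` binary is missing (for example in a plain pip install) instead of raising FileNotFoundError. A minimal usage sketch; the import path is an assumption about where these helpers live in the devlib tree:

    from devlib.utils.version import get_devlib_version, get_commit

    version = get_devlib_version()
    commit = get_commit()   # None when git is unavailable or this is not a checkout
    print('devlib {}{}'.format(version, '+{}'.format(commit) if commit else ''))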
@@ -147,7 +147,7 @@ Connection Types
 .. class:: SshConnection(host, username, password=None, keyfile=None, port=22,\
                          timeout=None, platform=None, \
                          sudo_cmd="sudo -- sh -c {}", strict_host_check=True, \
-                         use_scp=False, poll_transfers=False,
+                         use_scp=False, poll_transfers=False, \
                          start_transfer_poll_delay=30, total_transfer_timeout=3600,\
                          transfer_poll_period=30)

@@ -177,7 +177,7 @@ Connection Types
    :param platform: Specify the platform to be used. The generic :class:`~devlib.platform.Platform`
                     class is used by default.
    :param sudo_cmd: Specify the format of the command used to grant sudo access.
    :param strict_host_check: Specify the ssh connection parameter ``StrictHostKeyChecking``,
    :param use_scp: Use SCP for file transfers, defaults to SFTP.
    :param poll_transfers: Specify whether file transfers should be polled. Polling
                           monitors the progress of file transfers and periodically
@@ -125,10 +125,21 @@ Target
    This is a dict that contains a mapping of OS version elements to their
    values. This mapping is OS-specific.

+.. attribute:: Target.hostname
+
+   A string containing the hostname of the target.
+
+.. attribute:: Target.hostid
+
+   A numerical id used to represent the identity of the target.
+
+   .. note:: Currently on 64-bit PowerPC devices this id will always be 0. This is
+             due to the included busybox binary being linked with musl.
+
 .. attribute:: Target.system_id

    A unique identifier for the system running on the target. This identifier is
-   intended to be uninque for the combination of hardware, kernel, and file
+   intended to be unique for the combination of hardware, kernel, and file
    system.

 .. attribute:: Target.model
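A hedged usage sketch for the newly documented attributes; LocalLinuxTarget is just one convenient Target flavour to demonstrate with, and real use may need explicit connection_settings:

    from devlib import LocalLinuxTarget

    target = LocalLinuxTarget()   # placeholder setup
    print(target.hostname)        # string hostname of the target
    print(target.hostid)          # numeric identity; 0 on 64-bit PowerPC (see note above)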
@@ -225,15 +236,16 @@ Target
    If transfer polling is supported (ADB connections and SSH connections),
    ``poll_transfers`` is set in the connection, and a timeout is not specified,
    the push will be polled for activity. Inactive transfers will be
-   cancelled. (See :ref:`connection-types`\ for more information on polling).
+   cancelled. (See :ref:`connection-types` for more information on polling).

    :param source: path on the host
    :param dest: path on the target
    :param as_root: whether root is required. Defaults to false.
    :param timeout: timeout (in seconds) for the transfer; if the transfer does
-       not complete within this period, an exception will be raised.
+       not complete within this period, an exception will be raised. Leave unset
+       to utilise transfer polling if enabled.
    :param globbing: If ``True``, the ``source`` is interpreted as a globbing
-       pattern instead of being take as-is. If the pattern has mulitple
+       pattern instead of being take as-is. If the pattern has multiple
        matches, ``dest`` must be a folder (or will be created as such if it
        does not exists yet).

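A hedged usage sketch of the parameters documented above, reusing the `target` instance from the earlier sketch; the host and device paths are placeholders:

    # Globbing: multiple matches, so dest must be (or will be created as) a folder.
    target.push('results/*.csv', '/data/local/tmp/results/', globbing=True)

    # An explicit timeout raises if the transfer stalls; leaving it unset lets
    # transfer polling (when enabled on the connection) supervise it instead.
    target.push('rootfs.img', '/data/local/tmp/rootfs.img', timeout=600)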
@@ -244,7 +256,7 @@ Target
    If transfer polling is supported (ADB connections and SSH connections),
    ``poll_transfers`` is set in the connection, and a timeout is not specified,
    the pull will be polled for activity. Inactive transfers will be
-   cancelled. (See :ref:`connection-types`\ for more information on polling).
+   cancelled. (See :ref:`connection-types` for more information on polling).

    :param source: path on the target
    :param dest: path on the host
@@ -252,7 +264,7 @@ Target
    :param timeout: timeout (in seconds) for the transfer; if the transfer does
        not complete within this period, an exception will be raised.
    :param globbing: If ``True``, the ``source`` is interpreted as a globbing
-       pattern instead of being take as-is. If the pattern has mulitple
+       pattern instead of being take as-is. If the pattern has multiple
        matches, ``dest`` must be a folder (or will be created as such if it
        does not exists yet).

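The same globbing behaviour applies to pull; a short hedged sketch with placeholder paths, again reusing `target` from the earlier sketch:

    # dest is treated (and created) as a folder when the pattern matches several files.
    target.pull('/var/log/*.log', 'collected-logs/', globbing=True)

    # Single file with an explicit timeout instead of transfer polling.
    target.pull('/proc/config.gz', 'config.gz', timeout=60)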
@@ -280,7 +292,7 @@ Target
       command to get predictable output that can be more safely parsed.
       If ``None``, no locale is prepended.

-.. method:: Target.background(command [, stdout [, stderr [, as_root]]])
+.. method:: Target.background(command [, stdout [, stderr [, as_root, [, force_locale [, timeout]]])

    Execute the command on the target, invoking it via subprocess on the host.
    This will return :class:`subprocess.Popen` instance for the command.
@@ -292,6 +304,12 @@ Target
       this may be used to redirect it to an alternative file handle.
    :param as_root: The command will be executed as root. This will fail on
       unrooted targets.
+   :param force_locale: Prepend ``LC_ALL=<force_locale>`` in front of the
+      command to get predictable output that can be more safely parsed.
+      If ``None``, no locale is prepended.
+   :param timeout: Timeout (in seconds) for the execution of the command. When
+      the timeout expires, :meth:`BackgroundCommand.cancel` is executed to
+      terminate the command.

    .. note:: This **will block the connection** until the command completes.
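A hedged usage sketch of the extended background() signature, reusing `target` from the earlier sketch; the command is just an example of a long-running process:

    # Run with a predictable locale; if the command is still running after
    # 30 seconds it is cancelled via BackgroundCommand.cancel().
    bg = target.background('cat /dev/kmsg', force_locale='C', timeout=30)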
@@ -700,7 +718,7 @@ Android Target
 .. method:: AndroidTarget.get_stay_on_mode()

    Returns an integer between ``0`` and ``7`` representing the current
    stay-on mode of the device.

 .. method:: AndroidTarget.ensure_screen_is_off(verify=True)

setup.py

@@ -69,9 +69,13 @@ for root, dirs, files in os.walk(devlib_dir):
         filepaths = [os.path.join(root, f) for f in files]
         data_files[package_name].extend([os.path.relpath(f, package_dir) for f in filepaths])

+with open("README.rst", "r") as fh:
+    long_description = fh.read()
+
 params = dict(
     name='devlib',
-    description='A framework for automating workload execution and measurment collection on ARM devices.',
+    description='A library for interacting with and instrumentation of remote devices.',
+    long_description=long_description,
     version=__version__,
     packages=packages,
     package_data=data_files,
@@ -92,6 +96,7 @@ params = dict(
         'numpy; python_version>="3"',
         'pandas<=0.24.2; python_version<"3"',
         'pandas; python_version>"3"',
+        'lxml', # More robust xml parsing
     ],
     extras_require={
         'daq': ['daqpower>=2'],