Mirror of https://github.com/ARM-software/workload-automation.git (synced 2025-03-21 18:18:41 +00:00)
Fixed WA extensions for LinuxManager

Changed method calls to use devlib naming.

This commit is contained in:
parent 6f0de17201
commit 001239dfe4
@@ -122,7 +122,7 @@ class RecordCommand(Command):
         self.device.killall("revent")

         self.logger.info("Pulling files from device")
-        self.device.pull_file(revent_file, args.output or os.getcwdu())
+        self.device.pull(revent_file, args.output or os.getcwdu())


 class ReplayCommand(RecordCommand):
@@ -144,7 +144,7 @@ class ReplayCommand(RecordCommand):
     # pylint: disable=W0201
     def run(self, args):
         self.logger.info("Pushing file to device")
-        self.device.push_file(args.revent, self.device.working_directory)
+        self.device.push(args.revent, self.device.working_directory)
         revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1])

         if args.clear:
@@ -89,7 +89,7 @@ class UiAutomatorWorkload(Workload):
         for k, v in self.uiauto_params.iteritems():
             params += ' -e {} {}'.format(k, v)
         self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string)
-        self.device.push_file(self.uiauto_file, self.device_uiauto_file)
+        self.device.push(self.uiauto_file, self.device_uiauto_file)
         self.device.killall('uiautomator')

     def run(self, context):
@@ -104,7 +104,7 @@ class UiAutomatorWorkload(Workload):
         pass

     def teardown(self, context):
-        self.device.delete_file(self.device_uiauto_file)
+        self.device.remove(self.device_uiauto_file)

     def validate(self):
         if not self.uiauto_file:
@@ -333,8 +333,8 @@ class ReventWorkload(Workload):
         pass

     def teardown(self, context):
-        self.device.delete_file(self.on_device_setup_revent)
-        self.device.delete_file(self.on_device_run_revent)
+        self.device.remove(self.on_device_setup_revent)
+        self.device.remove(self.on_device_run_revent)

     def _check_revent_files(self, context):
         # check the revent binary
@@ -353,8 +353,8 @@ class ReventWorkload(Workload):
             raise WorkloadError(message)

         self.on_device_revent_binary = self.device.install_executable(revent_binary)
-        self.device.push_file(self.revent_run_file, self.on_device_run_revent)
-        self.device.push_file(self.revent_setup_file, self.on_device_setup_revent)
+        self.device.push(self.revent_run_file, self.on_device_run_revent)
+        self.device.push(self.revent_setup_file, self.on_device_setup_revent)


 class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
@@ -486,7 +486,7 @@ class GameWorkload(ApkWorkload, ReventWorkload):
                 raise WorkloadError(message.format(resource_file, self.name))
             # adb push will create intermediate directories if they don't
             # exist.
-            self.device.push_file(asset_tarball, ondevice_cache, timeout=timeout)
+            self.device.push(asset_tarball, ondevice_cache, timeout=timeout)

         device_asset_directory = self.device.path.join(self.device.external_storage_directory, 'Android', kind)
         deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory,
@@ -61,7 +61,7 @@ we want to push the file to the target device and then change the file mode to
 755 ::

         def setup(self, context):
-            self.device.push_file(BINARY_FILE, self.device.working_directory)
+            self.device.push(BINARY_FILE, self.device.working_directory)
             self.device.execute('chmod 755 {}'.format(self.trace_on_device))

 Then we implemented the start method, which will simply run the file to start
@@ -85,7 +85,7 @@ are metric key, value, unit and lower_is_better, which is a boolean. ::
         def update_result(self, context):
             # pull the trace file to the device
             result = os.path.join(self.device.working_directory, 'trace.txt')
-            self.device.pull_file(result, context.working_directory)
+            self.device.pull(result, context.working_directory)

             # parse the file if needs to be parsed, or add result to
             # context.result
@@ -94,7 +94,7 @@ At the end, we might want to delete any files generated by the instrumentation
 and the code to clear these file goes in teardown method. ::

         def teardown(self, context):
-            self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
+            self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))

 """

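Putting the documentation hunks above together, a rough, self-contained sketch of such an instrument written against the renamed device API; the class name, the 'example-trace' identifier, and the BINARY_FILE path are hypothetical and not part of this commit ::

    import os

    from wlauto import Instrument

    # Hypothetical host-side path to the binary that gets pushed to the target.
    BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')


    class ExampleTraceInstrument(Instrument):
        """Illustrative only: push a binary, run it, pull its output, clean up."""

        name = 'example-trace'

        def setup(self, context):
            self.trace_on_device = self.device.path.join(self.device.working_directory, 'trace')
            self.device.push(BINARY_FILE, self.device.working_directory)
            self.device.execute('chmod 755 {}'.format(self.trace_on_device))

        def update_result(self, context):
            # pull the trace file from the device into the host output directory
            result = os.path.join(self.device.working_directory, 'trace.txt')
            self.device.pull(result, context.working_directory)

        def teardown(self, context):
            self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))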
@@ -47,8 +47,9 @@ class Workload(Extension):
         super(Workload, self).__init__(**kwargs)
         if self.supported_devices and device.name not in self.supported_devices:
             raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
-        if self.supported_platforms and device.platform not in self.supported_platforms:
-            raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform))
+        if self.supported_platforms and device.os not in self.supported_platforms:
+            raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.os))
         self.device = device

     def init_resources(self, context):
@@ -101,4 +102,3 @@ class Workload(Extension):

     def __str__(self):
         return '<Workload {}>'.format(self.name)
-
@@ -197,12 +197,12 @@ class Daq(Instrument):
             raise InstrumentError('GPIO sysfs not enabled on the device.')
         try:
             export_path = self.device.path.join(GPIO_ROOT, 'export')
-            self.device.set_sysfile_value(export_path, self.gpio_sync, verify=False)
+            self.device.write_value(export_path, self.gpio_sync, verify=False)
             pin_root = self.device.path.join(GPIO_ROOT, 'gpio{}'.format(self.gpio_sync))
             direction_path = self.device.path.join(pin_root, 'direction')
-            self.device.set_sysfile_value(direction_path, 'out')
+            self.device.write_value(direction_path, 'out')
             self.gpio_path = self.device.path.join(pin_root, 'value')
-            self.device.set_sysfile_value(self.gpio_path, 0, verify=False)
+            self.device.write_value(self.gpio_path, 0, verify=False)
             signal.connect(self.insert_start_marker, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
             signal.connect(self.insert_stop_marker, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
         except DeviceError as e:
@@ -276,7 +276,7 @@ class Daq(Instrument):
     def finalize(self, context):
         if self.gpio_path:
             unexport_path = self.device.path.join(GPIO_ROOT, 'unexport')
-            self.device.set_sysfile_value(unexport_path, self.gpio_sync, verify=False)
+            self.device.write_value(unexport_path, self.gpio_sync, verify=False)

     def validate(self):  # pylint: disable=too-many-branches
         if not daq:
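The Daq hunks above drive a sync marker through the usual GPIO sysfs sequence: export the pin, set its direction, then write its value. A standalone sketch of that sequence using the renamed write_value() call; the GPIO_ROOT value and the pin number are assumptions for illustration only ::

    GPIO_ROOT = '/sys/class/gpio'  # assumed sysfs GPIO location
    GPIO_SYNC = 21                 # hypothetical pin used for the sync marker

    def setup_sync_gpio(device):
        """Export a GPIO pin and configure it as a low output (illustrative)."""
        device.write_value(device.path.join(GPIO_ROOT, 'export'), GPIO_SYNC, verify=False)
        pin_root = device.path.join(GPIO_ROOT, 'gpio{}'.format(GPIO_SYNC))
        device.write_value(device.path.join(pin_root, 'direction'), 'out')
        gpio_path = device.path.join(pin_root, 'value')
        device.write_value(gpio_path, 0, verify=False)
        return gpio_path

    def release_sync_gpio(device):
        """Unexport the pin again, mirroring Daq.finalize() above."""
        device.write_value(device.path.join(GPIO_ROOT, 'unexport'), GPIO_SYNC, verify=False)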
@@ -39,7 +39,7 @@ class DmesgInstrument(Instrument):
     def setup(self, context):
         if self.loglevel:
             self.old_loglevel = self.device.get_sysfile_value(self.loglevel_file)
-            self.device.set_sysfile_value(self.loglevel_file, self.loglevel, verify=False)
+            self.device.write_value(self.loglevel_file, self.loglevel, verify=False)
         self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before'))
         self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after'))

@@ -57,6 +57,6 @@ class DmesgInstrument(Instrument):

     def teardown(self, context):  # pylint: disable=unused-argument
         if self.loglevel:
-            self.device.set_sysfile_value(self.loglevel_file, self.old_loglevel, verify=False)
+            self.device.write_value(self.loglevel_file, self.old_loglevel, verify=False)

@@ -634,7 +634,7 @@ class EnergyModelInstrument(Instrument):
         self.enable_all_idle_states()
         self.reset_cgroups()
         self.cpuset.move_all_tasks_to(self.measuring_cluster)
-        server_process = 'adbd' if self.device.platform == 'android' else 'sshd'
+        server_process = 'adbd' if self.device.os == 'android' else 'sshd'
         server_pids = self.device.get_pids_of(server_process)
         children_ps = [e for e in self.device.ps()
                        if e.ppid in server_pids and e.name != 'sshd']
@@ -769,7 +769,7 @@ class EnergyModelInstrument(Instrument):
         for tzpath in tzone_paths.strip().split():
             mode_file = '{}/mode'.format(tzpath)
             if self.device.file_exists(mode_file):
-                self.device.set_sysfile_value(mode_file, 'disabled')
+                self.device.write_value(mode_file, 'disabled')

     def get_device_idle_states(self, cluster):
         if cluster == 'big':
@@ -81,7 +81,7 @@ class JunoEnergy(Instrument):
         self.device.killall('readenergy', signal='TERM', as_root=True)

     def update_result(self, context):
-        self.device.pull_file(self.device_output_file, self.host_output_file)
+        self.device.pull(self.device_output_file, self.host_output_file)
         context.add_artifact('junoenergy', self.host_output_file, 'data')

         with open(self.host_output_file) as fh:
@@ -99,7 +99,7 @@ class JunoEnergy(Instrument):
                     context.add_metric(header, value, UNIT_MAP[header.split('_')[-1]])

     def teardown(self, conetext):
-        self.device.delete_file(self.device_output_file)
+        self.device.remove(self.device_output_file)

     def validate(self):
         if self.strict:
@@ -33,9 +33,11 @@ import tarfile
 from itertools import izip, izip_longest
 from subprocess import CalledProcessError

+from devlib.exception import TargetError
+
 from wlauto import Instrument, Parameter
 from wlauto.core import signal
-from wlauto.exceptions import DeviceError, ConfigError
+from wlauto.exceptions import ConfigError
 from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
 from wlauto.utils.misc import ensure_file_directory_exists as _f
 from wlauto.utils.misc import ensure_directory_exists as _d
@@ -134,7 +136,7 @@ class SysfsExtractor(Instrument):
                                 as_root=True, check_exit_code=False)
         else:  # not rooted
             for dev_dir, before_dir, _, _ in self.device_and_host_paths:
-                self.device.pull_file(dev_dir, before_dir)
+                self.device.pull(dev_dir, before_dir)

     def slow_stop(self, context):
         if self.use_tmpfs:
@@ -146,7 +148,7 @@ class SysfsExtractor(Instrument):
                                 as_root=True, check_exit_code=False)
         else:  # not using tmpfs
             for dev_dir, _, after_dir, _ in self.device_and_host_paths:
-                self.device.pull_file(dev_dir, after_dir)
+                self.device.pull(dev_dir, after_dir)

     def update_result(self, context):
         if self.use_tmpfs:
@@ -157,10 +159,10 @@ class SysfsExtractor(Instrument):
                                                     self.tmpfs_mount_point),
                                 as_root=True)
             self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
-            self.device.pull_file(on_device_tarball, on_host_tarball)
+            self.device.pull(on_device_tarball, on_host_tarball)
             with tarfile.open(on_host_tarball, 'r:gz') as tf:
                 tf.extractall(context.output_directory)
-            self.device.delete_file(on_device_tarball)
+            self.device.remove(on_device_tarball)
             os.remove(on_host_tarball)

         for paths in self.device_and_host_paths:
@@ -181,7 +183,7 @@ class SysfsExtractor(Instrument):
         if self.use_tmpfs:
             try:
                 self.device.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
-            except (DeviceError, CalledProcessError):
+            except (TargetError, CalledProcessError):
                 # assume a directory but not mount point
                 pass
             self.device.execute('rm -rf {}'.format(self.tmpfs_mount_point),
@@ -386,4 +388,3 @@ def _diff_sysfs_dirs(before, after, result):  # pylint: disable=R0914
         else:
             dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
             dfh.write(''.join(dchunks))
-
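As the import and except hunks above show, device-side failures are now expected as devlib's TargetError rather than WA's DeviceError. A minimal sketch of the updated error-handling pattern; the helper name and the mount point below are assumptions ::

    from subprocess import CalledProcessError

    from devlib.exception import TargetError

    def cleanup_tmpfs(device, mount_point='/mnt/wa-tmpfs'):  # hypothetical path
        """Unmount if mounted, then remove the directory, tolerating bare directories."""
        try:
            device.execute('umount {}'.format(mount_point), as_root=True)
        except (TargetError, CalledProcessError):
            # assume the path is a plain directory rather than a mount point
            pass
        device.execute('rm -rf {}'.format(mount_point), as_root=True)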
@@ -159,7 +159,7 @@ class NetstatsInstrument(Instrument):
     ]

     def initialize(self, context):
-        if self.device.platform != 'android':
+        if self.device.os != 'android':
             raise DeviceError('nestats instrument only supports on Android devices.')
         apk = context.resolver.get(ApkFile(self))
         self.collector = NetstatsCollector(self.device, apk)  # pylint: disable=attribute-defined-outside-init
@@ -106,7 +106,7 @@ class PerfInstrument(Instrument):
         self.device.kick_off(command)

     def stop(self, context):
-        as_root = self.device.platform == 'android'
+        as_root = self.device.os == 'android'
         self.device.killall('sleep', as_root=as_root)

     def update_result(self, context):
@@ -114,7 +114,7 @@ class PerfInstrument(Instrument):
             device_file = self._get_device_outfile(label)
             host_relpath = os.path.join('perf', os.path.basename(device_file))
             host_file = _f(os.path.join(context.output_directory, host_relpath))
-            self.device.pull_file(device_file, host_file)
+            self.device.pull(device_file, host_file)
             context.add_iteration_artifact(label, kind='raw', path=host_relpath)
             with open(host_file) as fh:
                 in_results_section = False
@@ -165,7 +165,7 @@ class PerfInstrument(Instrument):
     def _clean_device(self):
         for label in self.labels:
             filepath = self._get_device_outfile(label)
-            self.device.delete_file(filepath)
+            self.device.remove(filepath)

     def _get_device_outfile(self, label):
         return self.device.path.join(self.device.working_directory, '{}.out'.format(label))
@@ -91,21 +91,21 @@ class CciPmuLogger(Instrument):
         if self.install_module:
             self.device_driver_file = self.device.path.join(self.device.working_directory, DRIVER)
             host_driver_file = os.path.join(settings.dependencies_directory, DRIVER)
-            self.device.push_file(host_driver_file, self.device_driver_file)
+            self.device.push(host_driver_file, self.device_driver_file)

     def setup(self, context):
         if self.install_module:
             self.device.execute('insmod {}'.format(self.device_driver_file), check_exit_code=False)
-        self.device.set_sysfile_value(CPL_PERIOD_FILE, self.period)
+        self.device.write_value(CPL_PERIOD_FILE, self.period)
         for i, event in enumerate(self.events):
             counter = CPL_BASE + 'counter{}'.format(i)
-            self.device.set_sysfile_value(counter, event, verify=False)
+            self.device.write_value(counter, event, verify=False)

     def start(self, context):
-        self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+        self.device.write_value(CPL_CONTROL_FILE, 1, verify=False)

     def stop(self, context):
-        self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
+        self.device.write_value(CPL_CONTROL_FILE, 1, verify=False)

     # Doing result processing inside teardown because need to make sure that
     # trace-cmd has processed its results and generated the trace.txt
@@ -65,7 +65,7 @@ class ScreenOnInstrument(Instrument):

     def initialize(self, context):
         self.monitor = None
-        if self.device.platform != 'android':
+        if self.device.os != 'android':
             raise InstrumentError('screenon instrument currently only supports Android devices.')

     def slow_setup(self, context):  # slow to run before most other setups
@@ -182,13 +182,13 @@ class StreamlineInstrument(Instrument):
         caiman_path = subprocess.check_output('which caiman', shell=True).strip()  # pylint: disable=E1103
         self.session_file = os.path.join(context.host_working_directory, 'streamline_session.xml')
         with open(self.session_file, 'w') as wfh:
-            if self.device.platform == "android":
+            if self.device.os == "android":
                 wfh.write(SESSION_TEXT_TEMPLATE.format('127.0.0.1', self.port, caiman_path))
             else:
                 wfh.write(SESSION_TEXT_TEMPLATE.format(self.device.host, self.port, caiman_path))

         if self.configuration_file:
-            self.device.push_file(self.configuration_file, self.on_device_config)
+            self.device.push(self.configuration_file, self.on_device_config)
         self._initialize_daemon()

     def setup(self, context):
@@ -220,7 +220,7 @@ class StreamlineInstrument(Instrument):

     def teardown(self, context):
         self._kill_daemon()
-        self.device.delete_file(self.on_device_config)
+        self.device.remove(self.on_device_config)

     def _check_has_valid_display(self):  # pylint: disable=R0201
         reason = None
@@ -243,7 +243,7 @@ class StreamlineInstrument(Instrument):
                 raise
             self.logger.debug('Driver was already installed.')
         self._start_daemon()
-        if self.device.platform == "android":
+        if self.device.os == "android":
             port_spec = 'tcp:{}'.format(self.port)
             self.device.forward_port(port_spec, port_spec)

@@ -166,7 +166,7 @@ class TraceCmdInstrument(Instrument):
             host_file = context.resolver.get(Executable(self, self.device.abi, 'trace-cmd'))
             self.trace_cmd = self.device.install(host_file)
         else:
-            self.trace_cmd = self.device.get_binary_path("trace-cmd")
+            self.trace_cmd = self.device.get_installed("trace-cmd")
             if not self.trace_cmd:
                 raise ConfigError('No trace-cmd found on device and no_install=True is specified.')

@@ -233,7 +233,7 @@ class TraceCmdInstrument(Instrument):
         # Therefore timout for the pull command must also be adjusted
         # accordingly.
         self._pull_timeout = (self.stop_time - self.start_time)  # pylint: disable=attribute-defined-outside-init
-        self.device.pull_file(self.output_file, context.output_directory, timeout=self._pull_timeout)
+        self.device.pull(self.output_file, context.output_directory, timeout=self._pull_timeout)
         context.add_iteration_artifact('bintrace', OUTPUT_TRACE_FILE, kind='data',
                                        description='trace-cmd generated ftrace dump.')

@@ -263,7 +263,7 @@ class TraceCmdInstrument(Instrument):
         self.logger.warning('Could not generate trace.txt.')

     def teardown(self, context):
-        self.device.delete_file(os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))
+        self.device.remove(os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))

     def on_run_end(self, context):
         pass
@@ -282,11 +282,11 @@ class TraceCmdInstrument(Instrument):

     def insert_start_mark(self, context):
         # trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_START in info field
-        self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)
+        self.device.write_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)

     def insert_end_mark(self, context):
         # trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_STOP in info field
-        self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)
+        self.device.write_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)

     def _set_buffer_size(self):
         target_buffer_size = self.buffer_size
@@ -294,7 +294,7 @@ class TraceCmdInstrument(Instrument):
         buffer_size = 0
         floor = 1000 if target_buffer_size > 1000 else target_buffer_size
         while attempt_buffer_size >= floor:
-            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+            self.device.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
             buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
             if buffer_size == attempt_buffer_size:
                 break
@@ -304,7 +304,7 @@ class TraceCmdInstrument(Instrument):
             return
         while attempt_buffer_size < target_buffer_size:
             attempt_buffer_size += self.buffer_size_step
-            self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
+            self.device.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
             buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
             if attempt_buffer_size != buffer_size:
                 self.logger.warning('Failed to set trace buffer size to {}, value set was {}'.format(target_buffer_size, buffer_size))
@@ -316,7 +316,7 @@ class TraceCmdInstrument(Instrument):
             txt_trace_file = os.path.join(self.device.working_directory, OUTPUT_TEXT_FILE)
             command = 'trace-cmd report {} > {}'.format(trace_file, txt_trace_file)
             self.device.execute(command)
-            self.device.pull_file(txt_trace_file, context.output_directory, timeout=self._pull_timeout)
+            self.device.pull(txt_trace_file, context.output_directory, timeout=self._pull_timeout)
         except DeviceError:
             raise InstrumentError('Could not generate TXT report on target.')

@ -1,16 +0,0 @@
|
|||||||
# Copyright 2014-2015 ARM Limited
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
#
|
|
||||||
|
|
||||||
|
|
@ -1,73 +0,0 @@
|
|||||||
# Copyright 2014-2015 ARM Limited
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
#
|
|
||||||
|
|
||||||
|
|
||||||
from wlauto import Module, Parameter
|
|
||||||
from wlauto.utils.serial_port import open_serial_connection
|
|
||||||
|
|
||||||
|
|
||||||
class MbedFanActiveCooling(Module):
|
|
||||||
|
|
||||||
name = 'mbed-fan'
|
|
||||||
description = 'Controls a cooling fan via an mbed connected to a serial port.'
|
|
||||||
|
|
||||||
capabilities = ['active_cooling']
|
|
||||||
|
|
||||||
parameters = [
|
|
||||||
Parameter('port', default='/dev/ttyACM0',
|
|
||||||
description="""The serial port for the active cooling solution (see above)."""),
|
|
||||||
Parameter('buad', kind=int, default=115200,
|
|
||||||
description="""Baud for the serial port (see above)."""),
|
|
||||||
Parameter('fan_pin', kind=int, default=0,
|
|
||||||
description="""Which controller pin on the mbed the fan for the active cooling solution is
|
|
||||||
connected to (controller pin 0 is physical pin 22 on the mbed)."""),
|
|
||||||
]
|
|
||||||
|
|
||||||
timeout = 30
|
|
||||||
|
|
||||||
def start_active_cooling(self):
|
|
||||||
with open_serial_connection(timeout=self.timeout,
|
|
||||||
port=self.port,
|
|
||||||
baudrate=self.buad) as target:
|
|
||||||
target.sendline('motor_{}_1'.format(self.fan_pin))
|
|
||||||
|
|
||||||
def stop_active_cooling(self):
|
|
||||||
with open_serial_connection(timeout=self.timeout,
|
|
||||||
port=self.port,
|
|
||||||
baudrate=self.buad) as target:
|
|
||||||
target.sendline('motor_{}_0'.format(self.fan_pin))
|
|
||||||
|
|
||||||
|
|
||||||
class OdroidXU3ctiveCooling(Module):
|
|
||||||
|
|
||||||
name = 'odroidxu3-fan'
|
|
||||||
description = """
|
|
||||||
Enabled active cooling by controling the fan an Odroid XU3
|
|
||||||
|
|
||||||
.. note:: depending on the kernel used, it may not be possible to turn the fan
|
|
||||||
off completely; in such situations, the fan will be set to its minimum
|
|
||||||
speed.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
capabilities = ['active_cooling']
|
|
||||||
|
|
||||||
def start_active_cooling(self):
|
|
||||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
|
|
||||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 255, verify=False)
|
|
||||||
|
|
||||||
def stop_active_cooling(self):
|
|
||||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
|
|
||||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 1, verify=False)
|
|
@ -1,178 +0,0 @@
|
|||||||
# Copyright 2014-2015 ARM Limited
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
#
|
|
||||||
# pylint: disable=attribute-defined-outside-init
|
|
||||||
import logging
|
|
||||||
|
|
||||||
import wlauto.core.signal as signal
|
|
||||||
from wlauto import Module, Parameter
|
|
||||||
from wlauto.utils.misc import list_to_ranges, isiterable
|
|
||||||
|
|
||||||
|
|
||||||
class CgroupController(object):
|
|
||||||
|
|
||||||
kind = 'cpuset'
|
|
||||||
|
|
||||||
def __new__(cls, arg):
|
|
||||||
if isinstance(arg, cls):
|
|
||||||
return arg
|
|
||||||
else:
|
|
||||||
return object.__new__(cls, arg)
|
|
||||||
|
|
||||||
def __init__(self, mount_name):
|
|
||||||
self.mount_point = None
|
|
||||||
self.mount_name = mount_name
|
|
||||||
self.logger = logging.getLogger(self.kind)
|
|
||||||
|
|
||||||
def mount(self, device, mount_root):
|
|
||||||
self.device = device
|
|
||||||
self.mount_point = device.path.join(mount_root, self.mount_name)
|
|
||||||
mounted = self.device.list_file_systems()
|
|
||||||
if self.mount_point in [e.mount_point for e in mounted]:
|
|
||||||
self.logger.debug('controller is already mounted.')
|
|
||||||
else:
|
|
||||||
self.device.execute('mkdir -p {} 2>/dev/null'.format(self.mount_point),
|
|
||||||
as_root=True)
|
|
||||||
self.device.execute('mount -t cgroup -o {} {} {}'.format(self.kind,
|
|
||||||
self.mount_name,
|
|
||||||
self.mount_point),
|
|
||||||
as_root=True)
|
|
||||||
|
|
||||||
|
|
||||||
class CpusetGroup(object):
|
|
||||||
|
|
||||||
def __init__(self, controller, name, cpus, mems):
|
|
||||||
self.controller = controller
|
|
||||||
self.device = controller.device
|
|
||||||
self.name = name
|
|
||||||
if name == 'root':
|
|
||||||
self.directory = controller.mount_point
|
|
||||||
else:
|
|
||||||
self.directory = self.device.path.join(controller.mount_point, name)
|
|
||||||
self.device.execute('mkdir -p {}'.format(self.directory), as_root=True)
|
|
||||||
self.cpus_file = self.device.path.join(self.directory, 'cpuset.cpus')
|
|
||||||
self.mems_file = self.device.path.join(self.directory, 'cpuset.mems')
|
|
||||||
self.tasks_file = self.device.path.join(self.directory, 'tasks')
|
|
||||||
self.set(cpus, mems)
|
|
||||||
|
|
||||||
def set(self, cpus, mems):
|
|
||||||
if isiterable(cpus):
|
|
||||||
cpus = list_to_ranges(cpus)
|
|
||||||
if isiterable(mems):
|
|
||||||
mems = list_to_ranges(mems)
|
|
||||||
self.device.set_sysfile_value(self.cpus_file, cpus)
|
|
||||||
self.device.set_sysfile_value(self.mems_file, mems)
|
|
||||||
|
|
||||||
def get(self):
|
|
||||||
cpus = self.device.get_sysfile_value(self.cpus_file)
|
|
||||||
mems = self.device.get_sysfile_value(self.mems_file)
|
|
||||||
return (cpus, mems)
|
|
||||||
|
|
||||||
def get_tasks(self):
|
|
||||||
task_ids = self.device.get_sysfile_value(self.tasks_file).split()
|
|
||||||
return map(int, task_ids)
|
|
||||||
|
|
||||||
def add_tasks(self, tasks):
|
|
||||||
for tid in tasks:
|
|
||||||
self.add_task(tid)
|
|
||||||
|
|
||||||
def add_task(self, tid):
|
|
||||||
self.device.set_sysfile_value(self.tasks_file, tid, verify=False)
|
|
||||||
|
|
||||||
|
|
||||||
class CpusetController(CgroupController):
|
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
super(CpusetController, self).__init__(*args, **kwargs)
|
|
||||||
self.groups = {}
|
|
||||||
|
|
||||||
def mount(self, device, mount_root):
|
|
||||||
super(CpusetController, self).mount(device, mount_root)
|
|
||||||
self.create_group('root', self.device.online_cpus, 0)
|
|
||||||
|
|
||||||
def create_group(self, name, cpus, mems):
|
|
||||||
if not hasattr(self, 'device'):
|
|
||||||
raise RuntimeError('Attempting to create group for unmounted controller {}'.format(self.kind))
|
|
||||||
if name in self.groups:
|
|
||||||
raise ValueError('Group {} already exists'.format(name))
|
|
||||||
self.groups[name] = CpusetGroup(self, name, cpus, mems)
|
|
||||||
|
|
||||||
def move_tasks(self, source, dest):
|
|
||||||
try:
|
|
||||||
source_group = self.groups[source]
|
|
||||||
dest_group = self.groups[dest]
|
|
||||||
command = 'for task in $(cat {}); do echo $task>{}; done'
|
|
||||||
self.device.execute(command.format(source_group.tasks_file, dest_group.tasks_file),
|
|
||||||
# this will always fail as some of the tasks
|
|
||||||
# are kthreads that cannot be migrated, but we
|
|
||||||
# don't care about those, so don't check exit
|
|
||||||
# code.
|
|
||||||
check_exit_code=False, as_root=True)
|
|
||||||
except KeyError as e:
|
|
||||||
raise ValueError('Unkown group: {}'.format(e))
|
|
||||||
|
|
||||||
def move_all_tasks_to(self, target_group):
|
|
||||||
for group in self.groups:
|
|
||||||
if group != target_group:
|
|
||||||
self.move_tasks(group, target_group)
|
|
||||||
|
|
||||||
def __getattr__(self, name):
|
|
||||||
try:
|
|
||||||
return self.groups[name]
|
|
||||||
except KeyError:
|
|
||||||
raise AttributeError(name)
|
|
||||||
|
|
||||||
|
|
||||||
class Cgroups(Module):
|
|
||||||
|
|
||||||
name = 'cgroups'
|
|
||||||
description = """
|
|
||||||
Adds cgroups query and manupution APIs to a Device interface.
|
|
||||||
|
|
||||||
Currently, only cpusets controller is supported.
|
|
||||||
|
|
||||||
"""
|
|
||||||
capabilities = ['cgroups']
|
|
||||||
|
|
||||||
controllers = [
|
|
||||||
CpusetController('wa_cpuset'),
|
|
||||||
]
|
|
||||||
|
|
||||||
parameters = [
|
|
||||||
Parameter('cgroup_root', default='/sys/fs/cgroup',
|
|
||||||
description='Location where cgroups are mounted on the device.'),
|
|
||||||
]
|
|
||||||
|
|
||||||
def initialize(self, context):
|
|
||||||
self.device = self.root_owner
|
|
||||||
signal.connect(self._on_device_init, signal.RUN_INIT, priority=1)
|
|
||||||
|
|
||||||
def get_cgroup_controller(self, kind):
|
|
||||||
for controller in self.controllers:
|
|
||||||
if controller.kind == kind:
|
|
||||||
return controller
|
|
||||||
raise ValueError(kind)
|
|
||||||
|
|
||||||
def _on_device_init(self, context): # pylint: disable=unused-argument
|
|
||||||
mounted = self.device.list_file_systems()
|
|
||||||
if self.cgroup_root not in [e.mount_point for e in mounted]:
|
|
||||||
self.device.execute('mount -t tmpfs {} {}'.format('cgroup_root', self.cgroup_root),
|
|
||||||
as_root=True)
|
|
||||||
else:
|
|
||||||
self.logger.debug('cgroup_root already mounted at {}'.format(self.cgroup_root))
|
|
||||||
for controller in self.controllers:
|
|
||||||
if controller.kind in [e.device for e in mounted]:
|
|
||||||
self.logger.debug('controller {} is already mounted.'.format(controller.kind))
|
|
||||||
else:
|
|
||||||
controller.mount(self.device, self.cgroup_root)
|
|
@ -1,450 +0,0 @@
|
|||||||
from wlauto import Module
|
|
||||||
from wlauto.exceptions import ConfigError, DeviceError
|
|
||||||
|
|
||||||
|
|
||||||
# a dict of governor name and a list of it tunables that can't be read
|
|
||||||
WRITE_ONLY_TUNABLES = {
|
|
||||||
'interactive': ['boostpulse']
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class CpufreqModule(Module):
|
|
||||||
|
|
||||||
name = 'devcpufreq'
|
|
||||||
description = """
|
|
||||||
cpufreq-related functionality module for the device. Query and set frequencies, governors, etc.
|
|
||||||
|
|
||||||
APIs in this module break down into three categories: those that operate on cpus, those that
|
|
||||||
operate on cores, and those that operate on clusters.
|
|
||||||
|
|
||||||
"cpu" APIs expect a cpufreq CPU id, which could be either an integer or or a string of the
|
|
||||||
form "cpu0".
|
|
||||||
|
|
||||||
"cluster" APIs expect a cluster ID. This is an integer as defined by the
|
|
||||||
``device.core_clusters`` list.
|
|
||||||
|
|
||||||
"core" APIs expect a core name, as defined by ``device.core_names`` list.
|
|
||||||
|
|
||||||
"""
|
|
||||||
capabilities = ['cpufreq']
|
|
||||||
|
|
||||||
def probe(self, device): # pylint: disable=no-self-use
|
|
||||||
path = '/sys/devices/system/cpu/cpu{}/cpufreq'.format(device.online_cpus[0])
|
|
||||||
return device.file_exists(path)
|
|
||||||
|
|
||||||
def initialize(self, context):
|
|
||||||
# pylint: disable=W0201
|
|
||||||
CpufreqModule._available_governors = {}
|
|
||||||
CpufreqModule._available_governor_tunables = {}
|
|
||||||
CpufreqModule.device = self.root_owner
|
|
||||||
|
|
||||||
def list_available_cpu_governors(self, cpu):
|
|
||||||
"""Returns a list of governors supported by the cpu."""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
if cpu not in self._available_governors:
|
|
||||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
|
|
||||||
output = self.device.get_sysfile_value(sysfile)
|
|
||||||
self._available_governors[cpu] = output.strip().split() # pylint: disable=E1103
|
|
||||||
return self._available_governors[cpu]
|
|
||||||
|
|
||||||
def get_cpu_governor(self, cpu):
|
|
||||||
"""Returns the governor currently set for the specified CPU."""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
|
|
||||||
return self.device.get_sysfile_value(sysfile)
|
|
||||||
|
|
||||||
def set_cpu_governor(self, cpu, governor, **kwargs):
|
|
||||||
"""
|
|
||||||
Set the governor for the specified CPU.
|
|
||||||
See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
|
|
||||||
|
|
||||||
:param cpu: The CPU for which the governor is to be set. This must be
|
|
||||||
the full name as it appears in sysfs, e.g. "cpu0".
|
|
||||||
:param governor: The name of the governor to be used. This must be
|
|
||||||
supported by the specific device.
|
|
||||||
|
|
||||||
Additional keyword arguments can be used to specify governor tunables for
|
|
||||||
governors that support them.
|
|
||||||
|
|
||||||
:note: On big.LITTLE all cores in a cluster must be using the same governor.
|
|
||||||
Setting the governor on any core in a cluster will also set it on all
|
|
||||||
other cores in that cluster.
|
|
||||||
|
|
||||||
:raises: ConfigError if governor is not supported by the CPU.
|
|
||||||
:raises: DeviceError if, for some reason, the governor could not be set.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
supported = self.list_available_cpu_governors(cpu)
|
|
||||||
if governor not in supported:
|
|
||||||
raise ConfigError('Governor {} not supported for cpu {}'.format(governor, cpu))
|
|
||||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
|
|
||||||
self.device.set_sysfile_value(sysfile, governor)
|
|
||||||
self.set_cpu_governor_tunables(cpu, governor, **kwargs)
|
|
||||||
|
|
||||||
def list_available_cpu_governor_tunables(self, cpu):
|
|
||||||
"""Returns a list of tunables available for the governor on the specified CPU."""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
governor = self.get_cpu_governor(cpu)
|
|
||||||
if governor not in self._available_governor_tunables:
|
|
||||||
try:
|
|
||||||
tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
|
|
||||||
self._available_governor_tunables[governor] = self.device.listdir(tunables_path)
|
|
||||||
except DeviceError: # probably an older kernel
|
|
||||||
try:
|
|
||||||
tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
|
|
||||||
self._available_governor_tunables[governor] = self.device.listdir(tunables_path)
|
|
||||||
except DeviceError: # governor does not support tunables
|
|
||||||
self._available_governor_tunables[governor] = []
|
|
||||||
return self._available_governor_tunables[governor]
|
|
||||||
|
|
||||||
def get_cpu_governor_tunables(self, cpu):
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
governor = self.get_cpu_governor(cpu)
|
|
||||||
tunables = {}
|
|
||||||
for tunable in self.list_available_cpu_governor_tunables(cpu):
|
|
||||||
if tunable not in WRITE_ONLY_TUNABLES.get(governor, []):
|
|
||||||
try:
|
|
||||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
|
||||||
tunables[tunable] = self.device.get_sysfile_value(path)
|
|
||||||
except DeviceError: # May be an older kernel
|
|
||||||
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
|
|
||||||
tunables[tunable] = self.device.get_sysfile_value(path)
|
|
||||||
return tunables
|
|
||||||
|
|
||||||
def set_cpu_governor_tunables(self, cpu, governor, **kwargs):
|
|
||||||
"""
|
|
||||||
Set tunables for the specified governor. Tunables should be specified as
|
|
||||||
keyword arguments. Which tunables and values are valid depends on the
|
|
||||||
governor.
|
|
||||||
|
|
||||||
:param cpu: The cpu for which the governor will be set. This must be the
|
|
||||||
full cpu name as it appears in sysfs, e.g. ``cpu0``.
|
|
||||||
:param governor: The name of the governor. Must be all lower case.
|
|
||||||
|
|
||||||
The rest should be keyword parameters mapping tunable name onto the value to
|
|
||||||
be set for it.
|
|
||||||
|
|
||||||
:raises: ConfigError if governor specified is not a valid governor name, or if
|
|
||||||
a tunable specified is not valid for the governor.
|
|
||||||
:raises: DeviceError if could not set tunable.
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
valid_tunables = self.list_available_cpu_governor_tunables(cpu)
|
|
||||||
for tunable, value in kwargs.iteritems():
|
|
||||||
if tunable in valid_tunables:
|
|
||||||
try:
|
|
||||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
|
||||||
self.device.set_sysfile_value(path, value)
|
|
||||||
except DeviceError: # May be an older kernel
|
|
||||||
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
|
|
||||||
self.device.set_sysfile_value(path, value)
|
|
||||||
else:
|
|
||||||
message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
|
|
||||||
message += 'Available tunables are: {}'.format(valid_tunables)
|
|
||||||
raise ConfigError(message)
|
|
||||||
|
|
||||||
def list_available_core_frequencies(self, core):
|
|
||||||
cpu = self.get_core_online_cpu(core)
|
|
||||||
return self.list_available_cpu_frequencies(cpu)
|
|
||||||
|
|
||||||
def list_available_cpu_frequencies(self, cpu):
|
|
||||||
"""Returns a list of frequencies supported by the cpu or an empty list
|
|
||||||
if not could be found."""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
try:
|
|
||||||
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
|
|
||||||
output = self.device.execute(cmd)
|
|
||||||
#available_frequencies = map(int, output.strip().split()) # pylint: disable=E1103
|
|
||||||
available_frequencies = []
|
|
||||||
for f in output.strip().split():
|
|
||||||
try:
|
|
||||||
available_frequencies.append(int(f))
|
|
||||||
except ValueError:
|
|
||||||
pass
|
|
||||||
except DeviceError:
|
|
||||||
# On some devices scaling_available_frequencies is not generated.
|
|
||||||
# http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
|
|
||||||
# Fall back to parsing stats/time_in_state
|
|
||||||
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
|
|
||||||
out_iter = iter(self.device.execute(cmd).strip().split())
|
|
||||||
available_frequencies = map(int, reversed([f for f, _ in zip(out_iter, out_iter)]))
|
|
||||||
return available_frequencies
|
|
||||||
|
|
||||||
def get_cpu_min_frequency(self, cpu):
|
|
||||||
"""
|
|
||||||
Returns the min frequency currently set for the specified CPU.
|
|
||||||
|
|
||||||
Warning, this method does not check if the cpu is online or not. It will
|
|
||||||
try to read the minimum frequency and the following exception will be
|
|
||||||
raised ::
|
|
||||||
|
|
||||||
:raises: DeviceError if for some reason the frequency could not be read.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
|
|
||||||
return self.device.get_sysfile_value(sysfile)
|
|
||||||
|
|
||||||
def set_cpu_min_frequency(self, cpu, frequency):
|
|
||||||
"""
|
|
||||||
Set's the minimum value for CPU frequency. Actual frequency will
|
|
||||||
depend on the Governor used and may vary during execution. The value should be
|
|
||||||
either an int or a string representing an integer. The Value must also be
|
|
||||||
supported by the device. The available frequencies can be obtained by calling
|
|
||||||
get_available_frequencies() or examining
|
|
||||||
|
|
||||||
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
|
|
||||||
|
|
||||||
on the device.
|
|
||||||
|
|
||||||
:raises: ConfigError if the frequency is not supported by the CPU.
|
|
||||||
:raises: DeviceError if, for some reason, frequency could not be set.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
available_frequencies = self.list_available_cpu_frequencies(cpu)
|
|
||||||
try:
|
|
||||||
value = int(frequency)
|
|
||||||
if available_frequencies and value not in available_frequencies:
|
|
||||||
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
|
||||||
value,
|
|
||||||
available_frequencies))
|
|
||||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
|
|
||||||
self.device.set_sysfile_value(sysfile, value)
|
|
||||||
except ValueError:
|
|
||||||
raise ValueError('value must be an integer; got: "{}"'.format(value))
|
|
||||||
|
|
||||||
def get_cpu_frequency(self, cpu):
|
|
||||||
"""
|
|
||||||
Returns the current frequency currently set for the specified CPU.
|
|
||||||
|
|
||||||
Warning, this method does not check if the cpu is online or not. It will
|
|
||||||
try to read the current frequency and the following exception will be
|
|
||||||
raised ::
|
|
||||||
|
|
||||||
:raises: DeviceError if for some reason the frequency could not be read.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
|
|
||||||
return self.device.get_sysfile_value(sysfile)
|
|
||||||
|
|
||||||
def set_cpu_frequency(self, cpu, frequency, exact=True):
|
|
||||||
"""
|
|
||||||
Set's the minimum value for CPU frequency. Actual frequency will
|
|
||||||
depend on the Governor used and may vary during execution. The value should be
|
|
||||||
either an int or a string representing an integer.
|
|
||||||
|
|
||||||
If ``exact`` flag is set (the default), the Value must also be supported by
|
|
||||||
the device. The available frequencies can be obtained by calling
|
|
||||||
get_available_frequencies() or examining
|
|
||||||
|
|
||||||
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
|
|
||||||
|
|
||||||
on the device (if it exists).
|
|
||||||
|
|
||||||
:raises: ConfigError if the frequency is not supported by the CPU.
|
|
||||||
:raises: DeviceError if, for some reason, frequency could not be set.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if isinstance(cpu, int):
|
|
||||||
cpu = 'cpu{}'.format(cpu)
|
|
||||||
try:
|
|
||||||
value = int(frequency)
|
|
||||||
if exact:
|
|
||||||
available_frequencies = self.list_available_cpu_frequencies(cpu)
|
|
||||||
if available_frequencies and value not in available_frequencies:
|
|
||||||
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
|
||||||
value,
|
|
||||||
available_frequencies))
|
|
||||||
if self.get_cpu_governor(cpu) != 'userspace':
|
|
||||||
raise ConfigError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
|
|
||||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
|
|
||||||
self.device.set_sysfile_value(sysfile, value, verify=False)
|
|
||||||
except ValueError:
|
|
||||||
raise ValueError('frequency must be an integer; got: "{}"'.format(value))
|
|
||||||
|
|
||||||
    def get_cpu_max_frequency(self, cpu):
        """
        Returns the max frequency currently set for the specified CPU.

        Warning: this method does not check whether the CPU is online. If the CPU is
        offline, attempting to read its maximum frequency will fail and the following
        exception will be raised:

        :raises: DeviceError if, for some reason, the frequency could not be read.
        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
        return self.device.get_sysfile_value(sysfile)

    def set_cpu_max_frequency(self, cpu, frequency):
        """
        Sets the maximum value for CPU frequency. The actual frequency will depend on
        the governor used and may vary during execution. The value should be either an
        int or a string representing an integer. The value must also be supported by
        the device. The available frequencies can be obtained by calling
        get_available_frequencies() or examining

        /sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies

        on the device.

        :raises: ConfigError if the frequency is not supported by the CPU.
        :raises: DeviceError if, for some reason, the frequency could not be set.

        """
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        available_frequencies = self.list_available_cpu_frequencies(cpu)
        try:
            value = int(frequency)
            if available_frequencies and value not in available_frequencies:
                raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
                                                                                        value,
                                                                                        available_frequencies))
            sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
            self.device.set_sysfile_value(sysfile, value)
        except ValueError:
            raise ValueError('frequency must be an integer; got: "{}"'.format(frequency))

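For governors other than ``userspace``, the usual way to hold a CPU at a fixed operating point is to set the minimum and maximum to the same supported value; a hedged sketch using the two setters above (``cpufreq`` is again an illustrative stand-in)::

    target = cpufreq.list_available_cpu_frequencies(0)[0]   # lowest supported frequency
    cpufreq.set_cpu_min_frequency(0, target)
    cpufreq.set_cpu_max_frequency(0, target)
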
    # Core- and cluster-level mapping for the cpu-level APIs above. These APIs
    # make the following assumptions, which were true for all devices that
    # existed at the time of writing:
    #   1. A cluster can only contain cores of one type.
    #   2. All cores in a cluster are tied to the same DVFS domain, therefore
    #      changes to cpufreq for a core will affect all other cores on the
    #      same cluster.

    def get_core_clusters(self, core, strict=True):
        """Returns the list of clusters that contain the specified core. If ``strict``
        is ``True``, raises ValueError if no clusters have been found (returns an empty
        list if ``strict`` is ``False``)."""
        core_indexes = [i for i, c in enumerate(self.device.core_names) if c == core]
        clusters = sorted(list(set(self.device.core_clusters[i] for i in core_indexes)))
        if strict and not clusters:
            raise ValueError('No active clusters for core {}'.format(core))
        return clusters

    def get_cluster_active_cpu(self, cluster):
        """Returns the first *active* cpu for the cluster. If the entire cluster
        has been hotplugged out, this will raise a ``ValueError``."""
        cpu_indexes = set([i for i, c in enumerate(self.device.core_clusters) if c == cluster])
        active_cpus = sorted(list(cpu_indexes.intersection(self.device.online_cpus)))
        if not active_cpus:
            raise ValueError('All cpus for cluster {} are offline'.format(cluster))
        return active_cpus[0]

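A short sketch of how the core- and cluster-level mapping behaves on a hypothetical big.LITTLE target (the core names and cluster numbers below are invented for illustration; ``cpufreq`` again stands for the object exposing these methods)::

    # device.core_names    == ['a53', 'a53', 'a57', 'a57']
    # device.core_clusters == [0, 0, 1, 1]
    cpufreq.get_core_clusters('a57')       # -> [1]
    cpufreq.get_cluster_active_cpu(1)      # -> 2, the first online cpu in cluster 1
    # The *_core_* setters below simply fan out to every cluster containing that core.
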
    def list_available_core_governors(self, core):
        cpu = self.get_core_online_cpu(core)
        return self.list_available_cpu_governors(cpu)

    def list_available_cluster_governors(self, cluster):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.list_available_cpu_governors(cpu)

    def get_core_governor(self, core):
        cpu = self.get_core_online_cpu(core)
        return self.get_cpu_governor(cpu)

    def set_core_governor(self, core, governor, **tunables):
        for cluster in self.get_core_clusters(core):
            self.set_cluster_governor(cluster, governor, **tunables)

    def get_cluster_governor(self, cluster):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.get_cpu_governor(cpu)

    def set_cluster_governor(self, cluster, governor, **tunables):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.set_cpu_governor(cpu, governor, **tunables)

    def list_available_cluster_governor_tunables(self, cluster):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.list_available_cpu_governor_tunables(cpu)

    def get_cluster_governor_tunables(self, cluster):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.get_cpu_governor_tunables(cpu)

    def set_cluster_governor_tunables(self, cluster, governor, **tunables):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.set_cpu_governor_tunables(cpu, governor, **tunables)

    def get_cluster_min_frequency(self, cluster):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.get_cpu_min_frequency(cpu)

    def set_cluster_min_frequency(self, cluster, freq):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.set_cpu_min_frequency(cpu, freq)

    def get_cluster_cur_frequency(self, cluster):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.get_cpu_frequency(cpu)

    def set_cluster_cur_frequency(self, cluster, freq):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.set_cpu_frequency(cpu, freq)

    def get_cluster_max_frequency(self, cluster):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.get_cpu_max_frequency(cpu)

    def set_cluster_max_frequency(self, cluster, freq):
        cpu = self.get_cluster_active_cpu(cluster)
        return self.set_cpu_max_frequency(cpu, freq)

    def get_core_online_cpu(self, core):
        for cluster in self.get_core_clusters(core):
            try:
                return self.get_cluster_active_cpu(cluster)
            except ValueError:
                pass
        raise ValueError('No active CPUs found for core {}'.format(core))

    def list_available_core_governor_tunables(self, core):
        return self.list_available_cpu_governor_tunables(self.get_core_online_cpu(core))

    def get_core_governor_tunables(self, core):
        return self.get_cpu_governor_tunables(self.get_core_online_cpu(core))

    def set_core_governor_tunables(self, core, tunables):
        for cluster in self.get_core_clusters(core):
            governor = self.get_cluster_governor(cluster)
            self.set_cluster_governor_tunables(cluster, governor, **tunables)

    def get_core_min_frequency(self, core):
        return self.get_cpu_min_frequency(self.get_core_online_cpu(core))

    def set_core_min_frequency(self, core, freq):
        for cluster in self.get_core_clusters(core):
            self.set_cluster_min_frequency(cluster, freq)

    def get_core_cur_frequency(self, core):
        return self.get_cpu_frequency(self.get_core_online_cpu(core))

    def set_core_cur_frequency(self, core, freq):
        for cluster in self.get_core_clusters(core):
            self.set_cluster_cur_frequency(cluster, freq)

    def get_core_max_frequency(self, core):
        return self.get_cpu_max_frequency(self.get_core_online_cpu(core))

    def set_core_max_frequency(self, core, freq):
        for cluster in self.get_core_clusters(core):
            self.set_cluster_max_frequency(cluster, freq)

@ -1,117 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import wlauto.core.signal as signal
from wlauto import Module
from wlauto.exceptions import DeviceError


class CpuidleState(object):

    @property
    def usage(self):
        return self.get('usage')

    @property
    def time(self):
        return self.get('time')

    @property
    def disable(self):
        return self.get('disable')

    @disable.setter
    def disable(self, value):
        self.set('disable', value)

    @property
    def ordinal(self):
        i = len(self.id)
        while self.id[i - 1].isdigit():
            i -= 1
        if not i:
            raise ValueError('invalid idle state name: "{}"'.format(self.id))
        return int(self.id[i:])

    def __init__(self, device, index, path):
        self.device = device
        self.index = index
        self.path = path
        self.id = self.device.path.basename(self.path)
        self.cpu = self.device.path.basename(self.device.path.dirname(path))
        self.desc = self.get('desc')
        self.name = self.get('name')
        self.latency = self.get('latency')
        self.power = self.get('power')

    def get(self, prop):
        property_path = self.device.path.join(self.path, prop)
        return self.device.get_sysfile_value(property_path)

    def set(self, prop, value):
        property_path = self.device.path.join(self.path, prop)
        self.device.set_sysfile_value(property_path, value)

    def __eq__(self, other):
        if isinstance(other, CpuidleState):
            return (self.name == other.name) and (self.desc == other.desc)
        elif isinstance(other, basestring):
            return (self.name == other) or (self.desc == other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)


class Cpuidle(Module):

    name = 'cpuidle'
    description = """
    Adds cpuidle state query and manipulation APIs to a Device interface.

    """
    capabilities = ['cpuidle']

    root_path = '/sys/devices/system/cpu/cpuidle'

    def probe(self, device):
        return device.file_exists(self.root_path)

    def initialize(self, context):
        self.device = self.root_owner
        signal.connect(self._on_device_init, signal.RUN_INIT, priority=1)

    def get_cpuidle_driver(self):
        return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_driver')).strip()

    def get_cpuidle_governor(self):
        return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_governor_ro')).strip()

    def get_cpuidle_states(self, cpu=0):
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        states_dir = self.device.path.join(self.device.path.dirname(self.root_path), cpu, 'cpuidle')
        idle_states = []
        for state in self.device.listdir(states_dir):
            if state.startswith('state'):
                index = int(state[5:])
                idle_states.append(CpuidleState(self.device, index, self.device.path.join(states_dir, state)))
        return idle_states

    def _on_device_init(self, context):  # pylint: disable=unused-argument
        if not self.device.file_exists(self.root_path):
            raise DeviceError('Device kernel does not appear to have cpuidle enabled.')
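A hedged usage sketch of the query side of this module (``cpuidle`` stands for an initialised instance of the module on some device; the actual state names and counts are whatever the kernel exposes)::

    for state in cpuidle.get_cpuidle_states(cpu=0):
        print state.index, state.name, state.latency, state.disable
    # Disable the last state returned for cpu0 (writes the state's "disable" sysfs file
    # via CpuidleState.set).
    cpuidle.get_cpuidle_states(0)[-1].disable = 1
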
@ -1,248 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pylint: disable=attribute-defined-outside-init
import os
import time
import tarfile
import tempfile
import shutil

from wlauto import Module
from wlauto.exceptions import ConfigError, DeviceError
from wlauto.utils.android import fastboot_flash_partition, fastboot_command
from wlauto.utils.serial_port import open_serial_connection
from wlauto.utils.uefi import UefiMenu
from wlauto.utils.misc import merge_dicts


class Flasher(Module):
    """
    Implements a mechanism for flashing a device. The images to be flashed can be
    specified either as a tarball "image bundle" (in which case instructions for
    flashing are provided as flasher-specific metadata also in the bundle), or as
    individual image files, in which case instructions for flashing are specified
    as part of the flashing config.

    .. note:: It is important that, when resolving configuration, concrete flasher
              implementations prioritise settings specified in the config over those
              in the bundle (if they happen to clash).

    """

    capabilities = ['flash']

    def flash(self, image_bundle=None, images=None):
        """
        Flashes the specified device using the specified config. As a post-condition,
        the device must be ready to run workloads upon returning from this method
        (e.g. it must be fully booted into the OS).

        """
        raise NotImplementedError()

class FastbootFlasher(Flasher):

    name = 'fastboot'
    description = """
    Enables automated flashing of images using the fastboot utility.

    To use this flasher, a set of image files to be flashed is required, along with
    a mapping between partitions and image files. There are two ways to specify
    those requirements:

    - Image mapping: in this mode, a mapping between partitions and images is given in the agenda.
    - Image bundle: in this mode, a tarball is specified, which must contain all image files as
      well as a partition file, named ``partitions.txt``, which contains the mapping between
      partitions and images.

    The format of ``partitions.txt`` defines one mapping per line as such: ::

        kernel zImage-dtb
        ramdisk ramdisk_image

    """

    delay = 0.5
    serial_timeout = 30
    partitions_file_name = 'partitions.txt'

    def flash(self, image_bundle=None, images=None):
        self.prelude_done = False
        to_flash = {}
        if image_bundle:  # pylint: disable=access-member-before-definition
            image_bundle = expand_path(image_bundle)
            to_flash = self._bundle_to_images(image_bundle)
        to_flash = merge_dicts(to_flash, images or {}, should_normalize=False)
        for partition, image_path in to_flash.iteritems():
            self.logger.debug('flashing {}'.format(partition))
            self._flash_image(self.owner, partition, expand_path(image_path))
        fastboot_command('reboot')

    def _validate_image_bundle(self, image_bundle):
        if not tarfile.is_tarfile(image_bundle):
            raise ConfigError('File {} is not a tarfile'.format(image_bundle))
        with tarfile.open(image_bundle) as tar:
            files = [tf.name for tf in tar.getmembers()]
            if not any(pf in files for pf in (self.partitions_file_name, '{}/{}'.format(files[0], self.partitions_file_name))):
                raise ConfigError('Image bundle does not contain the required partition file (see documentation)')

    def _bundle_to_images(self, image_bundle):
        """
        Extracts the bundle to a temporary location and creates a mapping between the
        contents of the bundle and the images to be flashed.
        """
        self._validate_image_bundle(image_bundle)
        extract_dir = tempfile.mkdtemp()
        with tarfile.open(image_bundle) as tar:
            tar.extractall(path=extract_dir)
            files = [tf.name for tf in tar.getmembers()]
            if self.partitions_file_name not in files:
                extract_dir = os.path.join(extract_dir, files[0])
        partition_file = os.path.join(extract_dir, self.partitions_file_name)
        return get_mapping(extract_dir, partition_file)

    def _flash_image(self, device, partition, image_path):
        if not self.prelude_done:
            self._fastboot_prelude(device)
        fastboot_flash_partition(partition, image_path)
        time.sleep(self.delay)

    def _fastboot_prelude(self, device):
        with open_serial_connection(port=device.port,
                                    baudrate=device.baudrate,
                                    timeout=self.serial_timeout,
                                    init_dtr=0,
                                    get_conn=False) as target:
            device.reset()
            time.sleep(self.delay)
            target.sendline(' ')
            time.sleep(self.delay)
            target.sendline('fast')
            time.sleep(self.delay)
        self.prelude_done = True

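A minimal sketch of driving this flasher with an explicit partition-to-image mapping (the paths are invented for the example; in a real run the module is owned by a device and the mapping usually comes from the agenda or from a bundle)::

    flasher.flash(image_bundle='~/images/board-image.tar',
                  images={'kernel': '~/images/zImage-dtb',
                          'ramdisk': '~/images/ramdisk_image'})
    # Per the base-class note, entries in ``images`` override those found in the bundle,
    # which is what the merge_dicts() call in flash() above implements.
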
class VersatileExpressFlasher(Flasher):

    name = 'vexpress'
    description = """
    Enables flashing of kernels and firmware to ARM Versatile Express devices.

    This module enables flashing of image bundles or individual images to ARM
    Versatile Express-based devices (e.g. JUNO) via the host-mounted MicroSD on the
    board.

    The bundle, if specified, must reflect the directory structure of the MicroSD
    and will be extracted directly into the location it is mounted on the host. The
    images, if specified, must be a dict mapping the destination path within the
    board's MicroSD to the absolute path of the image on the host; the destination
    path may be either absolute, or relative to the MicroSD mount location.

    """

    def flash(self, image_bundle=None, images=None, recreate_uefi_entry=True):  # pylint: disable=arguments-differ
        device = self.owner
        if not hasattr(device, 'port') or not hasattr(device, 'microsd_mount_point'):
            msg = 'Device {} does not appear to support VExpress flashing.'
            raise ConfigError(msg.format(device.name))
        with open_serial_connection(port=device.port,
                                    baudrate=device.baudrate,
                                    timeout=device.timeout,
                                    init_dtr=0) as target:
            target.sendline('usb_on')  # this will cause the MicroSD to be mounted on the host
            device.wait_for_microsd_mount_point(target)
            self.deploy_images(device, image_bundle, images)

        self.logger.debug('Resetting the device.')
        device.hard_reset()

        with open_serial_connection(port=device.port,
                                    baudrate=device.baudrate,
                                    timeout=device.timeout,
                                    init_dtr=0) as target:
            menu = UefiMenu(target)
            menu.open(timeout=300)
            if recreate_uefi_entry and menu.has_option(device.uefi_entry):
                self.logger.debug('Deleting existing device entry.')
                menu.delete_entry(device.uefi_entry)
                menu.create_entry(device.uefi_entry, device.uefi_config)
            elif not menu.has_option(device.uefi_entry):
                menu.create_entry(device.uefi_entry, device.uefi_config)
            menu.select(device.uefi_entry)
            target.expect(device.android_prompt, timeout=device.timeout)

    def deploy_images(self, device, image_bundle=None, images=None):
        try:
            if image_bundle:
                self.deploy_image_bundle(device, image_bundle)
            if images:
                self.overlay_images(device, images)
            os.system('sync')
        except (IOError, OSError), e:
            msg = 'Could not deploy images to {}; got: {}'
            raise DeviceError(msg.format(device.microsd_mount_point, e))

    def deploy_image_bundle(self, device, bundle):
        self.logger.debug('Validating {}'.format(bundle))
        validate_image_bundle(bundle)
        self.logger.debug('Extracting {} into {}...'.format(bundle, device.microsd_mount_point))
        with tarfile.open(bundle) as tar:
            tar.extractall(device.microsd_mount_point)

    def overlay_images(self, device, images):
        for dest, src in images.iteritems():
            dest = os.path.join(device.microsd_mount_point, dest)
            self.logger.debug('Copying {} to {}'.format(src, dest))
            shutil.copy(src, dest)

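The ``images`` argument described above is a plain dict; as overlay_images() shows, the keys are destination paths on the MicroSD (absolute or relative to the mount point) and the values are host paths. An illustrative value, with invented file names::

    images = {
        'SOFTWARE/kern_mp.bin': '/home/user/juno/Image',
        'SOFTWARE/init_mp.bin': '/home/user/juno/ramdisk.img',
    }
    flasher.flash(image_bundle='/home/user/juno/board_recovery_image.tar.gz', images=images)
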
# utility functions

def get_mapping(base_dir, partition_file):
    mapping = {}
    with open(partition_file) as pf:
        for line in pf:
            pair = line.split()
            if len(pair) != 2:
                raise ConfigError('partitions.txt is not properly formatted')
            image_path = os.path.join(base_dir, pair[1])
            if not os.path.isfile(expand_path(image_path)):
                raise ConfigError('file {} was not found in the bundle or was misplaced'.format(pair[1]))
            mapping[pair[0]] = image_path
    return mapping

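For reference, a two-entry ``partitions.txt`` like the one shown in the fastboot description yields a mapping of partition name to extracted image path (directory name invented for the example)::

    # partitions.txt:
    #     kernel  zImage-dtb
    #     ramdisk ramdisk_image
    get_mapping('/tmp/bundle_abc', '/tmp/bundle_abc/partitions.txt')
    # -> {'kernel': '/tmp/bundle_abc/zImage-dtb', 'ramdisk': '/tmp/bundle_abc/ramdisk_image'}
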
def expand_path(original_path):
    path = os.path.abspath(os.path.expanduser(original_path))
    if not os.path.exists(path):
        raise ConfigError('{} does not exist.'.format(path))
    return path


def validate_image_bundle(bundle):
    if not tarfile.is_tarfile(bundle):
        raise ConfigError('Image bundle {} does not appear to be a valid TAR file.'.format(bundle))
    with tarfile.open(bundle) as tar:
        try:
            tar.getmember('config.txt')
        except KeyError:
            try:
                tar.getmember('./config.txt')
            except KeyError:
                msg = 'Tarball {} does not appear to be a valid image bundle (did not see config.txt).'
                raise ConfigError(msg.format(bundle))
@ -1,55 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time

from wlauto import Module, Parameter
from wlauto.exceptions import DeviceError
from wlauto.utils.netio import KshellConnection


class NetioSwitchReset(Module):

    # pylint: disable=E1101
    name = 'netio_switch'
    description = """
    Enables hard reset of devices connected to a Netio ethernet power switch.
    """
    capabilities = ['reset_power']

    parameters = [
        Parameter('host', default='ippowerbar',
                  description='IP address or DNS name of the Netio power switch.'),
        Parameter('port', kind=int, default=1234,
                  description='Port on which KSHELL is listening.'),
        Parameter('username', default='admin',
                  description='User name for the administrator on the Netio.'),
        Parameter('password', default='admin',
                  description='Password for the administrator on the Netio.'),
        Parameter('psu', kind=int, default=1,
                  description='The device port number on the Netio, i.e. which '
                              'PSU port the device is connected to.'),
    ]

    def hard_reset(self):
        try:
            conn = KshellConnection(host=self.host, port=self.port)
            conn.login(self.username, self.password)
            conn.disable_port(self.psu)
            time.sleep(2)
            conn.enable_port(self.psu)
            conn.close()
        except Exception as e:
            raise DeviceError('Could not reset power: {}'.format(e))
@ -40,7 +40,7 @@ class Androbench(AndroidUiAutoBenchmark):
 dbn = 'databases/history.db'
 db = self.device.path.join(self.device.package_data_directory, self.package, dbn)
 host_results = os.path.join(context.output_directory, 'history.db')
-self.device.pull_file(db, host_results, as_root=True)
+self.device.pull(db, host_results, as_root=True)
 qs = 'select * from history'
 conn = sqlite3.connect(host_results)
 c = conn.cursor()

@ -132,7 +132,7 @@ class ApplaunchWorkload(Workload):
 if self.io_stress:
 host_scheduler_file = os.path.join(context.output_directory, 'scheduler')
 device_scheduler_file = '/sys/block/mmcblk0/queue/scheduler'
-self.device.pull_file(device_scheduler_file, host_scheduler_file)
+self.device.pull(device_scheduler_file, host_scheduler_file)
 with open(host_scheduler_file) as fh:
 scheduler = fh.read()
 scheduler_used = scheduler[scheduler.index("[") + 1:scheduler.index("]")]

@ -144,7 +144,7 @@ class ApplaunchWorkload(Workload):
 if self.set_launcher_affinity:
 self._reset_launcher_affinity()
 if self.cleanup:
-self.device.delete_file(self.device_script_file)
+self.device.remove(self.device_script_file)

 def _set_launcher_affinity(self):
 try:

@ -169,7 +169,7 @@ class ApplaunchWorkload(Workload):
 def _extract_results_from_file(self, context, filename, metric_suffix):
 host_result_file = os.path.join(context.output_directory, filename)
 device_result_file = self.device.path.join(self.device.working_directory, filename)
-self.device.pull_file(device_result_file, host_result_file)
+self.device.pull(device_result_file, host_result_file)

 with open(host_result_file) as fh:
 if filename == 'time.result':

@ -57,7 +57,7 @@ class Audio(Workload):
 self.on_device_file = os.path.join(self.device.working_directory,
 os.path.basename(self.audio_file))

-self.device.push_file(self.audio_file, self.on_device_file, timeout=120)
+self.device.push(self.audio_file, self.on_device_file, timeout=120)

 # Open the browser with default page
 self.device.execute('am start -n com.android.browser/.BrowserActivity about:blank')

@ -75,7 +75,7 @@ class Audio(Workload):

 if self.clear_file_cache:
 self.device.execute('sync')
-self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)
+self.device.write_value('/proc/sys/vm/drop_caches', 3)

 # Start the background music
 self.device.execute('am start -W -S -n com.android.music/.MediaPlaybackActivity -d {}'.format(self.on_device_file))

@ -95,7 +95,7 @@ class Audio(Workload):

 def teardown(self, context):
 if self.perform_cleanup:
-self.device.delete_file(self.on_device_file)
+self.device.remove(self.on_device_file)

 def _download_audio_file(self):
 self.logger.debug('Downloading audio file from {}'.format(DEFAULT_AUDIO_FILE_URL))

@ -59,7 +59,7 @@ class ChromeAutotest(Workload):
 ]

 def setup(self, context):
-if self.device.platform != 'chromeos':
+if self.device.os != 'chromeos':
 raise WorkloadError('{} only supports ChromeOS devices'.format(self.name))
 self.test_that = which('test_that')
 if not self.test_that:

@ -93,12 +93,12 @@ class BBench(Workload):

 if self.with_audio:
 if self.force_dependency_push or not self.device.file_exists(self.audio_on_device):
-self.device.push_file(self.audio_file, self.audio_on_device, timeout=120)
+self.device.push(self.audio_file, self.audio_on_device, timeout=120)

 # Push the bbench site pages and http server to target device
 if self.force_dependency_push or not self.device.file_exists(self.bbench_on_device):
 self.logger.debug('Copying bbench sites to device.')
-self.device.push_file(self.dependencies_directory, self.bbench_on_device, timeout=300)
+self.device.push(self.dependencies_directory, self.bbench_on_device, timeout=300)

 # Push the bbench server
 host_binary = context.resolver.get(Executable(self, self.device.abi, 'bbench_server'))

@ -120,7 +120,7 @@ class BBench(Workload):
 self.device.execute('pm clear {}'.format(self.browser_package))
 if self.clear_file_cache:
 self.device.execute('sync')
-self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)
+self.device.write_value('/proc/sys/vm/drop_caches', 3)

 #On android 6+ the web browser requires permissions to access the sd card
 if self.device.get_sdk_version() >= 23:

@ -148,12 +148,12 @@ class BBench(Workload):

 # Get index_no_input.html
 indexfile = os.path.join(self.device.working_directory, 'bbench/index_noinput.html')
-self.device.pull_file(indexfile, context.output_directory)
+self.device.pull(indexfile, context.output_directory)

 # Get the logs
 output_file = os.path.join(self.device.working_directory, 'browser_bbench_logcat.txt')
 self.device.execute('logcat -v time -d > {}'.format(output_file))
-self.device.pull_file(output_file, context.output_directory)
+self.device.pull(output_file, context.output_directory)

 metrics = _parse_metrics(os.path.join(context.output_directory, 'browser_bbench_logcat.txt'),
 os.path.join(context.output_directory, 'index_noinput.html'),

@ -57,7 +57,7 @@ class Cfbench(AndroidUiAutoBenchmark):
 self.package,
 'shared_prefs', 'eu.chainfire.cfbench_preferences.xml ')
 self.device.execute('cp {} {}'.format(device_results_file, self.device.working_directory), as_root=True)
-self.device.pull_file(os.path.join(self.device.working_directory, 'eu.chainfire.cfbench_preferences.xml'), context.output_directory)
+self.device.pull(os.path.join(self.device.working_directory, 'eu.chainfire.cfbench_preferences.xml'), context.output_directory)
 result_file = os.path.join(context.output_directory, 'eu.chainfire.cfbench_preferences.xml')
 tree = ET.parse(result_file)
 root = tree.getroot()

@ -93,9 +93,9 @@ class Cyclictest(Workload):

 if self.clear_file_cache:
 self.device.execute('sync')
-self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)
+self.device.write_value('/proc/sys/vm/drop_caches', 3)

-if self.device.platform == 'android':
+if self.device.os == 'android':
 if self.screen_off and self.device.is_screen_on:
 self.device.execute('input keyevent 26')

@ -103,7 +103,7 @@ class Cyclictest(Workload):
 self.device.execute(self.cyclictest_command, self.duration * 2, as_root=True)

 def update_result(self, context):
-self.device.pull_file(self.cyclictest_result, context.output_directory)
+self.device.pull(self.cyclictest_result, context.output_directory)

 # Parsing the output
 # Standard Cyclictest Output:

@ -132,7 +132,7 @@ class Cyclictest(Workload):
 context.result.add_metric(full_key, value, 'microseconds')

 def teardown(self, context):
-if self.device.platform == 'android':
+if self.device.os == 'android':
 if self.screen_off:
 self.device.ensure_screen_is_on()
 self.device.execute('rm -f {}'.format(self.cyclictest_result))

@ -71,7 +71,7 @@ class Dex2oatBenchmark(Workload):
 self.command = self.command_template.format(on_device_apk, self.on_device_oat, self.instruction_set)

 if not self.device.file_exists(on_device_apk):
-self.device.push_file(self.apk_file, on_device_apk)
+self.device.push(self.apk_file, on_device_apk)

 def run(self, context):
 self.device.execute(self.command, self.run_timeout)

@ -118,5 +118,4 @@ class Dex2oatBenchmark(Workload):
 context.result.add_metric('dex2oat_comp_time', time, "ms", lower_is_better=True)

 def teardown(self, context):
-self.device.delete_file(self.on_device_oat)
+self.device.remove(self.on_device_oat)

@ -120,7 +120,7 @@ class Dhrystone(Workload):
 context.result.add_metric('total score', total_score)

 def teardown(self, context):
-self.device.uninstall_executable('dhrystone')
+self.device.uninstall('dhrystone')

 def validate(self):
 if self.mloops and self.duration:  # pylint: disable=E0203

@ -67,7 +67,7 @@ class Ebizzy(Workload):
 self.device.execute(self.command, timeout=self.run_timeout)

 def update_result(self, context):
-self.device.pull_file(self.ebizzy_results, context.output_directory)
+self.device.pull(self.ebizzy_results, context.output_directory)

 with open(os.path.join(context.output_directory, results_txt)) as ebizzy_file:
 for line in ebizzy_file:

@ -83,7 +83,7 @@ class Ebizzy(Workload):
 results_match.group('unit'))

 def teardown(self, context):
-self.device.uninstall_executable(self.device_binary)
+self.device.uninstall(self.device_binary)

 def validate(self):
 pass

@ -71,7 +71,7 @@ class Hackbench(Workload):
 self.device.execute(self.command, timeout=self.run_timeout)

 def update_result(self, context):
-self.device.pull_file(self.hackbench_result, context.output_directory)
+self.device.pull(self.hackbench_result, context.output_directory)

 with open(os.path.join(context.output_directory, hackbench_results_txt)) as hackbench_file:
 for line in hackbench_file:

@ -81,7 +81,7 @@ class Hackbench(Workload):
 context.result.add_metric(label, float(match.group(1)), units)

 def teardown(self, context):
-self.device.uninstall_executable(self.binary_name)
+self.device.uninstall(self.binary_name)
 self.device.execute('rm -f {}'.format(self.hackbench_result))

 def validate(self):

@ -105,4 +105,4 @@ class HWUITest(Workload):
 "frames": self.frames})

 def teardown(self, context):
-self.device.uninstall_executable(BINARY)
+self.device.uninstall(BINARY)

@ -44,7 +44,7 @@ class IdleWorkload(Workload):

 def setup(self, context):
 if self.stop_android:
-if self.device.platform != 'android':
+if self.device.os != 'android':
 raise ConfigError('stop_android can only be set for Android devices')
 if not self.device.is_rooted:
 raise WorkloadError('Idle workload requires the device to be rooted in order to stop Android.')

@ -177,12 +177,12 @@ class Iozone(Workload):
 self.device.execute(self.command, timeout=self.timeout)

 def update_result(self, context):
-self.device.pull_file(self.results, context.output_directory)
+self.device.pull(self.results, context.output_directory)
 self.outfile = os.path.join(context.output_directory,
 iozone_results_txt)

 if '-b' in self.other_params:
-self.device.pull_file(self.device_output_file,
+self.device.pull(self.device_output_file,
 context.output_directory)

 # if running in thread mode

@ -313,4 +313,4 @@ class Iozone(Workload):
 return results

 def finalize(self, context):
-self.device.uninstall_executable(self.device_binary)
+self.device.uninstall(self.device_binary)

@ -122,7 +122,7 @@ class lmbench(Workload):
 context.add_artifact('lmbench', 'lmbench.output', 'data')

 def teardown(self, context):
-self.device.uninstall_executable(self.test)
+self.device.uninstall(self.test)

 #
 # Test setup routines

@ -71,7 +71,7 @@ class Peacekeeper(AndroidUiAutoBenchmark):

 # Pull the result page url, which contains the results, from the
 # peacekeeper.txt file and process it
-self.device.pull_file(self.output_file, context.output_directory)
+self.device.pull(self.output_file, context.output_directory)
 result_file = os.path.join(context.output_directory, 'peacekeeper.txt')
 with open(result_file) as fh:
 for line in fh:

@ -68,7 +68,7 @@ class PowerLoadtest(Workload):
 ]

 def setup(self, context):
-if self.device.platform != 'chromeos':
+if self.device.os != 'chromeos':
 raise WorkloadError('{} only supports ChromeOS devices'.format(self.name))
 self.test_that = which('test_that')
 if not self.test_that:

@ -57,8 +57,8 @@ class Recentfling(Workload):
 def setup(self, context):
 self.defs_host = context.resolver.get(File(self, "defs.sh"))
 self.recentfling_host = context.resolver.get(File(self, "recentfling.sh"))
-self.device.push_file(self.recentfling_host, self.device.working_directory)
+self.device.push(self.recentfling_host, self.device.working_directory)
-self.device.push_file(self.defs_host, self.device.working_directory)
+self.device.push(self.defs_host, self.device.working_directory)
 self._kill_recentfling()
 self.device.ensure_screen_is_on()

@ -89,9 +89,9 @@ class Recentfling(Workload):
 classifiers={"loop": count or "Average"})

 def teardown(self, context):
-self.device.delete_file(self.device.path.join(self.device.working_directory,
+self.device.remove(self.device.path.join(self.device.working_directory,
 "recentfling.sh"))
-self.device.delete_file(self.device.path.join(self.device.working_directory,
+self.device.remove(self.device.path.join(self.device.working_directory,
 "defs.sh"))

 def _kill_recentfling(self):

@ -164,7 +164,7 @@ class RtApp(Workload):
 self.host_json_config = self._load_json_config(context)
 self.config_file_on_device = self.device.path.join(self.device_working_directory,
 os.path.basename(self.host_json_config))
-self.device.push_file(self.host_json_config, self.config_file_on_device, timeout=60)
+self.device.push(self.host_json_config, self.config_file_on_device, timeout=60)
 self.command = '{} {}'.format(self.device_binary, self.config_file_on_device)

 time_buffer = 30

@ -216,7 +216,7 @@ class RtApp(Workload):

 def _deploy_rt_app_binary_if_necessary(self):
 # called from initialize() so gets invoked once per run
-RtApp.device_binary = self.device.get_binary_path("rt-app")
+RtApp.device_binary = self.device.get_installed("rt-app")
 if self.force_install or not RtApp.device_binary:
 if not self.host_binary:
 message = '''rt-app is not installed on the device and could not be

@ -274,7 +274,7 @@ class RtApp(Workload):
 self.device.execute(tar_command, timeout=300)
 device_path = self.device.path.join(self.device_working_directory, TARBALL_FILENAME)
 host_path = os.path.join(context.output_directory, TARBALL_FILENAME)
-self.device.pull_file(device_path, host_path, timeout=120)
+self.device.pull(device_path, host_path, timeout=120)
 with tarfile.open(host_path, 'r:gz') as tf:
 tf.extractall(context.output_directory)
 os.remove(host_path)

@ -51,7 +51,7 @@ class ShellScript(Workload):
 def setup(self, context):
 self.on_device_script_file = self.device.path.join(self.device.working_directory,
 os.path.basename(self.script_file))
-self.device.push_file(self.script_file, self.on_device_script_file)
+self.device.push(self.script_file, self.on_device_script_file)
 self.command = 'sh {} {}'.format(self.on_device_script_file, self.argstring)

 def run(self, context):

@ -62,4 +62,4 @@ class ShellScript(Workload):
 wfh.write(self.output)

 def teardown(self, context):
-self.device.delete_file(self.on_device_script_file)
+self.device.remove(self.on_device_script_file)

@ -253,7 +253,7 @@ class Spec2000(Workload):
 if self.force_push_assets or not self.device.file_exists(datadir):
 self.device.execute('mkdir -p {}'.format(datadir))
 for datafile in bench.datafiles:
-self.device.push_file(datafile, self.device.path.join(datadir, os.path.basename(datafile)))
+self.device.push(datafile, self.device.path.join(datadir, os.path.basename(datafile)))

 if self.mode == 'speed':
 cpus = [self._get_fastest_cpu().lower()]

@ -69,7 +69,7 @@ class Stream(Workload):
 self.output = self.device.execute(self.command, timeout=self.timeout)

 def update_result(self, context):
-self.device.pull_file(self.results, context.output_directory)
+self.device.pull(self.results, context.output_directory)
 outfile = os.path.join(context.output_directory, stream_results_txt)

 with open(outfile, 'r') as stream_file:

@ -89,5 +89,5 @@ class Stream(Workload):
 context.result.add_metric(label, float(match.group(1)), match.group(2))

 def finalize(self, context):
-self.device.uninstall_executable(self.stream_default)
+self.device.uninstall(self.stream_default)
-self.device.uninstall_executable(self.stream_optional)
+self.device.uninstall(self.stream_optional)

@ -112,7 +112,7 @@ class Sysbench(Workload):

 def update_result(self, context):
 host_results_file = os.path.join(context.output_directory, 'sysbench_result.txt')
-self.device.pull_file(self.results_file, host_results_file)
+self.device.pull(self.results_file, host_results_file)
 context.add_iteration_artifact('sysbench_output', kind='raw', path=host_results_file)

 with open(host_results_file) as fh:

@ -129,10 +129,10 @@ class Sysbench(Workload):
 extract_threads_fairness_metric('execution time', fh.next(), context.result)

 def teardown(self, context):
-self.device.delete_file(self.results_file)
+self.device.remove(self.results_file)

 def _check_executable(self):
-self.on_device_binary = self.device.get_binary_path("sysbench")
+self.on_device_binary = self.device.get_installed("sysbench")
 if not self.on_device_binary and not self.on_host_binary:
 raise WorkloadError('sysbench binary is not installed on the device, and it is not found on the host.')
 if self.force_install:

@ -189,12 +189,12 @@ class Telemetry(Workload):
 if self.target_config:
 device_opts = self.target_config
 else:
-if self.device.platform == 'chromeos':
+if self.device.os == 'chromeos':
 if '--remote' not in self.run_benchmark_params:
 device_opts += '--remote={} '.format(self.device.host)
 if '--browser' not in self.run_benchmark_params:
 device_opts += '--browser=cros-chrome '
-elif self.device.platform == 'android':
+elif self.device.os == 'android':
 if '--device' not in self.run_benchmark_params and self.device.adb_name:
 device_opts += '--device={} '.format(self.device.adb_name)
 if '--browser' not in self.run_benchmark_params:

@ -108,7 +108,7 @@ class VideoWorkload(Workload):
 on_device_video_file = os.path.join(self.device.working_directory, os.path.basename(self.host_video_file))
 if self.force_dependency_push or not self.device.file_exists(on_device_video_file):
 self.logger.debug('Copying {} to device.'.format(self.host_video_file))
-self.device.push_file(self.host_video_file, on_device_video_file, timeout=120)
+self.device.push(self.host_video_file, on_device_video_file, timeout=120)
 self.device.execute('am start -n com.android.browser/.BrowserActivity about:blank')
 time.sleep(5)
 self.device.execute('am force-stop com.android.browser')