Mirror of https://github.com/ARM-software/workload-automation.git
Synced 2025-09-04 20:32:36 +01:00

Compare commits: v2.5.0...next-worki (11 commits)
Commits (SHA1):

0b9d8f1c5e
a4a428c9ae
d89a52584b
41a3877640
0b1b9d304c
a3962b6323
001239dfe4
6f0de17201
1599c1e0ed
4fc93a8a3c
cd0186d14e
setup.py

@@ -76,7 +76,8 @@ params = dict(
        'pyserial',  # Serial port interface
        'colorama',  # Printing with colors
        'pyYAML',    # YAML-formatted agenda parsing
        'requests',  # Fetch assets over HTTP
        'devlib',    # Interacting with devices
    ],
    extras_require={
        'other': ['jinja2', 'pandas>=0.13.1'],
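
The change above promotes devlib to a mandatory dependency while the optional packages stay under extras_require. As a rough sketch of how the two blocks fit together in a setuptools call (the metadata below is illustrative, not taken from the diff; the real file builds a params dict):

    from setuptools import setup

    setup(
        name='wlauto',                     # illustrative placeholder
        version='0.0.0',                   # illustrative placeholder
        install_requires=[
            'pyserial',   # Serial port interface
            'colorama',   # Printing with colors
            'pyYAML',     # YAML-formatted agenda parsing
            'requests',   # Fetch assets over HTTP
            'devlib',     # Interacting with devices
        ],
        extras_require={
            'other': ['jinja2', 'pandas>=0.13.1'],
        },
    )

Installing with the extra (for example pip install wlauto[other]) pulls in the optional packages on top of the mandatory set.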
@@ -14,7 +14,7 @@
#

from wlauto.core.bootstrap import settings  # NOQA
from wlauto.core.device import Device, RuntimeParameter, CoreParameter  # NOQA
from wlauto.core.device_manager import DeviceManager, RuntimeParameter, CoreParameter  # NOQA
from wlauto.core.command import Command  # NOQA
from wlauto.core.workload import Workload  # NOQA
from wlauto.core.extension import Module, Parameter, Artifact, Alias  # NOQA
@@ -25,8 +25,6 @@ from wlauto.core.resource import ResourceGetter, Resource, GetterPriority, NO_ONE
from wlauto.core.exttype import get_extension_type  # NOQA Note: MUST be imported after other core imports.

from wlauto.common.resources import File, ExtensionAsset, Executable
from wlauto.common.linux.device import LinuxDevice  # NOQA
from wlauto.common.android.device import AndroidDevice, BigLittleDevice  # NOQA
from wlauto.common.android.resources import ApkFile, JarFile
from wlauto.common.android.workload import (UiAutomatorWorkload, ApkWorkload, AndroidBenchmark,  # NOQA
                                            AndroidUiAutoBenchmark, GameWorkload)  # NOQA
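
Extension code that imported the removed classes needs the equivalent update. A hedged before/after sketch of the import rename (the exact replacements for LinuxDevice and AndroidDevice live in the new devlib-backed modules and are not shown in this hunk):

    # Before this change:
    # from wlauto import Device, RuntimeParameter, CoreParameter

    # After this change, the same names come from the device_manager module:
    from wlauto.core.device_manager import DeviceManager, RuntimeParameter, CoreParameter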
@@ -122,7 +122,7 @@ class RecordCommand(Command):
        self.device.killall("revent")

        self.logger.info("Pulling files from device")
        self.device.pull_file(revent_file, args.output or os.getcwdu())
        self.device.pull(revent_file, args.output or os.getcwdu())


class ReplayCommand(RecordCommand):
@@ -144,7 +144,7 @@ class ReplayCommand(RecordCommand):
    # pylint: disable=W0201
    def run(self, args):
        self.logger.info("Pushing file to device")
        self.device.push_file(args.revent, self.device.working_directory)
        self.device.push(args.revent, self.device.working_directory)
        revent_file = self.device.path.join(self.device.working_directory, os.path.split(args.revent)[1])

        if args.clear:
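
The record and replay commands illustrate the broader API migration in this comparison: the WA-specific push_file/pull_file/delete_file device methods give way to the devlib-style push/pull/remove. A hedged before/after sketch for code that drives a device object (host_path, on_device_path and host_dir are placeholder names for illustration):

    # Before (WA 2.x Device API):
    # device.push_file(host_path, on_device_path)
    # device.pull_file(on_device_path, host_dir)
    # device.delete_file(on_device_path)

    # After (devlib-backed API used by the updated commands and workloads):
    device.push(host_path, on_device_path)
    device.pull(on_device_path, host_dir)
    device.remove(on_device_path)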
@@ -1,765 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# pylint: disable=E1101
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import time
|
||||
import tempfile
|
||||
import shutil
|
||||
import threading
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from wlauto.core.extension import Parameter
|
||||
from wlauto.common.linux.device import BaseLinuxDevice, PsEntry
|
||||
from wlauto.exceptions import DeviceError, WorkerThreadError, TimeoutError, DeviceNotRespondingError
|
||||
from wlauto.utils.misc import convert_new_lines
|
||||
from wlauto.utils.types import boolean, regex
|
||||
from wlauto.utils.android import (adb_shell, adb_background_shell, adb_list_devices,
|
||||
adb_command, AndroidProperties, ANDROID_VERSION_MAP)
|
||||
|
||||
|
||||
SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn|Display Power: state)=([0-9]+|true|false|ON|OFF)', re.I)
|
||||
SCREEN_SIZE_REGEX = re.compile(r'mUnrestrictedScreen=\(\d+,\d+\)\s+(?P<width>\d+)x(?P<height>\d+)')
|
||||
|
||||
|
||||
class AndroidDevice(BaseLinuxDevice): # pylint: disable=W0223
|
||||
"""
|
||||
Device running Android OS.
|
||||
|
||||
"""
|
||||
|
||||
platform = 'android'
|
||||
|
||||
parameters = [
|
||||
Parameter('adb_name',
|
||||
description='The unique ID of the device as output by "adb devices".'),
|
||||
Parameter('android_prompt', kind=regex, default=re.compile('^.*(shell|root)@.*:/\S* [#$] ', re.MULTILINE),
|
||||
description='The regex used to match the shell prompt on Android.'),
|
||||
Parameter('working_directory', default='/sdcard/wa-working',
|
||||
description='Directory that will be used by WA on the device for output files etc.'),
|
||||
Parameter('binaries_directory', default='/data/local/tmp', override=True,
|
||||
description='Location of binaries on the device.'),
|
||||
Parameter('package_data_directory', default='/data/data',
|
||||
description='Location of data for an installed package (APK).'),
|
||||
Parameter('external_storage_directory', default='/sdcard',
|
||||
description='Mount point for external storage.'),
|
||||
Parameter('connection', default='usb', allowed_values=['usb', 'ethernet'],
|
||||
description='Specifies the nature of the adb connection.'),
|
||||
Parameter('logcat_poll_period', kind=int,
|
||||
description="""
|
||||
If specified and is not ``0``, logcat will be polled every
|
||||
``logcat_poll_period`` seconds, and buffered on the host. This
|
||||
can be used if a lot of output is expected in logcat and the fixed
|
||||
logcat buffer on the device is not big enough. The trade off is that
|
||||
this introduces some minor runtime overhead. Not set by default.
|
||||
"""),
|
||||
Parameter('enable_screen_check', kind=boolean, default=False,
|
||||
description="""
|
||||
Specifies whether the device should make sure that the screen is on
|
||||
during initialization.
|
||||
"""),
|
||||
Parameter('swipe_to_unlock', kind=str, default=None,
|
||||
allowed_values=[None, "horizontal", "vertical"],
|
||||
description="""
|
||||
If set, a swipe in the specified direction will be performed.
|
||||
This should unlock the screen.
|
||||
"""),
|
||||
]
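# Illustration (not part of the original module): the parameters above are
# what a WA config.py sets through device_config. A hedged sketch, with a
# made-up adb serial number:
#
#     device = 'generic_android'
#     device_config = dict(
#         adb_name='0123456789ABCDEF',        # serial reported by "adb devices"
#         working_directory='/sdcard/wa-working',
#         logcat_poll_period=5,               # buffer logcat on the host every 5 seconds
#         enable_screen_check=True,
#         swipe_to_unlock='horizontal',
#     )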
|
||||
|
||||
default_timeout = 30
|
||||
delay = 2
|
||||
long_delay = 3 * delay
|
||||
ready_timeout = 60
|
||||
|
||||
# Overwritten from Device. For documentation, see corresponding method in
|
||||
# Device.
|
||||
|
||||
@property
|
||||
def is_rooted(self):
|
||||
if self._is_rooted is None:
|
||||
try:
|
||||
result = adb_shell(self.adb_name, 'su', timeout=1)
|
||||
if 'not found' in result:
|
||||
self._is_rooted = False
|
||||
else:
|
||||
self._is_rooted = True
|
||||
except TimeoutError:
|
||||
self._is_rooted = True
|
||||
except DeviceError:
|
||||
self._is_rooted = False
|
||||
return self._is_rooted
|
||||
|
||||
@property
|
||||
def abi(self):
|
||||
return self.getprop()['ro.product.cpu.abi'].split('-')[0]
|
||||
|
||||
@property
|
||||
def supported_eabi(self):
|
||||
props = self.getprop()
|
||||
result = [props['ro.product.cpu.abi']]
|
||||
if 'ro.product.cpu.abi2' in props:
|
||||
result.append(props['ro.product.cpu.abi2'])
|
||||
if 'ro.product.cpu.abilist' in props:
|
||||
for eabi in props['ro.product.cpu.abilist'].split(','):
|
||||
if eabi not in result:
|
||||
result.append(eabi)
|
||||
return result
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(AndroidDevice, self).__init__(**kwargs)
|
||||
self._logcat_poller = None
|
||||
|
||||
def reset(self):
|
||||
self._is_ready = False
|
||||
self._just_rebooted = True
|
||||
adb_command(self.adb_name, 'reboot', timeout=self.default_timeout)
|
||||
|
||||
def hard_reset(self):
|
||||
super(AndroidDevice, self).hard_reset()
|
||||
self._is_ready = False
|
||||
self._just_rebooted = True
|
||||
|
||||
def boot(self, hard=False, **kwargs):
|
||||
if hard:
|
||||
self.hard_reset()
|
||||
else:
|
||||
self.reset()
|
||||
|
||||
def connect(self): # NOQA pylint: disable=R0912
|
||||
iteration_number = 0
|
||||
max_iterations = self.ready_timeout / self.delay
|
||||
available = False
|
||||
self.logger.debug('Polling for device {}...'.format(self.adb_name))
|
||||
while iteration_number < max_iterations:
|
||||
devices = adb_list_devices()
|
||||
if self.adb_name:
|
||||
for device in devices:
|
||||
if device.name == self.adb_name and device.status != 'offline':
|
||||
available = True
|
||||
else: # adb_name not set
|
||||
if len(devices) == 1:
|
||||
available = True
|
||||
elif len(devices) > 1:
|
||||
raise DeviceError('More than one device is connected and adb_name is not set.')
|
||||
|
||||
if available:
|
||||
break
|
||||
else:
|
||||
time.sleep(self.delay)
|
||||
iteration_number += 1
|
||||
else:
|
||||
raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
|
||||
|
||||
while iteration_number < max_iterations:
|
||||
available = (int('0' + (adb_shell(self.adb_name, 'getprop sys.boot_completed', timeout=self.default_timeout))) == 1)
|
||||
if available:
|
||||
break
|
||||
else:
|
||||
time.sleep(self.delay)
|
||||
iteration_number += 1
|
||||
else:
|
||||
raise DeviceError('Could not boot {} ({}).'.format(self.name, self.adb_name))
|
||||
|
||||
if self._just_rebooted:
|
||||
self.logger.debug('Waiting for boot to complete...')
|
||||
# On some devices, adb connection gets reset some time after booting.
|
||||
# This causes errors during execution. To prevent this, open a shell
|
||||
# session and wait for it to be killed. Once its killed, give adb
|
||||
# enough time to restart, and then the device should be ready.
|
||||
# TODO: This is more of a work-around rather than an actual solution.
|
||||
# Need to figure out what is going on the "proper" way of handling it.
|
||||
try:
|
||||
adb_shell(self.adb_name, '', timeout=20)
|
||||
time.sleep(5) # give adb time to re-initialize
|
||||
except TimeoutError:
|
||||
pass # timed out waiting for the session to be killed -- assume not going to be.
|
||||
|
||||
self.logger.debug('Boot completed.')
|
||||
self._just_rebooted = False
|
||||
self._is_ready = True
|
||||
|
||||
def initialize(self, context):
|
||||
if self.is_rooted:
|
||||
self.disable_screen_lock()
|
||||
self.disable_selinux()
|
||||
if self.enable_screen_check:
|
||||
self.ensure_screen_is_on()
|
||||
|
||||
def disconnect(self):
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.close()
|
||||
|
||||
def ping(self):
|
||||
try:
|
||||
# May be triggered inside initialize()
|
||||
adb_shell(self.adb_name, 'ls /', timeout=10)
|
||||
except (TimeoutError, CalledProcessError):
|
||||
raise DeviceNotRespondingError(self.adb_name or self.name)
|
||||
|
||||
def start(self):
|
||||
if self.logcat_poll_period:
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.close()
|
||||
self._logcat_poller = _LogcatPoller(self, self.logcat_poll_period, timeout=self.default_timeout)
|
||||
self._logcat_poller.start()
|
||||
|
||||
def stop(self):
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.stop()
|
||||
|
||||
def get_android_version(self):
|
||||
return ANDROID_VERSION_MAP.get(self.get_sdk_version(), None)
|
||||
|
||||
def get_android_id(self):
|
||||
"""
|
||||
Get the device's ANDROID_ID, which is
|
||||
|
||||
"A 64-bit number (as a hex string) that is randomly generated when the user
|
||||
first sets up the device and should remain constant for the lifetime of the
|
||||
user's device."
|
||||
|
||||
.. note:: This will get reset on userdata erasure.
|
||||
|
||||
"""
|
||||
output = self.execute('content query --uri content://settings/secure --projection value --where "name=\'android_id\'"').strip()
|
||||
return output.split('value=')[-1]
|
||||
|
||||
def get_sdk_version(self):
|
||||
try:
|
||||
return int(self.getprop('ro.build.version.sdk'))
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
|
||||
def get_installed_package_version(self, package):
|
||||
"""
|
||||
Returns the version (versionName) of the specified package if it is installed
|
||||
on the device, or ``None`` otherwise.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
output = self.execute('dumpsys package {}'.format(package))
|
||||
for line in convert_new_lines(output).split('\n'):
|
||||
if 'versionName' in line:
|
||||
return line.split('=', 1)[1]
|
||||
return None
|
||||
|
||||
def list_packages(self):
|
||||
"""
|
||||
List packages installed on the device.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
output = self.execute('pm list packages')
|
||||
output = output.replace('package:', '')
|
||||
return output.split()
|
||||
|
||||
def package_is_installed(self, package_name):
|
||||
"""
|
||||
Returns ``True`` if a package with the specified name is installed on
|
||||
the device, and ``False`` otherwise.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
return package_name in self.list_packages()
|
||||
|
||||
def executable_is_installed(self, executable_name): # pylint: disable=unused-argument,no-self-use
|
||||
raise AttributeError("""Instead of using is_installed, please use
|
||||
``get_binary_path`` or ``install_if_needed`` instead. You should
|
||||
use the path returned by these functions to then invoke the binary
|
||||
|
||||
please see: https://pythonhosted.org/wlauto/writing_extensions.html""")
|
||||
|
||||
def is_installed(self, name):
|
||||
if self.package_is_installed(name):
|
||||
return True
|
||||
elif "." in name: # assumes android packages have a . in their name and binaries documentation
|
||||
return False
|
||||
else:
|
||||
raise AttributeError("""Instead of using is_installed, please use
|
||||
``get_binary_path`` or ``install_if_needed`` instead. You should
|
||||
use the path returned by these functions to then invoke the binary
|
||||
|
||||
please see: https://pythonhosted.org/wlauto/writing_extensions.html""")
|
||||
|
||||
def listdir(self, path, as_root=False, **kwargs):
|
||||
contents = self.execute('ls {}'.format(path), as_root=as_root)
|
||||
return [x.strip() for x in contents.split()]
|
||||
|
||||
def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
"""
|
||||
Modified in version 2.1.4: added ``as_root`` parameter.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root:
|
||||
adb_command(self.adb_name, "push '{}' '{}'".format(source, dest), timeout=timeout)
|
||||
else:
|
||||
device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
|
||||
self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
|
||||
adb_command(self.adb_name, "push '{}' '{}'".format(source, device_tempfile), timeout=timeout)
|
||||
self.execute('cp {} {}'.format(device_tempfile, dest), as_root=True)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
"""
|
||||
Modified in version 2.1.4: added ``as_root`` parameter.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root:
|
||||
adb_command(self.adb_name, "pull '{}' '{}'".format(source, dest), timeout=timeout)
|
||||
else:
|
||||
device_tempfile = self.path.join(self.file_transfer_cache, source.lstrip(self.path.sep))
|
||||
self.execute('mkdir -p {}'.format(self.path.dirname(device_tempfile)))
|
||||
self.execute('cp {} {}'.format(source, device_tempfile), as_root=True)
|
||||
adb_command(self.adb_name, "pull '{}' '{}'".format(device_tempfile, dest), timeout=timeout)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def delete_file(self, filepath, as_root=False): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
adb_shell(self.adb_name, "rm '{}'".format(filepath), as_root=as_root, timeout=self.default_timeout)
|
||||
|
||||
def file_exists(self, filepath):
|
||||
self._check_ready()
|
||||
output = adb_shell(self.adb_name, 'if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath),
|
||||
timeout=self.default_timeout)
|
||||
return bool(int(output))
|
||||
|
||||
def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
return self.install_apk(filepath, timeout)
|
||||
else:
|
||||
return self.install_executable(filepath, with_name)
|
||||
|
||||
def install_apk(self, filepath, timeout=default_timeout): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
return adb_command(self.adb_name, "install {}".format(filepath), timeout=timeout)
|
||||
else:
|
||||
raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath))
|
||||
|
||||
def install_executable(self, filepath, with_name=None):
|
||||
"""
|
||||
Installs a binary executable on device. Returns
|
||||
the path to the installed binary, or ``None`` if the installation has failed.
|
||||
Optionally, ``with_name`` parameter may be used to specify a different name under
|
||||
which the executable will be installed.
|
||||
|
||||
Added in version 2.1.3.
|
||||
Updated in version 2.1.5 with ``with_name`` parameter.
|
||||
|
||||
"""
|
||||
self._ensure_binaries_directory_is_writable()
|
||||
executable_name = with_name or os.path.basename(filepath)
|
||||
on_device_file = self.path.join(self.working_directory, executable_name)
|
||||
on_device_executable = self.path.join(self.binaries_directory, executable_name)
|
||||
self.push_file(filepath, on_device_file)
|
||||
self.execute('cp {} {}'.format(on_device_file, on_device_executable), as_root=self.is_rooted)
|
||||
self.execute('chmod 0777 {}'.format(on_device_executable), as_root=self.is_rooted)
|
||||
return on_device_executable
|
||||
|
||||
def uninstall(self, package):
|
||||
self._check_ready()
|
||||
adb_command(self.adb_name, "uninstall {}".format(package), timeout=self.default_timeout)
|
||||
|
||||
def uninstall_executable(self, executable_name):
|
||||
"""
|
||||
|
||||
Added in version 2.1.3.
|
||||
|
||||
"""
|
||||
on_device_executable = self.get_binary_path(executable_name, search_system_binaries=False)
|
||||
if not on_device_executable:
|
||||
raise DeviceError("Could not uninstall {}, binary not found".format(on_device_executable))
|
||||
self._ensure_binaries_directory_is_writable()
|
||||
self.delete_file(on_device_executable, as_root=self.is_rooted)
|
||||
|
||||
def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
|
||||
as_root=False, busybox=False, **kwargs):
|
||||
"""
|
||||
Execute the specified command on the device using adb.
|
||||
|
||||
Parameters:
|
||||
|
||||
:param command: The command to be executed. It should appear exactly
|
||||
as if you were typing it into a shell.
|
||||
:param timeout: Time, in seconds, to wait for adb to return before aborting
|
||||
and raising an error. Defaults to ``AndroidDevice.default_timeout``.
|
||||
:param check_exit_code: If ``True``, the return code of the command on the Device will
|
||||
be checked and an exception will be raised if it is not 0.
|
||||
Defaults to ``True``.
|
||||
:param background: If ``True``, will execute adb in a subprocess, and will return
|
||||
immediately, not waiting for adb to return. Defaults to ``False``
|
||||
:param busybox: If ``True``, will use busybox to execute the command. Defaults to ``False``.
|
||||
|
||||
Added in version 2.1.3
|
||||
|
||||
.. note:: The device must be rooted to be able to use some busybox features.
|
||||
|
||||
:param as_root: If ``True``, will attempt to execute command in privileged mode. The device
|
||||
must be rooted, otherwise an error will be raised. Defaults to ``False``.
|
||||
|
||||
Added in version 2.1.3
|
||||
|
||||
:returns: If ``background`` parameter is set to ``True``, the subprocess object will
|
||||
be returned; otherwise, the contents of STDOUT from the device will be returned.
|
||||
|
||||
:raises: DeviceError if adb timed out or if the command returned non-zero exit
|
||||
code on the device, or if attempting to execute a command in privileged mode on an
|
||||
unrooted device.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
if as_root and not self.is_rooted:
|
||||
raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
|
||||
if busybox:
|
||||
command = ' '.join([self.busybox, command])
|
||||
if background:
|
||||
return adb_background_shell(self.adb_name, command, as_root=as_root)
|
||||
else:
|
||||
return adb_shell(self.adb_name, command, timeout, check_exit_code, as_root)
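# Usage illustration (not part of the original module), assuming ``device``
# is a connected AndroidDevice instance:
#
#     out = device.execute('dumpsys battery')               # capture stdout
#     device.execute('setenforce 0', as_root=True)          # requires a rooted device
#     proc = device.execute('logcat', background=True)      # returns the subprocess
#     device.execute('grep -c processor /proc/cpuinfo', busybox=True)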
|
||||
|
||||
def kick_off(self, command):
|
||||
"""
|
||||
Like execute but closes adb session and returns immediately, leaving the command running on the
|
||||
device (this is different from execute(background=True) which keeps adb connection open and returns
|
||||
a subprocess object).
|
||||
|
||||
.. note:: This relies on busybox's nohup applet and so won't work on unrooted devices.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
if not self.is_rooted:
|
||||
raise DeviceError('kick_off uses busybox\'s nohup applet and so can only be run on a rooted device.')
|
||||
try:
|
||||
command = 'cd {} && busybox nohup {}'.format(self.working_directory, command)
|
||||
output = self.execute(command, timeout=1, as_root=True)
|
||||
except TimeoutError:
|
||||
pass
|
||||
else:
|
||||
raise ValueError('Background command exited before timeout; got "{}"'.format(output))
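# Usage illustration (not part of the original module): kick_off() suits
# commands that must keep running after the adb session is closed, e.g.
#
#     device.kick_off('./my_soak_test > {}/soak.log 2>&1'.format(device.working_directory))
#
# where my_soak_test is a hypothetical binary already deployed to the
# working directory.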
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
"""Returns a list of PIDs of all processes with the specified name."""
|
||||
result = self.execute('ps | {} grep {}'.format(self.busybox, process_name),
|
||||
check_exit_code=False).strip()
|
||||
if result and 'not found' not in result:
|
||||
return [int(x.split()[1]) for x in result.split('\n')]
|
||||
else:
|
||||
return []
|
||||
|
||||
def ps(self, **kwargs):
|
||||
"""
|
||||
Returns the list of running processes on the device. Keyword arguments may
|
||||
be used to specify simple filters for columns.
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
lines = iter(convert_new_lines(self.execute('ps')).split('\n'))
|
||||
lines.next() # header
|
||||
result = []
|
||||
for line in lines:
|
||||
parts = line.split()
|
||||
if parts:
|
||||
result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
|
||||
if not kwargs:
|
||||
return result
|
||||
else:
|
||||
filtered_result = []
|
||||
for entry in result:
|
||||
if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
|
||||
filtered_result.append(entry)
|
||||
return filtered_result
|
||||
|
||||
def get_properties(self, context):
|
||||
"""Captures and saves the information from /system/build.prop and /proc/version"""
|
||||
props = super(AndroidDevice, self).get_properties(context)
|
||||
props.update(self._get_android_properties(context))
|
||||
return props
|
||||
|
||||
def _get_android_properties(self, context):
|
||||
props = {}
|
||||
props['android_id'] = self.get_android_id()
|
||||
buildprop_file = os.path.join(context.host_working_directory, 'build.prop')
|
||||
if not os.path.isfile(buildprop_file):
|
||||
self.pull_file('/system/build.prop', context.host_working_directory)
|
||||
self._update_build_properties(buildprop_file, props)
|
||||
context.add_run_artifact('build_properties', buildprop_file, 'export')
|
||||
|
||||
dumpsys_target_file = self.path.join(self.working_directory, 'window.dumpsys')
|
||||
dumpsys_host_file = os.path.join(context.host_working_directory, 'window.dumpsys')
|
||||
self.execute('{} > {}'.format('dumpsys window', dumpsys_target_file))
|
||||
self.pull_file(dumpsys_target_file, dumpsys_host_file)
|
||||
context.add_run_artifact('dumpsys_window', dumpsys_host_file, 'meta')
|
||||
return props
|
||||
|
||||
def getprop(self, prop=None):
|
||||
"""Returns parsed output of Android getprop command. If a property is
|
||||
specified, only the value for that property will be returned (with
|
||||
``None`` returned if the property doesn't exist). Otherwise,
|
||||
``wlauto.utils.android.AndroidProperties`` will be returned, which is
|
||||
a dict-like object."""
|
||||
props = AndroidProperties(self.execute('getprop'))
|
||||
if prop:
|
||||
return props[prop]
|
||||
return props
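# Usage illustration (not part of the original module):
#
#     props = device.getprop()                     # dict-like AndroidProperties
#     abi = device.getprop('ro.product.cpu.abi')   # single property value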
|
||||
|
||||
# Android-specific methods. These either rely on specifics of adb or other
|
||||
# Android-only concepts in their interface and/or implementation.
|
||||
|
||||
def forward_port(self, from_port, to_port):
|
||||
"""
|
||||
Forward a port on the device to a port on localhost.
|
||||
|
||||
:param from_port: Port on the device which to forward.
|
||||
:param to_port: Port on the localhost to which the device port will be forwarded.
|
||||
|
||||
Ports should be specified using adb spec. See the "adb forward" section in "adb help".
|
||||
|
||||
"""
|
||||
adb_command(self.adb_name, 'forward {} {}'.format(from_port, to_port), timeout=self.default_timeout)
|
||||
|
||||
def dump_logcat(self, outfile, filter_spec=None):
|
||||
"""
|
||||
Dump the contents of logcat, for the specified filter spec to the
|
||||
specified output file.
|
||||
See http://developer.android.com/tools/help/logcat.html
|
||||
|
||||
:param outfile: Output file on the host into which the contents of the
|
||||
log will be written.
|
||||
:param filter_spec: Logcat filter specification.
|
||||
see http://developer.android.com/tools/debugging/debugging-log.html#filteringOutput
|
||||
|
||||
"""
|
||||
if self._logcat_poller:
|
||||
return self._logcat_poller.write_log(outfile)
|
||||
else:
|
||||
if filter_spec:
|
||||
command = 'logcat -d -s {} > {}'.format(filter_spec, outfile)
|
||||
else:
|
||||
command = 'logcat -d > {}'.format(outfile)
|
||||
return adb_command(self.adb_name, command, timeout=self.default_timeout)
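# Usage illustration (not part of the original module): workloads typically
# flush the log before a run and dump it afterwards, e.g.
#
#     device.clear_logcat()
#     # ... run the workload ...
#     device.dump_logcat('logcat.log', filter_spec='ActivityManager:I *:S')
#
# The filter spec follows the standard logcat tag:priority syntax.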
|
||||
|
||||
def clear_logcat(self):
|
||||
"""Clear (flush) logcat log."""
|
||||
if self._logcat_poller:
|
||||
return self._logcat_poller.clear_buffer()
|
||||
else:
|
||||
return adb_shell(self.adb_name, 'logcat -c', timeout=self.default_timeout)
|
||||
|
||||
def get_screen_size(self):
|
||||
output = self.execute('dumpsys window')
|
||||
match = SCREEN_SIZE_REGEX.search(output)
|
||||
if match:
|
||||
return (int(match.group('width')),
|
||||
int(match.group('height')))
|
||||
else:
|
||||
return (0, 0)
|
||||
|
||||
def perform_unlock_swipe(self):
|
||||
width, height = self.get_screen_size()
|
||||
command = 'input swipe {} {} {} {}'
|
||||
if self.swipe_to_unlock == "horizontal":
|
||||
swipe_heigh = height * 2 // 3
|
||||
start = 100
|
||||
stop = width - start
|
||||
self.execute(command.format(start, swipe_heigh, stop, swipe_heigh))
|
||||
if self.swipe_to_unlock == "vertical":
|
||||
swipe_middle = height / 2
|
||||
swipe_heigh = height * 2 // 3
|
||||
self.execute(command.format(swipe_middle, swipe_heigh, swipe_middle, 0))
|
||||
else: # Should never reach here
|
||||
raise DeviceError("Invalid swipe direction: {}".format(self.swipe_to_unlock))
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
"""Caputers the current device screen into the specified file in a PNG format."""
|
||||
on_device_file = self.path.join(self.working_directory, 'screen_capture.png')
|
||||
self.execute('screencap -p {}'.format(on_device_file))
|
||||
self.pull_file(on_device_file, filepath)
|
||||
self.delete_file(on_device_file)
|
||||
|
||||
def is_screen_on(self):
|
||||
"""Returns ``True`` if the device screen is currently on, ``False`` otherwise."""
|
||||
output = self.execute('dumpsys power')
|
||||
match = SCREEN_STATE_REGEX.search(output)
|
||||
if match:
|
||||
return boolean(match.group(1))
|
||||
else:
|
||||
raise DeviceError('Could not establish screen state.')
|
||||
|
||||
def ensure_screen_is_on(self):
|
||||
if not self.is_screen_on():
|
||||
self.execute('input keyevent 26')
|
||||
if self.swipe_to_unlock:
|
||||
self.perform_unlock_swipe()
|
||||
|
||||
def disable_screen_lock(self):
|
||||
"""
|
||||
Attempts to disable the screen lock on the device.
|
||||
|
||||
.. note:: This does not always work...
|
||||
|
||||
Added in version 2.1.4
|
||||
|
||||
"""
|
||||
lockdb = '/data/system/locksettings.db'
|
||||
sqlcommand = "update locksettings set value='0' where name='screenlock.disabled';"
|
||||
self.execute('sqlite3 {} "{}"'.format(lockdb, sqlcommand), as_root=True)
|
||||
|
||||
def disable_selinux(self):
|
||||
# This may be invoked from initialize() so we can't use execute() or the
|
||||
# standard API for doing this.
|
||||
api_level = int(adb_shell(self.adb_name, 'getprop ro.build.version.sdk',
|
||||
timeout=self.default_timeout).strip())
|
||||
# SELinux was added in Android 4.3 (API level 18). Trying to
|
||||
# 'getenforce' in earlier versions will produce an error.
|
||||
if api_level >= 18:
|
||||
se_status = self.execute('getenforce', as_root=True).strip()
|
||||
if se_status == 'Enforcing':
|
||||
self.execute('setenforce 0', as_root=True)
|
||||
|
||||
def get_device_model(self):
|
||||
try:
|
||||
return self.getprop(prop='ro.product.device')
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
# Internal methods: do not use outside of the class.
|
||||
|
||||
def _update_build_properties(self, filepath, props):
|
||||
try:
|
||||
with open(filepath) as fh:
|
||||
for line in fh:
|
||||
line = re.sub(r'#.*', '', line).strip()
|
||||
if not line:
|
||||
continue
|
||||
key, value = line.split('=', 1)
|
||||
props[key] = value
|
||||
except ValueError:
|
||||
self.logger.warning('Could not parse build.prop.')
|
||||
|
||||
def _update_versions(self, filepath, props):
|
||||
with open(filepath) as fh:
|
||||
text = fh.read()
|
||||
props['version'] = text
|
||||
text = re.sub(r'#.*', '', text).strip()
|
||||
match = re.search(r'^(Linux version .*?)\s*\((gcc version .*)\)$', text)
|
||||
if match:
|
||||
props['linux_version'] = match.group(1).strip()
|
||||
props['gcc_version'] = match.group(2).strip()
|
||||
else:
|
||||
self.logger.warning('Could not parse version string.')
|
||||
|
||||
def _ensure_binaries_directory_is_writable(self):
|
||||
matched = []
|
||||
for entry in self.list_file_systems():
|
||||
if self.binaries_directory.rstrip('/').startswith(entry.mount_point):
|
||||
matched.append(entry)
|
||||
if matched:
|
||||
entry = sorted(matched, key=lambda x: len(x.mount_point))[-1]
|
||||
if 'rw' not in entry.options:
|
||||
self.execute('mount -o rw,remount {} {}'.format(entry.device, entry.mount_point), as_root=True)
|
||||
else:
|
||||
raise DeviceError('Could not find mount point for binaries directory {}'.format(self.binaries_directory))
|
||||
|
||||
|
||||
class _LogcatPoller(threading.Thread):
|
||||
|
||||
join_timeout = 5
|
||||
|
||||
def __init__(self, device, period, timeout=None):
|
||||
super(_LogcatPoller, self).__init__()
|
||||
self.adb_device = device.adb_name
|
||||
self.logger = device.logger
|
||||
self.period = period
|
||||
self.timeout = timeout
|
||||
self.stop_signal = threading.Event()
|
||||
self.lock = threading.RLock()
|
||||
self.buffer_file = tempfile.mktemp()
|
||||
self.last_poll = 0
|
||||
self.daemon = True
|
||||
self.exc = None
|
||||
|
||||
def run(self):
|
||||
self.logger.debug('Starting logcat polling.')
|
||||
try:
|
||||
while True:
|
||||
if self.stop_signal.is_set():
|
||||
break
|
||||
with self.lock:
|
||||
current_time = time.time()
|
||||
if (current_time - self.last_poll) >= self.period:
|
||||
self._poll()
|
||||
time.sleep(0.5)
|
||||
except Exception: # pylint: disable=W0703
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
self.logger.debug('Logcat polling stopped.')
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug('Stopping logcat polling.')
|
||||
self.stop_signal.set()
|
||||
self.join(self.join_timeout)
|
||||
if self.is_alive():
|
||||
self.logger.error('Could not join logcat poller thread.')
|
||||
if self.exc:
|
||||
raise self.exc # pylint: disable=E0702
|
||||
|
||||
def clear_buffer(self):
|
||||
self.logger.debug('Clearing logcat buffer.')
|
||||
with self.lock:
|
||||
adb_shell(self.adb_device, 'logcat -c', timeout=self.timeout)
|
||||
with open(self.buffer_file, 'w') as _: # NOQA
|
||||
pass
|
||||
|
||||
def write_log(self, outfile):
|
||||
self.logger.debug('Writing logbuffer to {}.'.format(outfile))
|
||||
with self.lock:
|
||||
self._poll()
|
||||
if os.path.isfile(self.buffer_file):
|
||||
shutil.copy(self.buffer_file, outfile)
|
||||
else: # there was no logcat trace at this time
|
||||
with open(outfile, 'w') as _: # NOQA
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
self.logger.debug('Closing logcat poller.')
|
||||
if os.path.isfile(self.buffer_file):
|
||||
os.remove(self.buffer_file)
|
||||
|
||||
def _poll(self):
|
||||
with self.lock:
|
||||
self.last_poll = time.time()
|
||||
adb_command(self.adb_device, 'logcat -d >> {}'.format(self.buffer_file), timeout=self.timeout)
|
||||
adb_command(self.adb_device, 'logcat -c', timeout=self.timeout)
|
||||
|
||||
|
||||
class BigLittleDevice(AndroidDevice): # pylint: disable=W0223
|
||||
|
||||
parameters = [
|
||||
Parameter('scheduler', default='hmp', override=True),
|
||||
]
|
@@ -89,7 +89,7 @@ class UiAutomatorWorkload(Workload):
        for k, v in self.uiauto_params.iteritems():
            params += ' -e {} {}'.format(k, v)
        self.command = 'uiautomator runtest {}{} -c {}'.format(self.device_uiauto_file, params, method_string)
        self.device.push_file(self.uiauto_file, self.device_uiauto_file)
        self.device.push(self.uiauto_file, self.device_uiauto_file)
        self.device.killall('uiautomator')

    def run(self, context):
@@ -104,7 +104,7 @@ class UiAutomatorWorkload(Workload):
        pass

    def teardown(self, context):
        self.device.delete_file(self.device_uiauto_file)
        self.device.remove(self.device_uiauto_file)

    def validate(self):
        if not self.uiauto_file:
@@ -188,7 +188,7 @@ class ApkWorkload(Workload):
        self.device.clear_logcat()

    def initialize_package(self, context):
        installed_version = self.device.get_installed_package_version(self.package)
        installed_version = self.device.get_package_version(self.package)
        if self.check_apk:
            self.initialize_with_host_apk(context, installed_version)
        else:
@@ -238,7 +238,7 @@ class ApkWorkload(Workload):

        # As of android API level 23, apps can request permissions at runtime,
        # this will grant all of them so requests do not pop up when running the app
        if self.device.get_sdk_version() >= 23:
        if self.device.os_version['sdk'] >= 23:
            self._grant_requested_permissions()

    def install_apk(self, context):
@@ -281,7 +281,7 @@ class ApkWorkload(Workload):

    def update_result(self, context):
        self.logcat_log = os.path.join(context.output_directory, 'logcat.log')
        self.device.dump_logcat(self.logcat_log)
        context.device_manager.dump_logcat(self.logcat_log)
        context.add_iteration_artifact(name='logcat',
                                       path='logcat.log',
                                       kind='log',
@@ -333,8 +333,8 @@ class ReventWorkload(Workload):
        pass

    def teardown(self, context):
        self.device.delete_file(self.on_device_setup_revent)
        self.device.delete_file(self.on_device_run_revent)
        self.device.remove(self.on_device_setup_revent)
        self.device.remove(self.on_device_run_revent)

    def _check_revent_files(self, context):
        # check the revent binary
@@ -353,8 +353,8 @@ class ReventWorkload(Workload):
            raise WorkloadError(message)

        self.on_device_revent_binary = self.device.install_executable(revent_binary)
        self.device.push_file(self.revent_run_file, self.on_device_run_revent)
        self.device.push_file(self.revent_setup_file, self.on_device_setup_revent)
        self.device.push(self.revent_run_file, self.on_device_run_revent)
        self.device.push(self.revent_setup_file, self.on_device_setup_revent)


class AndroidUiAutoBenchmark(UiAutomatorWorkload, AndroidBenchmark):
@@ -486,9 +486,9 @@ class GameWorkload(ApkWorkload, ReventWorkload):
            raise WorkloadError(message.format(resource_file, self.name))
        # adb push will create intermediate directories if they don't
        # exist.
        self.device.push_file(asset_tarball, ondevice_cache, timeout=timeout)
        self.device.push(asset_tarball, ondevice_cache, timeout=timeout)

        device_asset_directory = self.device.path.join(self.device.external_storage_directory, 'Android', kind)
        device_asset_directory = self.device.path.join(self.context.device_manager.external_storage_directory, 'Android', kind)
        deploy_command = 'cd {} && {} tar -xzf {}'.format(device_asset_directory,
                                                          self.device.busybox,
                                                          ondevice_cache)
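
The API-level-23 hunk above grants requested runtime permissions up front so that permission dialogs do not interrupt an automated run. In shell terms, granting a single permission boils down to "pm grant"; a hedged sketch of what a helper like _grant_requested_permissions() amounts to (the names below are illustrative):

    # permissions would be parsed from the APK or from "dumpsys package <pkg>"
    for permission in requested_permissions:
        device.execute('pm grant {} {}'.format(package_name, permission))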
@@ -1,684 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
|
||||
|
||||
# pylint: disable=E1101
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import tarfile
|
||||
import time
|
||||
from pexpect import EOF, TIMEOUT, pxssh
|
||||
|
||||
from wlauto import settings, Parameter
|
||||
from wlauto.core.resource import NO_ONE
|
||||
from wlauto.common.resources import Executable
|
||||
from wlauto.core import signal as sig
|
||||
from wlauto.exceptions import DeviceError
|
||||
from wlauto.utils import ssh, types
|
||||
|
||||
|
||||
class BaseGem5Device(object):
|
||||
"""
|
||||
Base implementation for a gem5-based device
|
||||
|
||||
This class is used as the base class for OS-specific devices such as the
|
||||
Gem5LinuxDevice and the Gem5AndroidDevice. The majority of the gem5-specific
|
||||
functionality is included here.
|
||||
|
||||
Note: When inheriting from this class, make sure to inherit from this class
|
||||
prior to inheriting from the OS-specific class, i.e. LinuxDevice, to ensure
|
||||
that the methods are correctly overridden.
|
||||
"""
|
||||
# gem5 can be very slow. Hence, we use some very long timeouts!
|
||||
delay = 3600
|
||||
long_delay = 3 * delay
|
||||
ready_timeout = long_delay
|
||||
default_timeout = delay
|
||||
|
||||
platform = None
|
||||
path_module = 'posixpath'
|
||||
|
||||
parameters = [
|
||||
Parameter('gem5_binary', kind=str, default='./build/ARM/gem5.fast',
|
||||
mandatory=False, description="Command used to execute gem5. "
|
||||
"Adjust according to needs."),
|
||||
Parameter('gem5_args', kind=types.arguments, mandatory=True,
|
||||
description="Command line passed to the gem5 simulation. This"
|
||||
" command line is used to set up the simulated system, and "
|
||||
"should be the same as used for a standard gem5 simulation "
|
||||
"without workload automation. Note that this is simulation "
|
||||
"script specific and will hence need to be tailored to each "
|
||||
"particular use case."),
|
||||
Parameter('gem5_vio_args', kind=types.arguments, mandatory=True,
|
||||
constraint=lambda x: "{}" in str(x),
|
||||
description="gem5 VirtIO command line used to enable the "
|
||||
"VirtIO device in the simulated system. At the very least, "
|
||||
"the root parameter of the VirtIO9PDiod device must be "
|
||||
"exposed on the command line. Please set this root mount to "
|
||||
"{}, as it will be replaced with the directory used by "
|
||||
"Workload Automation at runtime."),
|
||||
Parameter('temp_dir', kind=str, default='/tmp',
|
||||
description="Temporary directory used to pass files into the "
|
||||
"gem5 simulation. Workload Automation will automatically "
|
||||
"create a directory in this folder, and will remove it again "
|
||||
"once the simulation completes."),
|
||||
Parameter('checkpoint', kind=bool, default=False,
|
||||
mandatory=False, description="This parameter "
|
||||
"tells Workload Automation to create a checkpoint of the "
|
||||
"simulated system once the guest system has finished booting."
|
||||
" This checkpoint can then be used at a later stage by other "
|
||||
"WA runs to avoid booting the guest system a second time. Set"
|
||||
" to True to take a checkpoint of the simulated system post "
|
||||
"boot."),
|
||||
Parameter('run_delay', kind=int, default=0, mandatory=False,
|
||||
constraint=lambda x: x >= 0,
|
||||
description="This sets the time that the "
|
||||
"system should sleep in the simulated system prior to "
|
||||
"running and workloads or taking checkpoints. This allows "
|
||||
"the system to quieten down prior to running the workloads. "
|
||||
"When this is combined with the checkpoint_post_boot"
|
||||
" option, it allows the checkpoint to be created post-sleep,"
|
||||
" and therefore the set of workloads resuming from this "
|
||||
"checkpoint will not be required to sleep.")
|
||||
]
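# Illustration (not part of the original module): a hedged sketch of a
# device_config exercising the parameters above. The gem5 script, disk
# image and VirtIO flag are made-up examples; only the requirement that
# gem5_vio_args contains "{}" comes from the constraint above:
#
#     device_config = dict(
#         gem5_binary='./build/ARM/gem5.fast',
#         gem5_args='configs/example/fs.py --disk-image=android.img',
#         gem5_vio_args='--virtio-root={}',   # "{}" is replaced with WA's temp dir
#         temp_dir='/tmp',
#         checkpoint=True,                    # checkpoint once the guest has booted
#         run_delay=30,                       # settle for 30 s before running workloads
#     )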
|
||||
|
||||
@property
|
||||
def is_rooted(self): # pylint: disable=R0201
|
||||
# gem5 is always rooted
|
||||
return True
|
||||
|
||||
# pylint: disable=E0203
|
||||
def __init__(self):
|
||||
self.logger = logging.getLogger('gem5Device')
|
||||
|
||||
# The gem5 subprocess
|
||||
self.gem5 = None
|
||||
self.gem5_port = -1
|
||||
self.gem5outdir = os.path.join(settings.output_directory, "gem5")
|
||||
self.m5_path = 'm5'
|
||||
self.stdout_file = None
|
||||
self.stderr_file = None
|
||||
self.stderr_filename = None
|
||||
self.sckt = None
|
||||
|
||||
# Find the first one that does not exist. Ensures that we do not re-use
|
||||
# the directory used by someone else.
|
||||
for i in xrange(sys.maxint):
|
||||
directory = os.path.join(self.temp_dir, "wa_{}".format(i))
|
||||
try:
|
||||
os.stat(directory)
|
||||
continue
|
||||
except OSError:
|
||||
break
|
||||
self.temp_dir = directory
|
||||
self.logger.debug("Using {} as the temporary directory.".format(self.temp_dir))
|
||||
|
||||
# Start the gem5 simulation when WA starts a run using a signal.
|
||||
sig.connect(self.init_gem5, sig.RUN_START)
|
||||
|
||||
def validate(self):
|
||||
# Assemble the virtio args
|
||||
self.gem5_vio_args = str(self.gem5_vio_args).format(self.temp_dir) # pylint: disable=W0201
|
||||
self.logger.debug("gem5 VirtIO command: {}".format(self.gem5_vio_args))
|
||||
|
||||
def init_gem5(self, _):
|
||||
"""
|
||||
Start gem5, find out the telnet port and connect to the simulation.
|
||||
|
||||
We first create the temporary directory used by VirtIO to pass files
|
||||
into the simulation, as well as the gem5 output directory. We then create
|
||||
files for the standard output and error for the gem5 process. The gem5
|
||||
process is then started.
|
||||
"""
|
||||
self.logger.info("Creating temporary directory: {}".format(self.temp_dir))
|
||||
os.mkdir(self.temp_dir)
|
||||
os.mkdir(self.gem5outdir)
|
||||
|
||||
# We need to redirect the standard output and standard error for the
|
||||
# gem5 process to a file so that we can debug when things go wrong.
|
||||
f = os.path.join(self.gem5outdir, 'stdout')
|
||||
self.stdout_file = open(f, 'w')
|
||||
f = os.path.join(self.gem5outdir, 'stderr')
|
||||
self.stderr_file = open(f, 'w')
|
||||
# We need to keep this so we can check which port to use for the telnet
|
||||
# connection.
|
||||
self.stderr_filename = f
|
||||
|
||||
self.start_gem5()
|
||||
|
||||
def start_gem5(self):
|
||||
"""
|
||||
Starts the gem5 simulator, and parses the output to get the telnet port.
|
||||
"""
|
||||
self.logger.info("Starting the gem5 simulator")
|
||||
|
||||
command_line = "{} --outdir={}/gem5 {} {}".format(self.gem5_binary,
|
||||
settings.output_directory,
|
||||
self.gem5_args,
|
||||
self.gem5_vio_args)
|
||||
self.logger.debug("gem5 command line: {}".format(command_line))
|
||||
self.gem5 = subprocess.Popen(command_line.split(),
|
||||
stdout=self.stdout_file,
|
||||
stderr=self.stderr_file)
|
||||
|
||||
while self.gem5_port == -1:
|
||||
# Check that gem5 is running!
|
||||
if self.gem5.poll():
|
||||
raise DeviceError("The gem5 process has crashed with error code {}!".format(self.gem5.poll()))
|
||||
|
||||
# Open the stderr file
|
||||
f = open(self.stderr_filename, 'r')
|
||||
for line in f:
|
||||
m = re.search(r"Listening\ for\ system\ connection\ on\ port\ (?P<port>\d+)", line)
|
||||
if m:
|
||||
port = int(m.group('port'))
|
||||
if port >= 3456 and port < 5900:
|
||||
self.gem5_port = port
|
||||
f.close()
|
||||
break
|
||||
else:
|
||||
time.sleep(1)
|
||||
f.close()
|
||||
|
||||
def connect(self): # pylint: disable=R0912,W0201
|
||||
"""
|
||||
Connect to the gem5 simulation and wait for Android to boot. Then,
|
||||
create checkpoints, and mount the VirtIO device.
|
||||
"""
|
||||
self.connect_gem5()
|
||||
|
||||
self.wait_for_boot()
|
||||
|
||||
if self.run_delay:
|
||||
self.logger.info("Sleeping for {} seconds in the guest".format(self.run_delay))
|
||||
self.gem5_shell("sleep {}".format(self.run_delay))
|
||||
|
||||
if self.checkpoint:
|
||||
self.checkpoint_gem5()
|
||||
|
||||
self.mount_virtio()
|
||||
self.logger.info("Creating the working directory in the simulated system")
|
||||
self.gem5_shell('mkdir -p {}'.format(self.working_directory))
|
||||
self._is_ready = True # pylint: disable=W0201
|
||||
|
||||
def wait_for_boot(self):
|
||||
pass
|
||||
|
||||
def connect_gem5(self): # pylint: disable=R0912
|
||||
"""
|
||||
Connect to the telnet port of the gem5 simulation.
|
||||
|
||||
We connect, and wait for the prompt to be found. We do not use a timeout
|
||||
for this, and wait for the prompt in a while loop as the gem5 simulation
|
||||
can take many hours to reach a prompt when booting the system. We also
|
||||
inject some newlines periodically to try and force gem5 to show a
|
||||
prompt. Once the prompt has been found, we replace it with a unique
|
||||
prompt to ensure that we are able to match it properly. We also disable
|
||||
the echo as this simplifies parsing the output when executing commands
|
||||
on the device.
|
||||
"""
|
||||
self.logger.info("Connecting to the gem5 simulation on port {}".format(self.gem5_port))
|
||||
host = socket.gethostname()
|
||||
port = self.gem5_port
|
||||
|
||||
# Connect to the gem5 telnet port. Use a short timeout here.
|
||||
attempts = 0
|
||||
while attempts < 10:
|
||||
attempts += 1
|
||||
try:
|
||||
self.sckt = ssh.TelnetConnection()
|
||||
self.sckt.login(host, 'None', port=port, auto_prompt_reset=False,
|
||||
login_timeout=10)
|
||||
break
|
||||
except pxssh.ExceptionPxssh:
|
||||
pass
|
||||
else:
|
||||
self.gem5.kill()
|
||||
raise DeviceError("Failed to connect to the gem5 telnet session.")
|
||||
|
||||
self.logger.info("Connected! Waiting for prompt...")
|
||||
|
||||
# We need to find the prompt. It might be different if we are resuming
|
||||
# from a checkpoint. Therefore, we test multiple options here.
|
||||
prompt_found = False
|
||||
while not prompt_found:
|
||||
try:
|
||||
self.login_to_device()
|
||||
except TIMEOUT:
|
||||
pass
|
||||
try:
|
||||
# Try and force a prompt to be shown
|
||||
self.sckt.send('\n')
|
||||
self.sckt.expect([r'# ', self.sckt.UNIQUE_PROMPT, r'\[PEXPECT\][\\\$\#]+ '], timeout=60)
|
||||
prompt_found = True
|
||||
except TIMEOUT:
|
||||
pass
|
||||
|
||||
self.logger.info("Setting unique prompt...")
|
||||
|
||||
self.sckt.set_unique_prompt()
|
||||
self.sckt.prompt()
|
||||
self.logger.info("Prompt found and replaced with a unique string")
|
||||
|
||||
# We check that the prompt is what we think it should be. If not, we
|
||||
# need to update the regex we use to match.
|
||||
self.find_prompt()
|
||||
|
||||
self.sckt.setecho(False)
|
||||
self.sync_gem5_shell()
|
||||
self.resize_shell()
|
||||
|
||||
def get_properties(self, context): # pylint: disable=R0801
|
||||
""" Get the property files from the device """
|
||||
for propfile in self.property_files:
|
||||
try:
|
||||
normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
|
||||
outfile = os.path.join(context.host_working_directory, normname)
|
||||
if self.is_file(propfile):
|
||||
self.execute('cat {} > {}'.format(propfile, normname))
|
||||
self.pull_file(normname, outfile)
|
||||
elif self.is_directory(propfile):
|
||||
self.get_directory(context, propfile)
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
except DeviceError:
|
||||
# We pull these files "opportunistically", so if a pull fails
|
||||
# (e.g. we don't have permissions to read the file), just note
|
||||
# it quietly (not as an error/warning) and move on.
|
||||
self.logger.debug('Could not pull property file "{}"'.format(propfile))
|
||||
return {}
|
||||
|
||||
def get_directory(self, context, directory):
|
||||
""" Pull a directory from the device """
|
||||
normname = directory.lstrip(self.path.sep).replace(self.path.sep, '.')
|
||||
outdir = os.path.join(context.host_working_directory, normname)
|
||||
temp_file = os.path.join(context.host_working_directory, "{}.tar".format(normname))
|
||||
# Check that the folder exists
|
||||
self.gem5_shell("ls -la {}".format(directory))
|
||||
# Compress the folder
|
||||
try:
|
||||
self.gem5_shell("{} tar -cvf {}.tar {}".format(self.busybox, normname, directory))
|
||||
except DeviceError:
|
||||
self.logger.debug("Failed to run tar command on device! Not pulling {}".format(directory))
|
||||
return
|
||||
self.pull_file(normname, temp_file)
|
||||
f = tarfile.open(temp_file, 'r')
|
||||
os.mkdir(outdir)
|
||||
f.extractall(outdir)
|
||||
os.remove(temp_file)
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
""" Returns a list of PIDs of all processes with the specified name. """
|
||||
result = self.gem5_shell('ps | {} grep {}'.format(self.busybox, process_name),
|
||||
check_exit_code=False).strip()
|
||||
if result and 'not found' not in result and len(result.split('\n')) > 2:
|
||||
return [int(x.split()[1]) for x in result.split('\n')]
|
||||
else:
|
||||
return []
|
||||
|
||||
def find_prompt(self):
|
||||
prompt = r'\[PEXPECT\][\\\$\#]+ '
|
||||
synced = False
|
||||
while not synced:
|
||||
self.sckt.send('\n')
|
||||
i = self.sckt.expect([prompt, self.sckt.UNIQUE_PROMPT, r'[\$\#] '], timeout=self.delay)
|
||||
if i == 0:
|
||||
synced = True
|
||||
elif i == 1:
|
||||
prompt = self.sckt.UNIQUE_PROMPT
|
||||
synced = True
|
||||
else:
|
||||
prompt = re.sub(r'\$', r'\\\$', self.sckt.before.strip() + self.sckt.after.strip())
|
||||
prompt = re.sub(r'\#', r'\\\#', prompt)
|
||||
prompt = re.sub(r'\[', r'\[', prompt)
|
||||
prompt = re.sub(r'\]', r'\]', prompt)
|
||||
|
||||
self.sckt.PROMPT = prompt
|
||||
|
||||
def close(self):
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.stop()
|
||||
|
||||
def reset(self):
|
||||
self.logger.warn("Attempt to restart the gem5 device. This is not "
|
||||
"supported!")
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def push_file(self, source, dest, **kwargs):
|
||||
"""
|
||||
Push a file to the gem5 device using VirtIO
|
||||
|
||||
The file to push to the device is copied to the temporary directory on
|
||||
the host, before being copied within the simulation to the destination.
|
||||
Checks, in the form of 'ls' with error code checking, are performed to
|
||||
ensure that the file is copied to the destination.
|
||||
"""
|
||||
filename = os.path.basename(source)
|
||||
self.logger.debug("Pushing {} to device.".format(source))
|
||||
self.logger.debug("temp_dir: {}".format(self.temp_dir))
|
||||
self.logger.debug("dest: {}".format(dest))
|
||||
self.logger.debug("filename: {}".format(filename))
|
||||
|
||||
# We need to copy the file to copy to the temporary directory
|
||||
self.move_to_temp_dir(source)
|
||||
|
||||
# Back to the gem5 world
|
||||
self.gem5_shell("ls -al /mnt/obb/{}".format(filename))
|
||||
if self.busybox:
|
||||
self.gem5_shell("{} cp /mnt/obb/{} {}".format(self.busybox, filename, dest))
|
||||
else:
|
||||
self.gem5_shell("cat /mnt/obb/{} > {}".format(filename, dest))
|
||||
self.gem5_shell("sync")
|
||||
self.gem5_shell("ls -al {}".format(dest))
|
||||
self.gem5_shell("ls -al /mnt/obb/")
|
||||
self.logger.debug("Push complete.")
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def pull_file(self, source, dest, **kwargs):
|
||||
"""
|
||||
Pull a file from the gem5 device using m5 writefile
|
||||
|
||||
The file is copied to the local directory within the guest as the m5
|
||||
writefile command assumes that the file is local. The file is then
|
||||
written out to the host system using writefile, prior to being moved to
|
||||
the destination on the host.
|
||||
"""
|
||||
filename = os.path.basename(source)
|
||||
|
||||
self.logger.debug("pull_file {} {}".format(source, filename))
|
||||
# We don't check the exit code here because it is non-zero if the source
|
||||
# and destination are the same. The ls below will cause an error if the
|
||||
# file was not where we expected it to be.
|
||||
self.gem5_shell("{} cp {} {}".format(self.busybox, source, filename),
|
||||
check_exit_code=False)
|
||||
self.gem5_shell("sync")
|
||||
self.gem5_shell("ls -la {}".format(filename))
|
||||
self.logger.debug('Finished the copy in the simulator')
|
||||
self.gem5_util("writefile {}".format(filename))
|
||||
|
||||
if 'cpu' not in filename:
|
||||
while not os.path.exists(os.path.join(self.gem5outdir, filename)):
|
||||
time.sleep(1)
|
||||
|
||||
# Perform the local move
|
||||
shutil.move(os.path.join(self.gem5outdir, filename), dest)
|
||||
self.logger.debug("Pull complete.")
|
||||
|
||||
# pylint: disable=unused-argument
|
||||
def delete_file(self, filepath, **kwargs):
|
||||
""" Delete a file on the device """
|
||||
self._check_ready()
|
||||
self.gem5_shell("rm '{}'".format(filepath))
|
||||
|
||||
def file_exists(self, filepath):
|
||||
""" Check if a file exists """
|
||||
self._check_ready()
|
||||
output = self.gem5_shell('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
try:
|
||||
if int(output):
|
||||
return True
|
||||
except ValueError:
|
||||
# If we cannot process the output, assume that there is no file
|
||||
pass
|
||||
return False
|
||||
|
||||
def disconnect(self):
|
||||
"""
|
||||
Close and disconnect from the gem5 simulation. Additionally, we remove
|
||||
the temporary directory used to pass files into the simulation.
|
||||
"""
|
||||
self.logger.info("Gracefully terminating the gem5 simulation.")
|
||||
try:
|
||||
self.gem5_util("exit")
|
||||
self.gem5.wait()
|
||||
except EOF:
|
||||
pass
|
||||
self.logger.info("Removing the temporary directory")
|
||||
try:
|
||||
shutil.rmtree(self.temp_dir)
|
||||
except OSError:
|
||||
self.logger.warn("Failed to remove the temporary directory!")
|
||||
|
||||
# gem5 might be slow. Hence, we need to make the ping timeout very long.
|
||||
def ping(self):
|
||||
self.logger.debug("Pinging gem5 to see if it is still alive")
|
||||
self.gem5_shell('ls /', timeout=self.long_delay)
|
||||
|
||||
# Additional Android-specific methods.
|
||||
def forward_port(self, _): # pylint: disable=R0201
|
||||
raise DeviceError('we do not need forwarding')
|
||||
|
||||
# gem5 should dump out a framebuffer. We can use this if it exists. Failing
|
||||
# that, fall back to the parent class implementation.
|
||||
def capture_screen(self, filepath):
|
||||
file_list = os.listdir(self.gem5outdir)
|
||||
screen_caps = []
|
||||
for f in file_list:
|
||||
if '.bmp' in f:
|
||||
screen_caps.append(f)
|
||||
|
||||
if len(screen_caps) == 1:
|
||||
# Bail out if we do not have image, and resort to the slower, built
|
||||
# in method.
|
||||
try:
|
||||
import Image
|
||||
gem5_image = os.path.join(self.gem5outdir, screen_caps[0])
|
||||
temp_image = os.path.join(self.gem5outdir, "file.png")
|
||||
im = Image.open(gem5_image)
|
||||
im.save(temp_image, "PNG")
|
||||
shutil.copy(temp_image, filepath)
|
||||
os.remove(temp_image)
|
||||
self.logger.debug("capture_screen: using gem5 screencap")
|
||||
return True
|
||||
except (shutil.Error, ImportError, IOError):
|
||||
pass
|
||||
return False
|
||||
|
||||
# pylint: disable=W0613
|
||||
def execute(self, command, timeout=1000, check_exit_code=True, background=False,
|
||||
as_root=False, busybox=False, **kwargs):
|
||||
self._check_ready()
|
||||
if as_root and not self.is_rooted:
|
||||
raise DeviceError('Attempting to execute "{}" as root on unrooted device.'.format(command))
|
||||
if busybox:
|
||||
if not self.is_rooted:
|
||||
raise DeviceError('Attempting to execute "{}" with busybox. '.format(command) +
|
||||
'Busybox can only be deployed to rooted devices.')
|
||||
command = ' '.join([self.busybox, command])
|
||||
if background:
|
||||
self.logger.debug("Attempt to execute in background. Not supported "
|
||||
"in gem5, hence ignored.")
|
||||
return self.gem5_shell(command, as_root=as_root)
|
||||
|
||||
# Internal methods: do not use outside of the class.
|
||||
|
||||
def _check_ready(self):
|
||||
"""
|
||||
Check if the device is ready.
|
||||
|
||||
As this is gem5, we just assume that the device is ready once we have
|
||||
connected to the gem5 simulation, and updated the prompt.
|
||||
"""
|
||||
if not self._is_ready:
|
||||
raise DeviceError('Device not ready.')
|
||||
|
||||
def gem5_shell(self, command, as_root=False, timeout=None, check_exit_code=True, sync=True): # pylint: disable=R0912
|
||||
"""
|
||||
Execute a command in the gem5 shell
|
||||
|
||||
This wraps the telnet connection to gem5 and processes the raw output.
|
||||
|
||||
This method waits for the shell to return, and then will try and
|
||||
separate the output from the command from the command itself. If this
|
||||
fails, warn, but continue with the potentially wrong output.
|
||||
|
||||
The exit code is also checked by default, and non-zero exit codes will
|
||||
raise a DeviceError.
|
||||
"""
|
||||
conn = self.sckt
|
||||
if sync:
|
||||
self.sync_gem5_shell()
|
||||
|
||||
self.logger.debug("gem5_shell command: {}".format(command))
|
||||
|
||||
# Send the actual command
|
||||
conn.send("{}\n".format(command))
|
||||
|
||||
# Wait for the response. We just sit here and wait for the prompt to
|
||||
# appear, as gem5 might take a long time to provide the output. This
|
||||
# avoids timeout issues.
|
||||
command_index = -1
|
||||
while command_index == -1:
|
||||
if conn.prompt():
|
||||
output = re.sub(r' \r([^\n])', r'\1', conn.before)
|
||||
output = re.sub(r'[\b]', r'', output)
|
||||
# Deal with line wrapping
|
||||
output = re.sub(r'[\r].+?<', r'', output)
|
||||
command_index = output.find(command)
|
||||
|
||||
# If we have -1, then we cannot match the command, but the
|
||||
# prompt has returned. Hence, we have a bit of an issue. We
|
||||
# warn, and return the whole output.
|
||||
if command_index == -1:
|
||||
self.logger.warn("gem5_shell: Unable to match command in "
|
||||
"command output. Expect parsing errors!")
|
||||
command_index = 0
|
||||
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
# It is possible that gem5 will echo the command. Therefore, we need to
|
||||
# remove that too!
|
||||
command_index = output.find(command)
|
||||
if command_index != -1:
|
||||
output = output[command_index + len(command):].strip()
|
||||
|
||||
self.logger.debug("gem5_shell output: {}".format(output))
|
||||
|
||||
# We get a second prompt. Hence, we need to eat one to make sure that we
|
||||
# stay in sync. If we do not do this, we risk getting out of sync for
|
||||
# slower simulations.
|
||||
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
|
||||
|
||||
if check_exit_code:
|
||||
exit_code_text = self.gem5_shell('echo $?', as_root=as_root,
|
||||
timeout=timeout, check_exit_code=False,
|
||||
sync=False)
|
||||
try:
|
||||
exit_code = int(exit_code_text.split()[0])
|
||||
if exit_code:
|
||||
message = 'Got exit code {}\nfrom: {}\nOUTPUT: {}'
|
||||
raise DeviceError(message.format(exit_code, command, output))
|
||||
except (ValueError, IndexError):
|
||||
self.logger.warning('Could not get exit code for "{}",\ngot: "{}"'.format(command, exit_code_text))
|
||||
|
||||
return output
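    # For reference (sketch only; the command shown is an assumption): a call such as
    #
    #     output = self.gem5_shell('cat /proc/meminfo')
    #
    # sends the command over the telnet connection, waits for the prompt, strips the
    # echoed command from the captured output, then issues a follow-up 'echo $?'
    # (with sync=False) and raises DeviceError if that reports a non-zero exit code.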
|
||||
|
||||
def gem5_util(self, command):
|
||||
""" Execute a gem5 utility command using the m5 binary on the device """
|
||||
self.gem5_shell('{} {}'.format(self.m5_path, command))
|
||||
|
||||
def sync_gem5_shell(self):
|
||||
"""
|
||||
Synchronise with the gem5 shell.
|
||||
|
||||
Write some unique text to the gem5 device to allow us to synchronise
|
||||
with the shell output. We actually get two prompts so we need to match
|
||||
both of these.
|
||||
"""
|
||||
self.logger.debug("Sending Sync")
|
||||
self.sckt.send("echo \*\*sync\*\*\n")
|
||||
self.sckt.expect(r"\*\*sync\*\*", timeout=self.delay)
|
||||
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
|
||||
self.sckt.expect([self.sckt.UNIQUE_PROMPT, self.sckt.PROMPT], timeout=self.delay)
|
||||
|
||||
def resize_shell(self):
|
||||
"""
|
||||
Resize the shell to avoid line wrapping issues.
|
||||
|
||||
"""
|
||||
        # Try and avoid line wrapping as much as possible. Don't check the error
        # codes from these commands because some of them WILL fail.
|
||||
self.gem5_shell('stty columns 1024', check_exit_code=False)
|
||||
self.gem5_shell('{} stty columns 1024'.format(self.busybox), check_exit_code=False)
|
||||
self.gem5_shell('stty cols 1024', check_exit_code=False)
|
||||
self.gem5_shell('{} stty cols 1024'.format(self.busybox), check_exit_code=False)
|
||||
self.gem5_shell('reset', check_exit_code=False)
|
||||
|
||||
def move_to_temp_dir(self, source):
|
||||
"""
|
||||
Move a file to the temporary directory on the host for copying to the
|
||||
gem5 device
|
||||
"""
|
||||
command = "cp {} {}".format(source, self.temp_dir)
|
||||
self.logger.debug("Local copy command: {}".format(command))
|
||||
subprocess.call(command.split())
|
||||
subprocess.call("sync".split())
|
||||
|
||||
def checkpoint_gem5(self, end_simulation=False):
|
||||
""" Checkpoint the gem5 simulation, storing all system state """
|
||||
self.logger.info("Taking a post-boot checkpoint")
|
||||
self.gem5_util("checkpoint")
|
||||
if end_simulation:
|
||||
self.disconnect()
|
||||
|
||||
def mount_virtio(self):
|
||||
"""
|
||||
Mount the VirtIO device in the simulated system.
|
||||
"""
|
||||
self.logger.info("Mounting VirtIO device in simulated system")
|
||||
|
||||
self.gem5_shell('mkdir -p /mnt/obb')
|
||||
|
||||
mount_command = "mount -t 9p -o trans=virtio,version=9p2000.L,aname={} gem5 /mnt/obb".format(self.temp_dir)
|
||||
self.gem5_shell(mount_command)
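        # Example of the command issued above, with a hypothetical host temp dir:
        #
        #     mount -t 9p -o trans=virtio,version=9p2000.L,aname=/tmp/wa_gem5_abc123 gem5 /mnt/obb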
|
||||
|
||||
def deploy_m5(self, context, force=False):
|
||||
"""
|
||||
Deploys the m5 binary to the device and returns the path to the binary
|
||||
on the device.
|
||||
|
||||
:param force: by default, if the binary is already present on the
|
||||
device, it will not be deployed again. Setting force to
|
||||
``True`` overrides that behaviour and ensures that the
|
||||
binary is always copied. Defaults to ``False``.
|
||||
|
||||
:returns: The on-device path to the m5 binary.
|
||||
|
||||
"""
|
||||
on_device_executable = self.path.join(self.binaries_directory, 'm5')
|
||||
if not force and self.file_exists(on_device_executable):
|
||||
# We want to check the version of the binary. We cannot directly
|
||||
# check this because the m5 binary itself is unversioned. We also
|
||||
# need to make sure not to check the error code as "m5 --help"
|
||||
# returns a non-zero error code.
|
||||
output = self.gem5_shell('m5 --help', check_exit_code=False)
|
||||
if "writefile" in output:
|
||||
self.logger.debug("Using the m5 binary on the device...")
|
||||
self.m5_path = on_device_executable
|
||||
return on_device_executable
|
||||
else:
|
||||
self.logger.debug("m5 on device does not support writefile!")
|
||||
host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'm5'))
|
||||
self.logger.info("Installing the m5 binary to the device...")
|
||||
self.m5_path = self.install(host_file)
|
||||
return self.m5_path
|
@@ -1,16 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
@@ -1,875 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# pylint: disable=E1101
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import socket
|
||||
from collections import namedtuple
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from wlauto.core.extension import Parameter
|
||||
from wlauto.core.device import Device, RuntimeParameter, CoreParameter
|
||||
from wlauto.core.resource import NO_ONE
|
||||
from wlauto.exceptions import ConfigError, DeviceError, TimeoutError, DeviceNotRespondingError
|
||||
from wlauto.common.resources import Executable
|
||||
from wlauto.utils.cpuinfo import Cpuinfo
|
||||
from wlauto.utils.misc import convert_new_lines, escape_double_quotes, ranges_to_list, ABI_MAP
|
||||
from wlauto.utils.misc import isiterable, list_to_mask
|
||||
from wlauto.utils.ssh import SshShell
|
||||
from wlauto.utils.types import boolean, list_of_strings
|
||||
|
||||
|
||||
FSTAB_ENTRY_REGEX = re.compile(r'(\S+) on (\S+) type (\S+) \((\S+)\)')
|
||||
|
||||
FstabEntry = namedtuple('FstabEntry', ['device', 'mount_point', 'fs_type', 'options', 'dump_freq', 'pass_num'])
|
||||
PsEntry = namedtuple('PsEntry', 'user pid ppid vsize rss wchan pc state name')
|
||||
LsmodEntry = namedtuple('LsmodEntry', ['name', 'size', 'use_count', 'used_by'])
|
||||
|
||||
|
||||
class BaseLinuxDevice(Device): # pylint: disable=abstract-method
|
||||
|
||||
path_module = 'posixpath'
|
||||
has_gpu = True
|
||||
|
||||
parameters = [
|
||||
Parameter('scheduler', kind=str, default='unknown',
|
||||
allowed_values=['unknown', 'smp', 'hmp', 'iks', 'ea', 'other'],
|
||||
description="""
|
||||
Specifies the type of multi-core scheduling model utilized in the device. The value
|
||||
must be one of the following:
|
||||
|
||||
:unknown: A generic Device interface is used to interact with the underlying device
|
||||
                                   and the underlying scheduling model is unknown.
|
||||
:smp: A standard single-core or Symmetric Multi-Processing system.
|
||||
:hmp: ARM Heterogeneous Multi-Processing system.
|
||||
:iks: Linaro In-Kernel Switcher.
|
||||
:ea: ARM Energy-Aware scheduler.
|
||||
:other: Any other system not covered by the above.
|
||||
|
||||
.. note:: most currently-available systems would fall under ``smp`` rather than
|
||||
this value. ``other`` is there to future-proof against new schemes
|
||||
not yet covered by WA.
|
||||
|
||||
"""),
|
||||
Parameter('iks_switch_frequency', kind=int, default=None,
|
||||
description="""
|
||||
This is the switching frequency, in kilohertz, of IKS devices. This parameter *MUST NOT*
|
||||
be set for non-IKS device (i.e. ``scheduler != 'iks'``). If left unset for IKS devices,
|
||||
it will default to ``800000``, i.e. 800MHz.
|
||||
"""),
|
||||
Parameter('property_files', kind=list_of_strings,
|
||||
default=[
|
||||
'/etc/arch-release',
|
||||
'/etc/debian_version',
|
||||
'/etc/lsb-release',
|
||||
'/proc/config.gz',
|
||||
'/proc/cmdline',
|
||||
'/proc/cpuinfo',
|
||||
'/proc/version',
|
||||
'/proc/zconfig',
|
||||
'/sys/kernel/debug/sched_features',
|
||||
'/sys/kernel/hmp',
|
||||
],
|
||||
description='''
|
||||
A list of paths to files containing static OS properties. These will be pulled into the
|
||||
                      __meta directory in output for each run in order to provide information about the platform.
|
||||
These paths do not have to exist and will be ignored if the path is not present on a
|
||||
particular device.
|
||||
'''),
|
||||
Parameter('binaries_directory',
|
||||
description='Location of executable binaries on this device (must be in PATH).'),
|
||||
|
||||
]
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
|
||||
CoreParameter('${core}_cores', 'get_number_of_online_cpus', 'set_number_of_online_cpus',
|
||||
value_name='number'),
|
||||
CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_frequency', 'get_core_cur_frequency', 'set_core_cur_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
|
||||
value_name='governor'),
|
||||
CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
|
||||
value_name='tunables'),
|
||||
]
|
||||
|
||||
dynamic_modules = [
|
||||
'devcpufreq',
|
||||
'cpuidle',
|
||||
]
|
||||
|
||||
@property
|
||||
def abi(self):
|
||||
if not self._abi:
|
||||
val = self.execute('uname -m').strip()
|
||||
for abi, architectures in ABI_MAP.iteritems():
|
||||
if val in architectures:
|
||||
self._abi = abi
|
||||
break
|
||||
else:
|
||||
self._abi = val
|
||||
return self._abi
|
||||
|
||||
@property
|
||||
def online_cpus(self):
|
||||
val = self.get_sysfile_value('/sys/devices/system/cpu/online')
|
||||
return ranges_to_list(val)
|
||||
|
||||
@property
|
||||
def number_of_cores(self):
|
||||
"""
|
||||
Added in version 2.1.4.
|
||||
|
||||
"""
|
||||
if self._number_of_cores is None:
|
||||
corere = re.compile(r'^\s*cpu\d+\s*$')
|
||||
output = self.execute('ls /sys/devices/system/cpu')
|
||||
self._number_of_cores = 0
|
||||
for entry in output.split():
|
||||
if corere.match(entry):
|
||||
self._number_of_cores += 1
|
||||
return self._number_of_cores
|
||||
|
||||
@property
|
||||
def resource_cache(self):
|
||||
return self.path.join(self.working_directory, '.cache')
|
||||
|
||||
@property
|
||||
def file_transfer_cache(self):
|
||||
return self.path.join(self.working_directory, '.transfer')
|
||||
|
||||
@property
|
||||
def cpuinfo(self):
|
||||
if not self._cpuinfo:
|
||||
self._cpuinfo = Cpuinfo(self.execute('cat /proc/cpuinfo'))
|
||||
return self._cpuinfo
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(BaseLinuxDevice, self).__init__(**kwargs)
|
||||
self.busybox = None
|
||||
self._is_initialized = False
|
||||
self._is_ready = False
|
||||
self._just_rebooted = False
|
||||
self._is_rooted = None
|
||||
self._is_root_user = False
|
||||
self._available_frequencies = {}
|
||||
self._available_governors = {}
|
||||
self._available_governor_tunables = {}
|
||||
self._number_of_cores = None
|
||||
self._written_sysfiles = []
|
||||
self._cpuinfo = None
|
||||
self._abi = None
|
||||
|
||||
def validate(self):
|
||||
if self.iks_switch_frequency is not None and self.scheduler != 'iks': # pylint: disable=E0203
|
||||
raise ConfigError('iks_switch_frequency must NOT be set for non-IKS devices.')
|
||||
if self.iks_switch_frequency is None and self.scheduler == 'iks': # pylint: disable=E0203
|
||||
self.iks_switch_frequency = 800000 # pylint: disable=W0201
|
||||
|
||||
def initialize(self, context):
|
||||
self.execute('mkdir -p {}'.format(self.working_directory))
|
||||
if not self.binaries_directory:
|
||||
self._set_binaries_dir()
|
||||
self.execute('mkdir -p {}'.format(self.binaries_directory))
|
||||
self.busybox = self.deploy_busybox(context)
|
||||
|
||||
def _set_binaries_dir(self):
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
self.binaries_directory = self.path.join(self.working_directory, "bin")
|
||||
|
||||
def is_file(self, filepath):
|
||||
output = self.execute('if [ -f \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
        # output from ssh may contain part of the expression in the buffer,
|
||||
# split out everything except the last word.
|
||||
return boolean(output.split()[-1]) # pylint: disable=maybe-no-member
|
||||
|
||||
def is_directory(self, filepath):
|
||||
output = self.execute('if [ -d \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
        # output from ssh may contain part of the expression in the buffer,
|
||||
# split out everything except the last word.
|
||||
return boolean(output.split()[-1]) # pylint: disable=maybe-no-member
|
||||
|
||||
def get_properties(self, context):
|
||||
for propfile in self.property_files:
|
||||
try:
|
||||
normname = propfile.lstrip(self.path.sep).replace(self.path.sep, '.')
|
||||
outfile = os.path.join(context.host_working_directory, normname)
|
||||
if self.is_file(propfile):
|
||||
with open(outfile, 'w') as wfh:
|
||||
wfh.write(self.execute('cat {}'.format(propfile)))
|
||||
elif self.is_directory(propfile):
|
||||
self.pull_file(propfile, outfile)
|
||||
else:
|
||||
continue
|
||||
except DeviceError:
|
||||
# We pull these files "opportunistically", so if a pull fails
|
||||
# (e.g. we don't have permissions to read the file), just note
|
||||
# it quietly (not as an error/warning) and move on.
|
||||
self.logger.debug('Could not pull property file "{}"'.format(propfile))
|
||||
return {}
|
||||
|
||||
def get_sysfile_value(self, sysfile, kind=None):
|
||||
"""
|
||||
Get the contents of the specified sysfile.
|
||||
|
||||
        :param sysfile: The file whose contents will be returned.
|
||||
|
||||
:param kind: The type of value to be expected in the sysfile. This can
|
||||
be any Python callable that takes a single str argument.
|
||||
If not specified or is None, the contents will be returned
|
||||
as a string.
|
||||
|
||||
"""
|
||||
output = self.execute('cat \'{}\''.format(sysfile), as_root=self.is_rooted).strip() # pylint: disable=E1103
|
||||
if kind:
|
||||
return kind(output)
|
||||
else:
|
||||
return output
|
||||
|
||||
def set_sysfile_value(self, sysfile, value, verify=True):
|
||||
"""
|
||||
Set the value of the specified sysfile. By default, the value will be checked afterwards.
|
||||
        This can be overridden by setting the ``verify`` parameter to ``False``.
|
||||
|
||||
"""
|
||||
value = str(value)
|
||||
self.execute('echo {} > \'{}\''.format(value, sysfile), check_exit_code=False, as_root=True)
|
||||
if verify:
|
||||
output = self.get_sysfile_value(sysfile)
|
||||
if output.strip() != value: # pylint: disable=E1103
|
||||
message = 'Could not set the value of {} to {}'.format(sysfile, value)
|
||||
raise DeviceError(message)
|
||||
self._written_sysfiles.append(sysfile)
|
||||
|
||||
def get_sysfile_values(self):
|
||||
"""
|
||||
Returns a dict mapping paths of sysfiles that were previously set to their
|
||||
current values.
|
||||
|
||||
"""
|
||||
values = {}
|
||||
for sysfile in self._written_sysfiles:
|
||||
values[sysfile] = self.get_sysfile_value(sysfile)
|
||||
return values
|
||||
|
||||
def set_sysfile_values(self, params):
|
||||
"""
|
||||
The plural version of ``set_sysfile_value``. Takes a single parameter which is a mapping of
|
||||
        file paths to values to be set. By default, every value written will be verified. This can
|
||||
be disabled for individual paths by appending ``'!'`` to them.
|
||||
|
||||
"""
|
||||
for sysfile, value in params.iteritems():
|
||||
verify = not sysfile.endswith('!')
|
||||
sysfile = sysfile.rstrip('!')
|
||||
self.set_sysfile_value(sysfile, value, verify=verify)
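    # Illustrative usage (not part of the original file; 'device' is an assumed
    # BaseLinuxDevice instance): values are verified after writing by default; a
    # trailing '!' on a path disables verification for that entry only.
    #
    #     device.set_sysfile_values({
    #         '/sys/devices/system/cpu/cpu1/online': 1,
    #         '/sys/kernel/debug/sched_features!': 'NO_GENTLE_FAIR_SLEEPERS',
    #     })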
|
||||
|
||||
def deploy_busybox(self, context, force=False):
|
||||
"""
|
||||
Deploys the busybox binary to the specified device and returns
|
||||
the path to the binary on the device.
|
||||
|
||||
:param context: an instance of ExecutionContext
|
||||
:param force: by default, if the binary is already present on the
|
||||
device, it will not be deployed again. Setting force
|
||||
to ``True`` overrides that behavior and ensures that the
|
||||
binary is always copied. Defaults to ``False``.
|
||||
|
||||
:returns: The on-device path to the busybox binary.
|
||||
|
||||
"""
|
||||
on_device_executable = self.get_binary_path("busybox", search_system_binaries=False)
|
||||
if force or not on_device_executable:
|
||||
host_file = context.resolver.get(Executable(NO_ONE, self.abi, 'busybox'))
|
||||
return self.install(host_file)
|
||||
return on_device_executable
|
||||
|
||||
def is_installed(self, name): # pylint: disable=unused-argument,no-self-use
|
||||
        raise AttributeError("""Instead of using is_installed, please use
        ``get_binary_path`` or ``install_if_needed``. You should
        use the path returned by these functions to then invoke the binary.

        please see: https://pythonhosted.org/wlauto/writing_extensions.html""")
|
||||
|
||||
def get_binary_path(self, name, search_system_binaries=True):
|
||||
"""
|
||||
        Searches the device's ``binaries_directory`` for the given binary;
        if it cannot find it there, it tries using ``which`` to locate it.
|
||||
|
||||
:param name: The name of the binary
|
||||
:param search_system_binaries: By default this function will try using
|
||||
which to find the binary if it isn't in
|
||||
``binary_directory``. When this is set
|
||||
to ``False`` it will not try this.
|
||||
|
||||
:returns: The on-device path to the binary.
|
||||
|
||||
"""
|
||||
wa_binary = self.path.join(self.binaries_directory, name)
|
||||
if self.file_exists(wa_binary):
|
||||
return wa_binary
|
||||
if search_system_binaries:
|
||||
try:
|
||||
return self.execute('{} which {}'.format(self.busybox, name)).strip()
|
||||
except DeviceError:
|
||||
pass
|
||||
return None
|
||||
|
||||
def install_if_needed(self, host_path, search_system_binaries=True):
|
||||
"""
|
||||
Similar to get_binary_path but will install the binary if not found.
|
||||
|
||||
:param host_path: The path to the binary on the host
|
||||
:param search_system_binaries: By default this function will try using
|
||||
which to find the binary if it isn't in
|
||||
``binary_directory``. When this is set
|
||||
to ``False`` it will not try this.
|
||||
|
||||
:returns: The on-device path to the binary.
|
||||
|
||||
"""
|
||||
binary_path = self.get_binary_path(os.path.split(host_path)[1],
|
||||
search_system_binaries=search_system_binaries)
|
||||
if not binary_path:
|
||||
binary_path = self.install(host_path)
|
||||
return binary_path
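    # Illustrative usage from an extension (sketch only; 'context' and the 'perf'
    # binary name are assumptions):
    #
    #     host_binary = context.resolver.get(Executable(NO_ONE, self.device.abi, 'perf'))
    #     device_binary = self.device.install_if_needed(host_binary)
    #     self.device.execute('{} --version'.format(device_binary))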
|
||||
|
||||
def list_file_systems(self):
|
||||
output = self.execute('mount')
|
||||
fstab = []
|
||||
for line in output.split('\n'):
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
match = FSTAB_ENTRY_REGEX.search(line)
|
||||
if match:
|
||||
fstab.append(FstabEntry(match.group(1), match.group(2),
|
||||
match.group(3), match.group(4),
|
||||
None, None))
|
||||
else: # assume pre-M Android
|
||||
fstab.append(FstabEntry(*line.split()))
|
||||
return fstab
|
||||
|
||||
# Process query and control
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
raise NotImplementedError()
|
||||
|
||||
def ps(self, **kwargs):
|
||||
raise NotImplementedError()
|
||||
|
||||
def kill(self, pid, signal=None, as_root=False): # pylint: disable=W0221
|
||||
"""
|
||||
Kill the specified process.
|
||||
|
||||
:param pid: PID of the process to kill.
|
||||
        :param signal: Specify which signal to send to the process. This must
|
||||
be a valid value for -s option of kill. Defaults to ``None``.
|
||||
|
||||
Modified in version 2.1.4: added ``signal`` parameter.
|
||||
|
||||
"""
|
||||
signal_string = '-s {}'.format(signal) if signal else ''
|
||||
self.execute('kill {} {}'.format(signal_string, pid), as_root=as_root)
|
||||
|
||||
def killall(self, process_name, signal=None, as_root=False): # pylint: disable=W0221
|
||||
"""
|
||||
Kill all processes with the specified name.
|
||||
|
||||
:param process_name: The name of the process(es) to kill.
|
||||
        :param signal: Specify which signal to send to the process. This must
|
||||
be a valid value for -s option of kill. Defaults to ``None``.
|
||||
|
||||
Modified in version 2.1.5: added ``as_root`` parameter.
|
||||
|
||||
"""
|
||||
for pid in self.get_pids_of(process_name):
|
||||
self.kill(pid, signal=signal, as_root=as_root)
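    # Illustrative usage (sketch; the process name is an assumption):
    #
    #     device.killall('gatord', signal='KILL', as_root=True)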
|
||||
|
||||
def get_online_cpus(self, c):
|
||||
if isinstance(c, int): # assume c == cluster
|
||||
return [i for i in self.online_cpus if self.core_clusters[i] == c]
|
||||
elif isinstance(c, basestring): # assume c == core
|
||||
return [i for i in self.online_cpus if self.core_names[i] == c]
|
||||
else:
|
||||
raise ValueError(c)
|
||||
|
||||
def get_number_of_online_cpus(self, c):
|
||||
return len(self.get_online_cpus(c))
|
||||
|
||||
def set_number_of_online_cpus(self, core, number):
|
||||
core_ids = [i for i, c in enumerate(self.core_names) if c == core]
|
||||
max_cores = len(core_ids)
|
||||
if number > max_cores:
|
||||
message = 'Attempting to set the number of active {} to {}; maximum is {}'
|
||||
raise ValueError(message.format(core, number, max_cores))
|
||||
for i in xrange(0, number):
|
||||
self.enable_cpu(core_ids[i])
|
||||
for i in xrange(number, max_cores):
|
||||
self.disable_cpu(core_ids[i])
|
||||
|
||||
# hotplug
|
||||
|
||||
def enable_cpu(self, cpu):
|
||||
"""
|
||||
Enable the specified core.
|
||||
|
||||
:param cpu: CPU core to enable. This must be the full name as it
|
||||
appears in sysfs, e.g. "cpu0".
|
||||
|
||||
"""
|
||||
self.hotplug_cpu(cpu, online=True)
|
||||
|
||||
def disable_cpu(self, cpu):
|
||||
"""
|
||||
Disable the specified core.
|
||||
|
||||
:param cpu: CPU core to disable. This must be the full name as it
|
||||
appears in sysfs, e.g. "cpu0".
|
||||
"""
|
||||
self.hotplug_cpu(cpu, online=False)
|
||||
|
||||
def hotplug_cpu(self, cpu, online):
|
||||
"""
|
||||
Hotplug the specified CPU either on or off.
|
||||
See https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
|
||||
|
||||
        :param cpu: The CPU to be hotplugged. This must be
|
||||
the full name as it appears in sysfs, e.g. "cpu0".
|
||||
:param online: CPU will be enabled if this value bool()'s to True, and
|
||||
will be disabled otherwise.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
status = 1 if online else 0
|
||||
sysfile = '/sys/devices/system/cpu/{}/online'.format(cpu)
|
||||
self.set_sysfile_value(sysfile, status)
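    # Illustrative sketch: device.hotplug_cpu(2, online=False) (or, equivalently,
    # device.disable_cpu('cpu2')) resolves to writing the sysfs file, i.e. the
    # equivalent of:
    #
    #     echo 0 > /sys/devices/system/cpu/cpu2/online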
|
||||
|
||||
def get_number_of_active_cores(self, core):
|
||||
if core not in self.core_names:
|
||||
raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
|
||||
active_cpus = self.active_cpus
|
||||
num_active_cores = 0
|
||||
for i, c in enumerate(self.core_names):
|
||||
if c == core and i in active_cpus:
|
||||
num_active_cores += 1
|
||||
return num_active_cores
|
||||
|
||||
def set_number_of_active_cores(self, core, number): # NOQA
|
||||
if core not in self.core_names:
|
||||
raise ValueError('Unexpected core: {}; must be in {}'.format(core, list(set(self.core_names))))
|
||||
core_ids = [i for i, c in enumerate(self.core_names) if c == core]
|
||||
max_cores = len(core_ids)
|
||||
if number > max_cores:
|
||||
message = 'Attempting to set the number of active {} to {}; maximum is {}'
|
||||
raise ValueError(message.format(core, number, max_cores))
|
||||
|
||||
if not number:
|
||||
# make sure at least one other core is enabled to avoid trying to
|
||||
# hotplug everything.
|
||||
for i, c in enumerate(self.core_names):
|
||||
if c != core:
|
||||
self.enable_cpu(i)
|
||||
break
|
||||
else: # did not find one
|
||||
raise ValueError('Cannot hotplug all cpus on the device!')
|
||||
|
||||
for i in xrange(0, number):
|
||||
self.enable_cpu(core_ids[i])
|
||||
for i in xrange(number, max_cores):
|
||||
self.disable_cpu(core_ids[i])
|
||||
|
||||
def invoke(self, binary, args=None, in_directory=None, on_cpus=None,
|
||||
background=False, as_root=False, timeout=30):
|
||||
"""
|
||||
Executes the specified binary under the specified conditions.
|
||||
|
||||
:binary: binary to execute. Must be present and executable on the device.
|
||||
        :args: arguments to be passed to the binary. These can be either a list or
|
||||
a string.
|
||||
:in_directory: execute the binary in the specified directory. This must
|
||||
be an absolute path.
|
||||
:on_cpus: taskset the binary to these CPUs. This may be a single ``int`` (in which
|
||||
case, it will be interpreted as the mask), a list of ``ints``, in which
|
||||
                  case this will be interpreted as the list of cpus, or a string, which
|
||||
will be interpreted as a comma-separated list of cpu ranges, e.g.
|
||||
``"0,4-7"``.
|
||||
:background: If ``True``, a ``subprocess.Popen`` object will be returned straight
|
||||
away. If ``False`` (the default), this will wait for the command to
|
||||
terminate and return the STDOUT output
|
||||
:as_root: Specify whether the command should be run as root
|
||||
:timeout: If the invocation does not terminate within this number of seconds,
|
||||
a ``TimeoutError`` exception will be raised. Set to ``None`` if the
|
||||
invocation should not timeout.
|
||||
|
||||
"""
|
||||
command = binary
|
||||
if args:
|
||||
if isiterable(args):
|
||||
args = ' '.join(args)
|
||||
command = '{} {}'.format(command, args)
|
||||
if on_cpus:
|
||||
if isinstance(on_cpus, basestring):
|
||||
on_cpus = ranges_to_list(on_cpus)
|
||||
if isiterable(on_cpus):
|
||||
on_cpus = list_to_mask(on_cpus) # pylint: disable=redefined-variable-type
|
||||
command = '{} taskset 0x{:x} {}'.format(self.busybox, on_cpus, command)
|
||||
if in_directory:
|
||||
command = 'cd {} && {}'.format(in_directory, command)
|
||||
return self.execute(command, background=background, as_root=as_root, timeout=timeout)
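    # Illustrative usage (sketch; the binary and arguments are assumptions): pin a
    # benchmark to cpus 0-3 and run it from the device working directory. The cpu
    # list is converted to a hex taskset mask internally.
    #
    #     output = device.invoke('./dhrystone 100000', on_cpus='0-3',
    #                            in_directory=device.working_directory)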
|
||||
|
||||
def get_device_model(self):
|
||||
if self.file_exists("/proc/device-tree/model"):
|
||||
raw_model = self.execute("cat /proc/device-tree/model")
|
||||
return '_'.join(raw_model.split()[:2])
|
||||
# Right now we don't know any other way to get device model
|
||||
# info in linux on arm platforms
|
||||
return None
|
||||
|
||||
# internal methods
|
||||
|
||||
def _check_ready(self):
|
||||
if not self._is_ready:
|
||||
raise AttributeError('Device not ready.')
|
||||
|
||||
def _get_core_cluster(self, core):
|
||||
"""Returns the first cluster that has cores of the specified type. Raises
|
||||
        a ValueError if no cluster for the specified type has been found."""
|
||||
core_indexes = [i for i, c in enumerate(self.core_names) if c == core]
|
||||
core_clusters = set(self.core_clusters[i] for i in core_indexes)
|
||||
if not core_clusters:
|
||||
raise ValueError('No cluster found for core {}'.format(core))
|
||||
return sorted(list(core_clusters))[0]
|
||||
|
||||
|
||||
class LinuxDevice(BaseLinuxDevice):
|
||||
|
||||
platform = 'linux'
|
||||
|
||||
default_timeout = 30
|
||||
delay = 2
|
||||
long_delay = 3 * delay
|
||||
ready_timeout = 60
|
||||
|
||||
parameters = [
|
||||
Parameter('host', mandatory=True, description='Host name or IP address for the device.'),
|
||||
Parameter('username', mandatory=True, description='User name for the account on the device.'),
|
||||
Parameter('password', description='Password for the account on the device (for password-based auth).'),
|
||||
Parameter('keyfile', description='Keyfile to be used for key-based authentication.'),
|
||||
Parameter('port', kind=int, default=22, description='SSH port number on the device.'),
|
||||
Parameter('password_prompt', default='[sudo] password',
|
||||
description='Prompt presented by sudo when requesting the password.'),
|
||||
|
||||
Parameter('use_telnet', kind=boolean, default=False,
|
||||
description='Optionally, telnet may be used instead of ssh, though this is discouraged.'),
|
||||
Parameter('boot_timeout', kind=int, default=120,
|
||||
description='How long to try to connect to the device after a reboot.'),
|
||||
|
||||
Parameter('working_directory', default=None,
|
||||
description='''
|
||||
Working directory to be used by WA. This must be in a location where the specified user
|
||||
has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
|
||||
username is 'root').
|
||||
'''),
|
||||
]
|
||||
|
||||
@property
|
||||
def is_rooted(self):
|
||||
if self._is_rooted is None:
|
||||
# First check if the user is root
|
||||
try:
|
||||
self.execute('test $(id -u) = 0')
|
||||
self._is_root_user = True
|
||||
self._is_rooted = True
|
||||
return self._is_rooted
|
||||
except DeviceError:
|
||||
self._is_root_user = False
|
||||
|
||||
# Otherwise, check if the user has sudo rights
|
||||
try:
|
||||
self.execute('ls /', as_root=True)
|
||||
self._is_rooted = True
|
||||
except DeviceError:
|
||||
self._is_rooted = False
|
||||
return self._is_rooted
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(LinuxDevice, self).__init__(*args, **kwargs)
|
||||
self.shell = None
|
||||
self._is_rooted = None
|
||||
|
||||
def validate(self):
|
||||
if self.working_directory is None: # pylint: disable=access-member-before-definition
|
||||
if self.username == 'root':
|
||||
self.working_directory = '/root/wa' # pylint: disable=attribute-defined-outside-init
|
||||
else:
|
||||
self.working_directory = '/home/{}/wa'.format(self.username) # pylint: disable=attribute-defined-outside-init
|
||||
|
||||
def initialize(self, context, *args, **kwargs):
|
||||
self.execute('mkdir -p {}'.format(self.binaries_directory))
|
||||
self.execute('export PATH={}:$PATH'.format(self.binaries_directory))
|
||||
super(LinuxDevice, self).initialize(context, *args, **kwargs)
|
||||
|
||||
# Power control
|
||||
|
||||
def reset(self):
|
||||
self.execute('reboot', as_root=True)
|
||||
self._is_ready = False
|
||||
|
||||
def hard_reset(self):
|
||||
self._is_ready = False
|
||||
|
||||
def boot(self, hard=False, **kwargs):
|
||||
if hard:
|
||||
self.hard_reset()
|
||||
else:
|
||||
self.reset()
|
||||
self.logger.debug('Waiting for device...')
|
||||
start_time = time.time()
|
||||
while (time.time() - start_time) < self.boot_timeout:
|
||||
try:
|
||||
s = socket.create_connection((self.host, self.port), timeout=5)
|
||||
s.close()
|
||||
break
|
||||
except socket.timeout:
|
||||
pass
|
||||
except socket.error:
|
||||
time.sleep(5)
|
||||
else:
|
||||
raise DeviceError('Could not connect to {} after reboot'.format(self.host))
|
||||
|
||||
def connect(self): # NOQA pylint: disable=R0912
|
||||
self.shell = SshShell(password_prompt=self.password_prompt,
|
||||
timeout=self.default_timeout, telnet=self.use_telnet)
|
||||
self.shell.login(self.host, self.username, self.password, self.keyfile, self.port)
|
||||
self._is_ready = True
|
||||
|
||||
def disconnect(self): # NOQA pylint: disable=R0912
|
||||
self.shell.logout()
|
||||
self._is_ready = False
|
||||
|
||||
# Execution
|
||||
|
||||
def has_root(self):
|
||||
try:
|
||||
self.execute('ls /', as_root=True)
|
||||
return True
|
||||
except DeviceError as e:
|
||||
if 'not in the sudoers file' not in e.message:
|
||||
raise e
|
||||
return False
|
||||
|
||||
def execute(self, command, timeout=default_timeout, check_exit_code=True, background=False,
|
||||
as_root=False, strip_colors=True, **kwargs):
|
||||
"""
|
||||
        Execute the specified command on the device over the SSH (or telnet) shell.
|
||||
|
||||
Parameters:
|
||||
|
||||
:param command: The command to be executed. It should appear exactly
|
||||
as if you were typing it into a shell.
|
||||
        :param timeout: Time, in seconds, to wait for the command to return before aborting
                        and raising an error. Defaults to ``LinuxDevice.default_timeout``.
|
||||
:param check_exit_code: If ``True``, the return code of the command on the Device will
|
||||
                                be checked and an exception will be raised if it is not 0.
|
||||
Defaults to ``True``.
|
||||
        :param background: If ``True``, will create a new ssh shell rather than using
                           the default session and will return it immediately. If this is ``True``,
                           ``timeout``, ``strip_colors`` and (obviously) ``check_exit_code`` will
|
||||
be ignored; also, with this, ``as_root=True`` is only valid if ``username``
|
||||
for the device was set to ``root``.
|
||||
:param as_root: If ``True``, will attempt to execute command in privileged mode. The device
|
||||
must be rooted, otherwise an error will be raised. Defaults to ``False``.
|
||||
|
||||
Added in version 2.1.3
|
||||
|
||||
:returns: If ``background`` parameter is set to ``True``, the subprocess object will
|
||||
be returned; otherwise, the contents of STDOUT from the device will be returned.
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
try:
|
||||
if background:
|
||||
if as_root and self.username != 'root':
|
||||
raise DeviceError('Cannot execute in background with as_root=True unless user is root.')
|
||||
return self.shell.background(command)
|
||||
else:
|
||||
# If we're already the root user, don't bother with sudo
|
||||
if self._is_root_user:
|
||||
as_root = False
|
||||
return self.shell.execute(command, timeout, check_exit_code, as_root, strip_colors)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def kick_off(self, command, as_root=False):
|
||||
"""
|
||||
        Like execute but returns immediately without waiting for the command to finish, leaving it
        running on the device (this is different from execute(background=True), which keeps the
        connection open and returns a subprocess object).
|
||||
|
||||
"""
|
||||
self._check_ready()
|
||||
command = 'sh -c "{}" 1>/dev/null 2>/dev/null &'.format(escape_double_quotes(command))
|
||||
return self.shell.execute(command, as_root=as_root)
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
"""Returns a list of PIDs of all processes with the specified name."""
|
||||
# result should be a column of PIDs with the first row as "PID" header
|
||||
result = self.execute('ps -C {} -o pid'.format(process_name), # NOQA
|
||||
check_exit_code=False).strip().split()
|
||||
if len(result) >= 2: # at least one row besides the header
|
||||
return map(int, result[1:])
|
||||
else:
|
||||
return []
|
||||
|
||||
def ps(self, **kwargs):
|
||||
command = 'ps -eo user,pid,ppid,vsize,rss,wchan,pcpu,state,fname'
|
||||
lines = iter(convert_new_lines(self.execute(command)).split('\n'))
|
||||
lines.next() # header
|
||||
|
||||
result = []
|
||||
for line in lines:
|
||||
parts = re.split(r'\s+', line, maxsplit=8)
|
||||
if parts:
|
||||
result.append(PsEntry(*(parts[0:1] + map(int, parts[1:5]) + parts[5:])))
|
||||
|
||||
if not kwargs:
|
||||
return result
|
||||
else:
|
||||
filtered_result = []
|
||||
for entry in result:
|
||||
if all(getattr(entry, k) == v for k, v in kwargs.iteritems()):
|
||||
filtered_result.append(entry)
|
||||
return filtered_result
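    # Illustrative usage (sketch): keyword arguments filter on PsEntry fields by
    # equality, e.g. processes owned by root:
    #
    #     root_entries = device.ps(user='root')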
|
||||
|
||||
# File management
|
||||
|
||||
def push_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root or self.username == 'root':
|
||||
self.shell.push_file(source, dest, timeout=timeout)
|
||||
else:
|
||||
tempfile = self.path.join(self.working_directory, self.path.basename(dest))
|
||||
self.shell.push_file(source, tempfile, timeout=timeout)
|
||||
self.shell.execute('cp -r {} {}'.format(tempfile, dest), timeout=timeout, as_root=True)
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def pull_file(self, source, dest, as_root=False, timeout=default_timeout): # pylint: disable=W0221
|
||||
self._check_ready()
|
||||
try:
|
||||
if not as_root or self.username == 'root':
|
||||
self.shell.pull_file(source, dest, timeout=timeout)
|
||||
else:
|
||||
tempfile = self.path.join(self.working_directory, self.path.basename(source))
|
||||
self.shell.execute('cp -r {} {}'.format(source, tempfile), timeout=timeout, as_root=True)
|
||||
self.shell.execute('chown -R {} {}'.format(self.username, tempfile), timeout=timeout, as_root=True)
|
||||
self.shell.pull_file(tempfile, dest, timeout=timeout)
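                # Sketch of the as_root path above (hypothetical file and user): the
                # source is first copied into the working directory with sudo,
                # chown'ed to the SSH user, and only then pulled to the host, e.g.
                #
                #     sudo cp -r /sys/kernel/debug/tracing/trace /home/user/wa/trace
                #     sudo chown -R user /home/user/wa/trace
                #     (scp) /home/user/wa/trace -> <dest on host>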
|
||||
except CalledProcessError as e:
|
||||
raise DeviceError(e)
|
||||
|
||||
def delete_file(self, filepath, as_root=False): # pylint: disable=W0221
|
||||
self.execute('rm -rf {}'.format(filepath), as_root=as_root)
|
||||
|
||||
def file_exists(self, filepath):
|
||||
output = self.execute('if [ -e \'{}\' ]; then echo 1; else echo 0; fi'.format(filepath))
|
||||
        # output from ssh may contain part of the expression in the buffer,
|
||||
# split out everything except the last word.
|
||||
return boolean(output.split()[-1]) # pylint: disable=maybe-no-member
|
||||
|
||||
def listdir(self, path, as_root=False, **kwargs):
|
||||
contents = self.execute('ls -1 {}'.format(path), as_root=as_root).strip()
|
||||
if not contents:
|
||||
return []
|
||||
return [x.strip() for x in contents.split('\n')] # pylint: disable=maybe-no-member
|
||||
|
||||
def install(self, filepath, timeout=default_timeout, with_name=None): # pylint: disable=W0221
|
||||
destpath = self.path.join(self.binaries_directory,
|
||||
with_name or self.path.basename(filepath))
|
||||
self.push_file(filepath, destpath, as_root=True)
|
||||
self.execute('chmod a+x {}'.format(destpath), timeout=timeout, as_root=True)
|
||||
return destpath
|
||||
|
||||
install_executable = install # compatibility
|
||||
|
||||
def uninstall(self, executable_name):
|
||||
on_device_executable = self.get_binary_path(executable_name, search_system_binaries=False)
|
||||
if not on_device_executable:
|
||||
raise DeviceError("Could not uninstall {}, binary not found".format(on_device_executable))
|
||||
self.delete_file(on_device_executable, as_root=self.is_rooted)
|
||||
|
||||
uninstall_executable = uninstall # compatibility
|
||||
|
||||
# misc
|
||||
|
||||
def lsmod(self):
|
||||
"""List loaded kernel modules."""
|
||||
lines = self.execute('lsmod').splitlines()
|
||||
entries = []
|
||||
for line in lines[1:]: # first line is the header
|
||||
if not line.strip():
|
||||
continue
|
||||
parts = line.split()
|
||||
name = parts[0]
|
||||
size = int(parts[1])
|
||||
use_count = int(parts[2])
|
||||
if len(parts) > 3:
|
||||
used_by = ''.join(parts[3:]).split(',')
|
||||
else:
|
||||
used_by = []
|
||||
entries.append(LsmodEntry(name, size, use_count, used_by))
|
||||
return entries
|
||||
|
||||
def insmod(self, path):
|
||||
"""Install a kernel module located on the host on the target device."""
|
||||
target_path = self.path.join(self.working_directory, os.path.basename(path))
|
||||
self.push_file(path, target_path)
|
||||
self.execute('insmod {}'.format(target_path), as_root=True)
|
||||
|
||||
def ping(self):
|
||||
try:
|
||||
# May be triggered inside initialize()
|
||||
self.shell.execute('ls /', timeout=5)
|
||||
except (TimeoutError, CalledProcessError):
|
||||
raise DeviceNotRespondingError(self.host)
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
if not self.get_binary_path('scrot'):
|
||||
self.logger.debug('Could not take screenshot as scrot is not installed.')
|
||||
return
|
||||
try:
|
||||
tempfile = self.path.join(self.working_directory, os.path.basename(filepath))
|
||||
self.execute('DISPLAY=:0.0 scrot {}'.format(tempfile))
|
||||
self.pull_file(tempfile, filepath)
|
||||
self.delete_file(tempfile)
|
||||
except DeviceError as e:
|
||||
if "Can't open X dispay." not in e.message:
|
||||
raise e
|
||||
message = e.message.split('OUTPUT:', 1)[1].strip()
|
||||
self.logger.debug('Could not take screenshot: {}'.format(message))
|
||||
|
||||
def is_screen_on(self):
|
||||
pass # TODO
|
||||
|
||||
def ensure_screen_is_on(self):
|
||||
pass # TODO
|
@@ -53,14 +53,13 @@ sys.path.insert(0, os.path.join(_this_dir, '..', 'external'))
|
||||
|
||||
#pylint: disable=C0326
|
||||
_EXTENSION_TYPE_TABLE = [
|
||||
# name, class, default package, default path
|
||||
('command', 'wlauto.core.command.Command', 'wlauto.commands', 'commands'),
|
||||
('device', 'wlauto.core.device.Device', 'wlauto.devices', 'devices'),
|
||||
('instrument', 'wlauto.core.instrumentation.Instrument', 'wlauto.instrumentation', 'instruments'),
|
||||
('module', 'wlauto.core.extension.Module', 'wlauto.modules', 'modules'),
|
||||
('resource_getter', 'wlauto.core.resource.ResourceGetter', 'wlauto.resource_getters', 'resource_getters'),
|
||||
('result_processor', 'wlauto.core.result.ResultProcessor', 'wlauto.result_processors', 'result_processors'),
|
||||
('workload', 'wlauto.core.workload.Workload', 'wlauto.workloads', 'workloads'),
|
||||
# name, class, default package, default path
|
||||
('command', 'wlauto.core.command.Command', 'wlauto.commands', 'commands'),
|
||||
('device_manager', 'wlauto.core.device_manager.DeviceManager', 'wlauto.managers', 'managers'),
|
||||
('instrument', 'wlauto.core.instrumentation.Instrument', 'wlauto.instrumentation', 'instruments'),
|
||||
('resource_getter', 'wlauto.core.resource.ResourceGetter', 'wlauto.resource_getters', 'resource_getters'),
|
||||
('result_processor', 'wlauto.core.result.ResultProcessor', 'wlauto.result_processors', 'result_processors'),
|
||||
('workload', 'wlauto.core.workload.Workload', 'wlauto.workloads', 'workloads'),
|
||||
]
|
||||
_Extension = namedtuple('_Extension', 'name, cls, default_package, default_path')
|
||||
_extensions = [_Extension._make(ext) for ext in _EXTENSION_TYPE_TABLE] # pylint: disable=W0212
|
||||
@@ -211,4 +210,3 @@ if os.path.isfile(_packages_file):
|
||||
|
||||
for config in _env_configs:
|
||||
settings.update(config)
|
||||
|
||||
|
@@ -243,6 +243,13 @@ class RebootPolicy(object):
|
||||
else:
|
||||
return cmp(self.policy, other)
|
||||
|
||||
def to_pod(self):
|
||||
return self.policy
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
return RebootPolicy(pod)
|
||||
|
||||
|
||||
class RunConfigurationItem(object):
|
||||
"""
|
||||
|
@@ -1,449 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
"""
|
||||
Base classes for device interfaces.
|
||||
|
||||
:Device: The base class for all devices. This defines the interface that must be
|
||||
implemented by all devices and therefore any workload and instrumentation
|
||||
can always rely on.
|
||||
:AndroidDevice: Implements most of the :class:`Device` interface, and extends it
|
||||
with a number of Android-specific methods.
|
||||
:BigLittleDevice: Subclasses :class:`AndroidDevice` to implement big.LITTLE-specific
|
||||
runtime parameters.
|
||||
:SimpleMulticoreDevice: Subclasses :class:`AndroidDevice` to implement homogeneous cores
|
||||
device runtime parameters.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import imp
|
||||
import string
|
||||
from collections import OrderedDict
|
||||
from contextlib import contextmanager
|
||||
|
||||
from wlauto.core.extension import Extension, ExtensionMeta, AttributeCollection, Parameter
|
||||
from wlauto.core.extension_loader import ExtensionLoader
|
||||
from wlauto.exceptions import DeviceError, ConfigError
|
||||
from wlauto.utils.types import list_of_integers, list_of, caseless_string
|
||||
|
||||
|
||||
__all__ = ['RuntimeParameter', 'CoreParameter', 'Device', 'DeviceMeta']
|
||||
|
||||
|
||||
class RuntimeParameter(object):
|
||||
"""
|
||||
    A runtime parameter which has its getter and setter methods associated
|
||||
with it.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, name, getter, setter,
|
||||
getter_args=None, setter_args=None,
|
||||
value_name='value', override=False):
|
||||
"""
|
||||
:param name: the name of the parameter.
|
||||
:param getter: the getter method which returns the value of this parameter.
|
||||
:param setter: the setter method which sets the value of this parameter. The setter
|
||||
always expects to be passed one argument when it is called.
|
||||
:param getter_args: keyword arguments to be used when invoking the getter.
|
||||
:param setter_args: keyword arguments to be used when invoking the setter.
|
||||
:param override: A ``bool`` that specifies whether a parameter of the same name further up the
|
||||
hierarchy should be overridden. If this is ``False`` (the default), an exception
|
||||
will be raised by the ``AttributeCollection`` instead.
|
||||
|
||||
"""
|
||||
self.name = name
|
||||
self.getter = getter
|
||||
self.setter = setter
|
||||
self.getter_args = getter_args or {}
|
||||
self.setter_args = setter_args or {}
|
||||
self.value_name = value_name
|
||||
self.override = override
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
|
||||
class CoreParameter(RuntimeParameter):
|
||||
"""A runtime parameter that will get expanded into a RuntimeParameter for each core type."""
|
||||
|
||||
def get_runtime_parameters(self, core_names):
|
||||
params = []
|
||||
for core in set(core_names):
|
||||
name = string.Template(self.name).substitute(core=core)
|
||||
getter = string.Template(self.getter).substitute(core=core)
|
||||
setter = string.Template(self.setter).substitute(core=core)
|
||||
getargs = dict(self.getter_args.items() + [('core', core)])
|
||||
setargs = dict(self.setter_args.items() + [('core', core)])
|
||||
params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override))
|
||||
return params
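# Illustrative sketch (not part of the original file): a CoreParameter expands into
# one RuntimeParameter per distinct core name, e.g.
#
#     cp = CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
#                        value_name='governor')
#     [p.name for p in cp.get_runtime_parameters(['a7', 'a7', 'a15'])]
#     # -> ['a7_governor', 'a15_governor'] (one per core type; order follows set())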
|
||||
|
||||
|
||||
class DynamicModuleSpec(dict):
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.keys()[0]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
dict.__init__(self)
|
||||
if args:
|
||||
if len(args) > 1:
|
||||
raise ValueError(args)
|
||||
value = args[0]
|
||||
else:
|
||||
value = kwargs
|
||||
if isinstance(value, basestring):
|
||||
self[value] = {}
|
||||
elif isinstance(value, dict) and len(value) == 1:
|
||||
for k, v in value.iteritems():
|
||||
self[k] = v
|
||||
else:
|
||||
raise ValueError(value)
|
||||
|
||||
|
||||
class DeviceMeta(ExtensionMeta):
|
||||
|
||||
to_propagate = ExtensionMeta.to_propagate + [
|
||||
('runtime_parameters', RuntimeParameter, AttributeCollection),
|
||||
('dynamic_modules', DynamicModuleSpec, AttributeCollection),
|
||||
]
|
||||
|
||||
|
||||
class Device(Extension):
|
||||
"""
|
||||
Base class for all devices supported by Workload Automation. Defines
|
||||
the interface the rest of WA uses to interact with devices.
|
||||
|
||||
:name: Unique name used to identify the device.
|
||||
:platform: The name of the device's platform (e.g. ``Android``) this may
|
||||
be used by workloads and instrumentation to assess whether they
|
||||
can run on the device.
|
||||
:working_directory: a string of the directory which is
|
||||
going to be used by the workloads on the device.
|
||||
:binaries_directory: a string of the binary directory for
|
||||
the device.
|
||||
    :has_gpu: Should be ``True`` if the device has a separate GPU, and
|
||||
``False`` if graphics processing is done on a CPU.
|
||||
|
||||
.. note:: Pretty much all devices currently on the market
|
||||
have GPUs, however this may not be the case for some
|
||||
development boards.
|
||||
|
||||
:path_module: The name of one of the modules implementing the os.path
|
||||
interface, e.g. ``posixpath`` or ``ntpath``. You can provide
|
||||
your own implementation rather than relying on one of the
|
||||
standard library modules, in which case you need to specify
|
||||
                  the *full* path to your module, e.g. '/home/joebloggs/mypathimp.py'.
|
||||
:parameters: A list of RuntimeParameter objects. The order of the objects
|
||||
is very important as the setters and getters will be called
|
||||
                 in the order the RuntimeParameter objects were inserted.
|
||||
:active_cores: This should be a list of all the currently active cpus in
|
||||
the device in ``'/sys/devices/system/cpu/online'``. The
|
||||
returned list should be read from the device at the time
|
||||
of read request.
|
||||
|
||||
"""
|
||||
__metaclass__ = DeviceMeta
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', kind=list_of(caseless_string), mandatory=True, default=None,
|
||||
description="""
|
||||
This is a list of all cpu cores on the device with each
|
||||
element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
|
||||
order of the cores must match the order they are listed in
|
||||
``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
|
||||
                  be an A7 core, and ``'cpu2'`` an A15.
|
||||
"""),
|
||||
Parameter('core_clusters', kind=list_of_integers, mandatory=True, default=None,
|
||||
description="""
|
||||
This is a list indicating the cluster affinity of the CPU cores,
|
||||
                  each element corresponding to the cluster ID of the core corresponding
|
||||
to its index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
|
||||
cluster 0, while cpu2 is on cluster 1. If this is not specified, this
|
||||
will be inferred from ``core_names`` if possible (assuming all cores with
|
||||
the same name are on the same cluster).
|
||||
"""),
|
||||
]
|
||||
|
||||
runtime_parameters = []
|
||||
# dynamic modules are loaded or not based on whether the device supports
|
||||
    # them (established at runtime by the module probing the device).
|
||||
dynamic_modules = []
|
||||
|
||||
# These must be overwritten by subclasses.
|
||||
name = None
|
||||
platform = None
|
||||
default_working_directory = None
|
||||
has_gpu = None
|
||||
path_module = None
|
||||
active_cores = None
|
||||
|
||||
def __init__(self, **kwargs): # pylint: disable=W0613
|
||||
super(Device, self).__init__(**kwargs)
|
||||
if not self.path_module:
|
||||
raise NotImplementedError('path_module must be specified by the deriving classes.')
|
||||
libpath = os.path.dirname(os.__file__)
|
||||
modpath = os.path.join(libpath, self.path_module)
|
||||
if not modpath.lower().endswith('.py'):
|
||||
modpath += '.py'
|
||||
try:
|
||||
self.path = imp.load_source('device_path', modpath)
|
||||
except IOError:
|
||||
raise DeviceError('Unsupported path module: {}'.format(self.path_module))
|
||||
|
||||
def validate(self):
|
||||
# pylint: disable=access-member-before-definition,attribute-defined-outside-init
|
||||
if self.core_names and not self.core_clusters:
|
||||
self.core_clusters = []
|
||||
clusters = []
|
||||
for cn in self.core_names:
|
||||
if cn not in clusters:
|
||||
clusters.append(cn)
|
||||
self.core_clusters.append(clusters.index(cn))
|
||||
if len(self.core_names) != len(self.core_clusters):
|
||||
raise ConfigError('core_names and core_clusters are of different lengths.')
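        # Illustrative sketch: with core_names ['a7', 'a7', 'a15'] and no explicit
        # core_clusters, the inference loop above yields core_clusters == [0, 0, 1].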
|
||||
|
||||
def initialize(self, context):
|
||||
"""
|
||||
        Initialization that is performed at the beginning of the run (after the device has
        been connected).
|
||||
|
||||
"""
|
||||
loader = ExtensionLoader()
|
||||
for module_spec in self.dynamic_modules:
|
||||
module = self._load_module(loader, module_spec)
|
||||
if not hasattr(module, 'probe'):
|
||||
message = 'Module {} does not have "probe" attribute; cannot be loaded dynamically'
|
||||
raise ValueError(message.format(module.name))
|
||||
if module.probe(self):
|
||||
self.logger.debug('Installing module "{}"'.format(module.name))
|
||||
self._install_module(module)
|
||||
else:
|
||||
self.logger.debug('Module "{}" is not supported by the device'.format(module.name))
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Initiate rebooting of the device.
|
||||
|
||||
Added in version 2.1.3.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def boot(self, *args, **kwargs):
|
||||
"""
|
||||
Perform the steps necessary to boot the device to the point where it is ready
|
||||
to accept other commands.
|
||||
|
||||
Changed in version 2.1.3: no longer expected to wait until boot completes.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def connect(self, *args, **kwargs):
|
||||
"""
|
||||
Establish a connection to the device that will be used for subsequent commands.
|
||||
|
||||
Added in version 2.1.3.
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def disconnect(self):
|
||||
""" Close the established connection to the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def ping(self):
|
||||
"""
|
||||
This must return successfully if the device is able to receive commands, or must
|
||||
raise :class:`wlauto.exceptions.DeviceUnresponsiveError` if the device cannot respond.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_runtime_parameter_names(self):
|
||||
return [p.name for p in self._expand_runtime_parameters()]
|
||||
|
||||
def get_runtime_parameters(self):
|
||||
""" returns the runtime parameters that have been set. """
|
||||
# pylint: disable=cell-var-from-loop
|
||||
runtime_parameters = OrderedDict()
|
||||
for rtp in self._expand_runtime_parameters():
|
||||
if not rtp.getter:
|
||||
continue
|
||||
getter = getattr(self, rtp.getter)
|
||||
rtp_value = getter(**rtp.getter_args)
|
||||
runtime_parameters[rtp.name] = rtp_value
|
||||
return runtime_parameters
|
||||
|
||||
def set_runtime_parameters(self, params):
|
||||
"""
|
||||
The parameters are taken from the keyword arguments and are specific to
|
||||
a particular device. See the device documentation.
|
||||
|
||||
"""
|
||||
runtime_parameters = self._expand_runtime_parameters()
|
||||
rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
|
||||
|
||||
params = OrderedDict((k.lower(), v) for k, v in params.iteritems() if v is not None)
|
||||
|
||||
expected_keys = rtp_map.keys()
|
||||
if not set(params.keys()).issubset(set(expected_keys)):
|
||||
unknown_params = list(set(params.keys()).difference(set(expected_keys)))
|
||||
raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
|
||||
|
||||
for param in params:
|
||||
self.logger.debug('Setting runtime parameter "{}"'.format(param))
|
||||
rtp = rtp_map[param]
|
||||
setter = getattr(self, rtp.setter)
|
||||
args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
|
||||
setter(**args)
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
"""Captures the current device screen into the specified file in a PNG format."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_properties(self, output_path):
|
||||
"""Captures and saves the device configuration properties version and
|
||||
any other relevant information. Return them in a dict"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def listdir(self, path, **kwargs):
|
||||
""" List the contents of the specified directory. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def push_file(self, source, dest):
|
||||
""" Push a file from the host file system onto the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def pull_file(self, source, dest):
|
||||
""" Pull a file from device system onto the host file system. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def delete_file(self, filepath):
|
||||
""" Delete the specified file on the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def file_exists(self, filepath):
|
||||
""" Check if the specified file or directory exist on the device. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_pids_of(self, process_name):
|
||||
""" Returns a list of PIDs of the specified process name. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def kill(self, pid, as_root=False):
|
||||
""" Kill the process with the specified PID. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def killall(self, process_name, as_root=False):
|
||||
""" Kill all running processes with the specified name. """
|
||||
raise NotImplementedError()
|
||||
|
||||
def install(self, filepath, **kwargs):
|
||||
""" Install the specified file on the device. What "install" means is device-specific
|
||||
and may possibly also depend on the type of file."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def uninstall(self, filepath):
|
||||
""" Uninstall the specified file on the device. What "uninstall" means is device-specific
|
||||
and may possibly also depend on the type of file."""
|
||||
raise NotImplementedError()
|
||||
|
||||
def execute(self, command, timeout=None, **kwargs):
|
||||
"""
|
||||
Execute the specified command on the device and return the output.
|
||||
|
||||
:param command: Command to be executed on the device.
|
||||
:param timeout: If the command does not return after the specified time,
|
||||
execute() will abort with an error. If there is no timeout for
|
||||
the command, this should be set to 0 or None.
|
||||
|
||||
Other device-specific keyword arguments may also be specified.
|
||||
|
||||
:returns: The stdout output from the command.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def set_sysfile_value(self, filepath, value, verify=True):
|
||||
"""
|
||||
Write the specified value to the specified file on the device
|
||||
and verify that the value has actually been written.
|
||||
|
||||
:param filepath: The file to be modified.
|
||||
:param value: The value to be written to the file. Must be
|
||||
an int or a string convertible to an int.
|
||||
:param verify: Specifies whether the value should be verified, once written.
|
||||
|
||||
Should raise DeviceError if the value could not be written.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_sysfile_value(self, sysfile, kind=None):
|
||||
"""
|
||||
Get the contents of the specified sysfile.
|
||||
|
||||
:param sysfile: The file whose contents will be returned.
|
||||
|
||||
:param kind: The type of value to be expected in the sysfile. This can
|
||||
be any Python callable that takes a single str argument.
|
||||
If not specified or is None, the contents will be returned
|
||||
as a string.
|
||||
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
This gets invoked before an iteration is started and is intended to help the
device manage any internal supporting functions.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
This gets invoked after iteration execution has completed and is intended to help the
device manage any internal supporting functions.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def __str__(self):
|
||||
return 'Device<{}>'.format(self.name)
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
def _expand_runtime_parameters(self):
|
||||
expanded_params = []
|
||||
for param in self.runtime_parameters:
|
||||
if isinstance(param, CoreParameter):
|
||||
expanded_params.extend(param.get_runtime_parameters(self.core_names)) # pylint: disable=no-member
|
||||
else:
|
||||
expanded_params.append(param)
|
||||
return expanded_params
|
||||
|
||||
@contextmanager
|
||||
def _check_alive(self):
|
||||
try:
|
||||
yield
|
||||
except Exception as e:
|
||||
self.ping()
|
||||
raise e
|
318
wlauto/core/device_manager.py
Normal file
@@ -0,0 +1,318 @@
|
||||
import string
|
||||
from collections import OrderedDict
|
||||
|
||||
from wlauto.core.extension import Extension, Parameter
|
||||
from wlauto.exceptions import ConfigError
|
||||
from wlauto.utils.types import list_of_integers, list_of, caseless_string
|
||||
|
||||
from devlib.platform import Platform
|
||||
from devlib.target import AndroidTarget, Cpuinfo, KernelVersion, KernelConfig
|
||||
|
||||
__all__ = ['RuntimeParameter', 'CoreParameter', 'DeviceManager', 'TargetInfo']
|
||||
|
||||
|
||||
class RuntimeParameter(object):
|
||||
"""
|
||||
A runtime parameter which has its getter and setter methods associated
with it.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, name, getter, setter,
|
||||
getter_args=None, setter_args=None,
|
||||
value_name='value', override=False):
|
||||
"""
|
||||
:param name: the name of the parameter.
|
||||
:param getter: the getter method which returns the value of this parameter.
|
||||
:param setter: the setter method which sets the value of this parameter. The setter
|
||||
always expects to be passed one argument when it is called.
|
||||
:param getter_args: keyword arguments to be used when invoking the getter.
|
||||
:param setter_args: keyword arguments to be used when invoking the setter.
|
||||
:param override: A ``bool`` that specifies whether a parameter of the same name further up the
|
||||
hierarchy should be overridden. If this is ``False`` (the default), an exception
|
||||
will be raised by the ``AttributeCollection`` instead.
|
||||
|
||||
"""
|
||||
self.name = name
|
||||
self.getter = getter
|
||||
self.setter = setter
|
||||
self.getter_args = getter_args or {}
|
||||
self.setter_args = setter_args or {}
|
||||
self.value_name = value_name
|
||||
self.override = override
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
|
||||
class CoreParameter(RuntimeParameter):
|
||||
"""A runtime parameter that will get expanded into a RuntimeParameter for each core type."""
|
||||
|
||||
def get_runtime_parameters(self, core_names):
|
||||
params = []
|
||||
for core in set(core_names):
|
||||
name = string.Template(self.name).substitute(core=core)
|
||||
getter = string.Template(self.getter).substitute(core=core)
|
||||
setter = string.Template(self.setter).substitute(core=core)
|
||||
getargs = dict(self.getter_args.items() + [('core', core)])
|
||||
setargs = dict(self.setter_args.items() + [('core', core)])
|
||||
params.append(RuntimeParameter(name, getter, setter, getargs, setargs, self.value_name, self.override))
|
||||
return params
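# Illustrative expansion sketch (values assumed, not from the original source):
# CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
#               value_name='governor') expanded against
# core_names == ['a7', 'a7', 'a15'] yields two RuntimeParameters named
# 'a7_governor' and 'a15_governor', each with {'core': <core>} merged into its
# getter and setter arguments.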
|
||||
|
||||
|
||||
class TargetInfo(object):
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
instance = TargetInfo()
|
||||
instance.target = pod['target']
|
||||
instance.abi = pod['abi']
|
||||
instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
|
||||
instance.os = pod['os']
|
||||
instance.os_version = pod['os_version']
|
||||
instance.abi = pod['abi']
|
||||
instance.is_rooted = pod['is_rooted']
|
||||
instance.kernel_version = KernelVersion(pod['kernel_version'])
|
||||
instance.kernel_config = KernelConfig(pod['kernel_config'])
|
||||
|
||||
if pod["target"] == "AndroidTarget":
|
||||
instance.screen_resolution = pod['screen_resolution']
|
||||
instance.prop = pod['prop']
|
||||
instance.android_id = pod['android_id']
|
||||
|
||||
return instance
|
||||
|
||||
def __init__(self, target=None):
|
||||
if target:
|
||||
self.target = target.__class__.__name__
|
||||
self.cpuinfo = target.cpuinfo
|
||||
self.os = target.os
|
||||
self.os_version = target.os_version
|
||||
self.abi = target.abi
|
||||
self.is_rooted = target.is_rooted
|
||||
self.kernel_version = target.kernel_version
|
||||
self.kernel_config = target.config
|
||||
|
||||
if isinstance(target, AndroidTarget):
|
||||
self.screen_resolution = target.screen_resolution
|
||||
self.prop = target.getprop()
|
||||
self.android_id = target.android_id
|
||||
|
||||
else:
|
||||
self.target = None
|
||||
self.cpuinfo = None
|
||||
self.os = None
|
||||
self.os_version = None
|
||||
self.abi = None
|
||||
self.is_rooted = None
|
||||
self.kernel_version = None
|
||||
self.kernel_config = None
|
||||
|
||||
if isinstance(target, AndroidTarget):
|
||||
self.screen_resolution = None
|
||||
self.prop = None
|
||||
self.android_id = None
|
||||
|
||||
def to_pod(self):
|
||||
pod = {}
|
||||
pod['target'] = self.target  # already stored as the target class name string
|
||||
pod['abi'] = self.abi
|
||||
pod['cpuinfo'] = self.cpuinfo.text
|
||||
pod['os'] = self.os
|
||||
pod['os_version'] = self.os_version
|
||||
pod['abi'] = self.abi
|
||||
pod['is_rooted'] = self.is_rooted
|
||||
pod['kernel_version'] = self.kernel_version.version
|
||||
pod['kernel_config'] = self.kernel_config.text
|
||||
|
||||
if self.target == "AndroidTarget":
|
||||
pod['screen_resolution'] = self.screen_resolution
|
||||
pod['prop'] = self.prop
|
||||
pod['android_id'] = self.android_id
|
||||
|
||||
return pod
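# Hypothetical round-trip sketch (not part of the original source):
#   info = TargetInfo(target)            # snapshot a connected devlib target
#   pod = info.to_pod()                  # plain dict, safe to serialise
#   restored = TargetInfo.from_pod(pod)
# from_pod() rebuilds Cpuinfo, KernelVersion and KernelConfig from the stored
# text, and only restores the Android-specific fields when
# pod['target'] == 'AndroidTarget'.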
|
||||
|
||||
|
||||
class DeviceManager(Extension):
|
||||
|
||||
name = None
|
||||
target_type = None
|
||||
platform_type = Platform
|
||||
has_gpu = None
|
||||
path_module = None
|
||||
info = None
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', kind=list_of(caseless_string),
|
||||
description="""
|
||||
This is a list of all cpu cores on the device with each
|
||||
element being the core type, e.g. ``['a7', 'a7', 'a15']``. The
|
||||
order of the cores must match the order they are listed in
|
||||
``'/sys/devices/system/cpu'``. So in this case, ``'cpu0'`` must
|
||||
be an A7 core, and ``'cpu2'`` an A15.
|
||||
"""),
|
||||
Parameter('core_clusters', kind=list_of_integers,
|
||||
description="""
|
||||
This is a list indicating the cluster affinity of the CPU cores,
|
||||
each element corresponding to the cluster ID of the core corresponding
|
||||
to its index. E.g. ``[0, 0, 1]`` indicates that cpu0 and cpu1 are on
|
||||
cluster 0, while cpu2 is on cluster 1. If this is not specified, this
|
||||
will be inferred from ``core_names`` if possible (assuming all cores with
|
||||
the same name are on the same cluster).
|
||||
"""),
|
||||
Parameter('working_directory',
|
||||
description='''
|
||||
Working directory to be used by WA. This must be in a location where the specified user
|
||||
has write permissions. This will default to /home/<username>/wa (or to /root/wa, if
|
||||
username is 'root').
|
||||
'''),
|
||||
Parameter('binaries_directory',
|
||||
description='Location of executable binaries on this device (must be in PATH).'),
|
||||
]
|
||||
modules = []
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('sysfile_values', 'get_sysfile_values', 'set_sysfile_values', value_name='params'),
|
||||
CoreParameter('${core}_cores', 'get_number_of_online_cpus', 'set_number_of_online_cpus',
|
||||
value_name='number'),
|
||||
CoreParameter('${core}_min_frequency', 'get_core_min_frequency', 'set_core_min_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_max_frequency', 'get_core_max_frequency', 'set_core_max_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_frequency', 'get_core_cur_frequency', 'set_core_cur_frequency',
|
||||
value_name='freq'),
|
||||
CoreParameter('${core}_governor', 'get_core_governor', 'set_core_governor',
|
||||
value_name='governor'),
|
||||
CoreParameter('${core}_governor_tunables', 'get_core_governor_tunables', 'set_core_governor_tunables',
|
||||
value_name='tunables'),
|
||||
]
|
||||
|
||||
# Framework
|
||||
|
||||
def connect(self):
|
||||
raise NotImplementedError("connect method must be implemented for device managers")
|
||||
|
||||
def initialize(self, context):
|
||||
super(DeviceManager, self).initialize(context)
|
||||
self.info = TargetInfo(self.target)
|
||||
self.target.setup()
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def validate(self):
|
||||
pass
|
||||
|
||||
# Runtime Parameters
|
||||
|
||||
def get_runtime_parameter_names(self):
|
||||
return [p.name for p in self._expand_runtime_parameters()]
|
||||
|
||||
def get_runtime_parameters(self):
|
||||
""" returns the runtime parameters that have been set. """
|
||||
# pylint: disable=cell-var-from-loop
|
||||
runtime_parameters = OrderedDict()
|
||||
for rtp in self._expand_runtime_parameters():
|
||||
if not rtp.getter:
|
||||
continue
|
||||
getter = getattr(self, rtp.getter)
|
||||
rtp_value = getter(**rtp.getter_args)
|
||||
runtime_parameters[rtp.name] = rtp_value
|
||||
return runtime_parameters
|
||||
|
||||
def set_runtime_parameters(self, params):
|
||||
"""
|
||||
The parameters are taken from the keyword arguments and are specific to
|
||||
a particular device. See the device documentation.
|
||||
|
||||
"""
|
||||
runtime_parameters = self._expand_runtime_parameters()
|
||||
rtp_map = {rtp.name.lower(): rtp for rtp in runtime_parameters}
|
||||
|
||||
params = OrderedDict((k.lower(), v) for k, v in params.iteritems() if v is not None)
|
||||
|
||||
expected_keys = rtp_map.keys()
|
||||
if not set(params.keys()).issubset(set(expected_keys)):
|
||||
unknown_params = list(set(params.keys()).difference(set(expected_keys)))
|
||||
raise ConfigError('Unknown runtime parameter(s): {}'.format(unknown_params))
|
||||
|
||||
for param in params:
|
||||
self.logger.debug('Setting runtime parameter "{}"'.format(param))
|
||||
rtp = rtp_map[param]
|
||||
setter = getattr(self, rtp.setter)
|
||||
args = dict(rtp.setter_args.items() + [(rtp.value_name, params[rtp.name.lower()])])
|
||||
setter(**args)
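# Dispatch sketch (illustrative, assuming a target with 'a15' cores): given
# params == {'a15_cores': 2}, rtp_map['a15_cores'] resolves to the expanded
# '${core}_cores' CoreParameter, so the call above becomes
# self.set_number_of_online_cpus(core='a15', number=2).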
|
||||
|
||||
def _expand_runtime_parameters(self):
|
||||
expanded_params = []
|
||||
for param in self.runtime_parameters:
|
||||
if isinstance(param, CoreParameter):
|
||||
expanded_params.extend(param.get_runtime_parameters(self.target.core_names)) # pylint: disable=no-member
|
||||
else:
|
||||
expanded_params.append(param)
|
||||
return expanded_params
|
||||
|
||||
# Runtime parameter getters/setters
|
||||
|
||||
_written_sysfiles = []
|
||||
|
||||
def get_sysfile_values(self):
|
||||
return self._written_sysfiles
|
||||
|
||||
def set_sysfile_values(self, params):
|
||||
for sysfile, value in params.iteritems():
|
||||
verify = not sysfile.endswith('!')
|
||||
sysfile = sysfile.rstrip('!')
|
||||
self._written_sysfiles.append((sysfile, value))
|
||||
self.target.write_value(sysfile, value, verify=verify)
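# Sketch of the agenda-facing 'sysfile_values' runtime parameter (paths are
# illustrative only); a trailing '!' on a path skips verification:
#   sysfile_values:
#       /proc/sys/kernel/sched_latency_ns: 10000000
#       /sys/kernel/debug/tracing/tracing_on!: 1
# The first entry is written with verify=True, the second with verify=False.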
|
||||
|
||||
# pylint: disable=E1101
|
||||
|
||||
def _get_core_online_cpu(self, core):
|
||||
try:
|
||||
return self.target.list_online_core_cpus(core)[0]
|
||||
except IndexError:
|
||||
raise ValueError("No {} cores are online".format(core))
|
||||
|
||||
def get_number_of_online_cpus(self, core):
|
||||
return len(self.target.list_online_core_cpus(core))
|
||||
|
||||
def set_number_of_online_cpus(self, core, number):
|
||||
for cpu in self.target.core_cpus(core)[:number]:
|
||||
self.target.hotplug.online(cpu)
|
||||
|
||||
def get_core_min_frequency(self, core):
|
||||
return self.target.cpufreq.get_min_frequency(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_min_frequency(self, core, frequency):
|
||||
self.target.cpufreq.set_min_frequency(self._get_core_online_cpu(core), frequency)
|
||||
|
||||
def get_core_max_frequency(self, core):
|
||||
return self.target.cpufreq.get_max_frequency(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_max_frequency(self, core, frequency):
|
||||
self.target.cpufreq.set_max_frequency(self._get_core_online_cpu(core), frequency)
|
||||
|
||||
def get_core_frequency(self, core):
|
||||
return self.target.cpufreq.get_frequency(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_frequency(self, core, frequency):
|
||||
self.target.cpufreq.set_frequency(self._get_core_online_cpu(core), frequency)
|
||||
|
||||
def get_core_governor(self, core):
|
||||
return self.target.cpufreq.get_cpu_governor(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_governor(self, core, governor):
|
||||
self.target.cpufreq.set_cpu_governor(self._get_core_online_cpu(core), governor)
|
||||
|
||||
def get_core_governor_tunables(self, core):
|
||||
return self.target.cpufreq.get_governor_tunables(self._get_core_online_cpu(core))
|
||||
|
||||
def set_core_governor_tunables(self, core, tunables):
|
||||
self.target.cpufreq.set_governor_tunables(self._get_core_online_cpu(core),
|
||||
*tunables)
|
@@ -142,8 +142,9 @@ class ExecutionContext(object):
|
||||
def result(self):
|
||||
return getattr(self.current_job, 'result', self.run_result)
|
||||
|
||||
def __init__(self, device, config):
|
||||
self.device = device
|
||||
def __init__(self, device_manager, config):
|
||||
self.device_manager = device_manager
|
||||
self.device = self.device_manager.target
|
||||
self.config = config
|
||||
self.reboot_policy = config.reboot_policy
|
||||
self.output_directory = None
|
||||
@@ -258,6 +259,7 @@ class Executor(object):
|
||||
self.warning_logged = False
|
||||
self.config = None
|
||||
self.ext_loader = None
|
||||
self.device_manager = None
|
||||
self.device = None
|
||||
self.context = None
|
||||
|
||||
@@ -301,10 +303,11 @@ class Executor(object):
|
||||
self.logger.debug('Initialising device configuration.')
|
||||
if not self.config.device:
|
||||
raise ConfigError('Make sure a device is specified in the config.')
|
||||
self.device = self.ext_loader.get_device(self.config.device, **self.config.device_config)
|
||||
self.device.validate()
|
||||
self.device_manager = self.ext_loader.get_device_manager(self.config.device, **self.config.device_config)
|
||||
self.device_manager.validate()
|
||||
self.device = self.device_manager.target
|
||||
|
||||
self.context = ExecutionContext(self.device, self.config)
|
||||
self.context = ExecutionContext(self.device_manager, self.config)
|
||||
|
||||
self.logger.debug('Loading resource discoverers.')
|
||||
self.context.initialize()
|
||||
@@ -384,7 +387,7 @@ class Executor(object):
|
||||
runnercls = RandomRunner
|
||||
else:
|
||||
raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
|
||||
return runnercls(self.device, self.context, result_manager)
|
||||
return runnercls(self.device_manager, self.context, result_manager)
|
||||
|
||||
def _error_signalled_callback(self):
|
||||
self.error_logged = True
|
||||
@@ -464,8 +467,9 @@ class Runner(object):
|
||||
return True
|
||||
return self.current_job.spec.id != self.next_job.spec.id
|
||||
|
||||
def __init__(self, device, context, result_manager):
|
||||
self.device = device
|
||||
def __init__(self, device_manager, context, result_manager):
|
||||
self.device_manager = device_manager
|
||||
self.device = device_manager.target
|
||||
self.context = context
|
||||
self.result_manager = result_manager
|
||||
self.logger = logging.getLogger('Runner')
|
||||
@@ -533,14 +537,13 @@ class Runner(object):
|
||||
self.context.run_info.start_time = datetime.utcnow()
|
||||
self._connect_to_device()
|
||||
self.logger.info('Initializing device')
|
||||
self.device.initialize(self.context)
|
||||
self.device_manager.initialize(self.context)
|
||||
|
||||
self.logger.info('Initializing workloads')
|
||||
for workload_spec in self.context.config.workload_specs:
|
||||
workload_spec.workload.initialize(self.context)
|
||||
|
||||
props = self.device.get_properties(self.context)
|
||||
self.context.run_info.device_properties = props
|
||||
self.context.run_info.device_properties = self.device_manager.info
|
||||
self.result_manager.initialize(self.context)
|
||||
self._send(signal.RUN_INIT)
|
||||
|
||||
@@ -550,7 +553,7 @@ class Runner(object):
|
||||
def _connect_to_device(self):
|
||||
if self.context.reboot_policy.perform_initial_boot:
|
||||
try:
|
||||
self.device.connect()
|
||||
self.device_manager.connect()
|
||||
except DeviceError: # device may be offline
|
||||
if self.device.can('reset_power'):
|
||||
with self._signal_wrap('INITIAL_BOOT'):
|
||||
@@ -564,7 +567,7 @@ class Runner(object):
|
||||
self._reboot_device()
|
||||
else:
|
||||
self.logger.info('Connecting to device')
|
||||
self.device.connect()
|
||||
self.device_manager.connect()
|
||||
|
||||
def _init_job(self):
|
||||
self.current_job.result.status = IterationResult.RUNNING
|
||||
@@ -597,7 +600,7 @@ class Runner(object):
|
||||
|
||||
instrumentation.disable_all()
|
||||
instrumentation.enable(spec.instrumentation)
|
||||
self.device.start()
|
||||
self.device_manager.start()
|
||||
|
||||
if self.spec_changed:
|
||||
self._send(signal.WORKLOAD_SPEC_START)
|
||||
@@ -606,7 +609,7 @@ class Runner(object):
|
||||
try:
|
||||
setup_ok = False
|
||||
with self._handle_errors('Setting up device parameters'):
|
||||
self.device.set_runtime_parameters(spec.runtime_parameters)
|
||||
self.device_manager.set_runtime_parameters(spec.runtime_parameters)
|
||||
setup_ok = True
|
||||
|
||||
if setup_ok:
|
||||
@@ -625,7 +628,7 @@ class Runner(object):
|
||||
if self.spec_will_change or not spec.enabled:
|
||||
self._send(signal.WORKLOAD_SPEC_END)
|
||||
finally:
|
||||
self.device.stop()
|
||||
self.device_manager.stop()
|
||||
|
||||
def _finalize_job(self):
|
||||
self.context.run_result.iteration_results.append(self.current_job.result)
|
||||
@@ -737,7 +740,7 @@ class Runner(object):
|
||||
except (KeyboardInterrupt, DeviceNotRespondingError):
|
||||
raise
|
||||
except (WAError, TimeoutError), we:
|
||||
self.device.ping()
|
||||
self.device.check_responsive()
|
||||
if self.current_job:
|
||||
self.current_job.result.status = on_error_status
|
||||
self.current_job.result.add_event(str(we))
|
||||
|
@@ -61,7 +61,7 @@ we want to push the file to the target device and then change the file mode to
|
||||
755 ::
|
||||
|
||||
def setup(self, context):
|
||||
self.device.push_file(BINARY_FILE, self.device.working_directory)
|
||||
self.device.push(BINARY_FILE, self.device.working_directory)
|
||||
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
|
||||
|
||||
Then we implemented the start method, which will simply run the file to start
|
||||
@@ -85,7 +85,7 @@ are metric key, value, unit and lower_is_better, which is a boolean. ::
|
||||
def update_result(self, context):
|
||||
# pull the trace file to the device
|
||||
result = os.path.join(self.device.working_directory, 'trace.txt')
|
||||
self.device.pull_file(result, context.working_directory)
|
||||
self.device.pull(result, context.working_directory)
|
||||
|
||||
# parse the file if needs to be parsed, or add result to
|
||||
# context.result
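A minimal sketch of that parsing step (the metric name, units and file format
here are purely illustrative, not part of the original example)::

    with open(os.path.join(context.working_directory, 'trace.txt')) as fh:
        value = float(fh.read().strip())
    context.result.add_metric('trace_value', value, 'us', lower_is_better=True)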
|
||||
@@ -94,7 +94,7 @@ At the end, we might want to delete any files generated by the instrumentation
|
||||
and the code to clear these file goes in teardown method. ::
|
||||
|
||||
def teardown(self, context):
|
||||
self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
|
||||
self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))
|
||||
|
||||
"""
|
||||
|
||||
|
@@ -47,8 +47,9 @@ class Workload(Extension):
|
||||
super(Workload, self).__init__(**kwargs)
|
||||
if self.supported_devices and device.name not in self.supported_devices:
|
||||
raise WorkloadError('Workload {} does not support device {}'.format(self.name, device.name))
|
||||
if self.supported_platforms and device.platform not in self.supported_platforms:
|
||||
raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.platform))
|
||||
|
||||
if self.supported_platforms and device.os not in self.supported_platforms:
|
||||
raise WorkloadError('Workload {} does not support platform {}'.format(self.name, device.os))
|
||||
self.device = device
|
||||
|
||||
def init_resources(self, context):
|
||||
@@ -101,4 +102,3 @@ class Workload(Extension):
|
||||
|
||||
def __str__(self):
|
||||
return '<Workload {}>'.format(self.name)
|
||||
|
||||
|
@@ -1,16 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
@@ -1,16 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
@@ -1,222 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
from wlauto.common.gem5.device import BaseGem5Device
|
||||
from wlauto.exceptions import DeviceError
|
||||
|
||||
|
||||
class Gem5AndroidDevice(BaseGem5Device, AndroidDevice):
|
||||
"""
|
||||
Implements gem5 Android device.
|
||||
|
||||
This class allows a user to connect WA to a simulation using gem5. The
|
||||
connection to the device is made using the telnet connection of the
|
||||
simulator, and is used for all commands. The simulator does not have ADB
|
||||
support, and therefore we need to fall back to using standard shell
|
||||
commands.
|
||||
|
||||
Files are copied into the simulation using a VirtIO 9P device in gem5. Files
|
||||
are copied out of the simulated environment using the m5 writefile command
|
||||
within the simulated system.
|
||||
|
||||
When starting the workload run, the simulator is automatically started by
|
||||
Workload Automation, and a connection to the simulator is established. WA
|
||||
will then wait for Android to boot on the simulated system (which can take
|
||||
hours), prior to executing any other commands on the device. It is also
|
||||
possible to resume from a checkpoint when starting the simulation. To do
|
||||
this, please append the relevant checkpoint commands from the gem5
|
||||
simulation script to the gem5_discription argument in the agenda.
|
||||
|
||||
Host system requirements:
|
||||
* VirtIO support. We rely on diod on the host system. This can be
|
||||
installed on ubuntu using the following command:
|
||||
|
||||
sudo apt-get install diod
|
||||
|
||||
Guest requirements:
|
||||
* VirtIO support. We rely on VirtIO to move files into the simulation.
|
||||
Please make sure that the following are set in the kernel
|
||||
configuration:
|
||||
|
||||
CONFIG_NET_9P=y
|
||||
|
||||
CONFIG_NET_9P_VIRTIO=y
|
||||
|
||||
CONFIG_9P_FS=y
|
||||
|
||||
CONFIG_9P_FS_POSIX_ACL=y
|
||||
|
||||
CONFIG_9P_FS_SECURITY=y
|
||||
|
||||
CONFIG_VIRTIO_BLK=y
|
||||
|
||||
* m5 binary. Please make sure that the m5 binary is on the device and
|
||||
can be found in the path.
|
||||
"""
|
||||
|
||||
name = 'gem5_android'
|
||||
platform = 'android'
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
]
|
||||
|
||||
# Overwritten from Device. For documentation, see corresponding method in
|
||||
# Device.
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.logger = logging.getLogger('Gem5AndroidDevice')
|
||||
AndroidDevice.__init__(self, **kwargs)
|
||||
BaseGem5Device.__init__(self)
|
||||
|
||||
def login_to_device(self):
|
||||
pass
|
||||
|
||||
def wait_for_boot(self):
|
||||
"""
|
||||
Wait for the system to boot
|
||||
|
||||
We monitor the sys.boot_completed and service.bootanim.exit system
|
||||
properties to determine when the system has finished booting. In the
|
||||
event that we cannot coerce the result of service.bootanim.exit to an
|
||||
integer, we assume that the boot animation was disabled and do not wait
|
||||
for it to finish.
|
||||
|
||||
"""
|
||||
self.logger.info("Waiting for Android to boot...")
|
||||
while True:
|
||||
booted = False
|
||||
anim_finished = True  # Assume the boot animation was disabled if the value cannot be parsed
|
||||
try:
|
||||
booted = (int('0' + self.gem5_shell('getprop sys.boot_completed', check_exit_code=False).strip()) == 1)
|
||||
anim_finished = (int(self.gem5_shell('getprop service.bootanim.exit', check_exit_code=False).strip()) == 1)
|
||||
except ValueError:
|
||||
pass
|
||||
if booted and anim_finished:
|
||||
break
|
||||
time.sleep(60)
|
||||
|
||||
self.logger.info("Android booted")
|
||||
|
||||
def install(self, filepath, timeout=3 * 3600): # pylint: disable=W0221
|
||||
""" Install an APK or a normal executable """
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
return self.install_apk(filepath, timeout)
|
||||
else:
|
||||
return self.install_executable(filepath)
|
||||
|
||||
def install_apk(self, filepath, timeout=3 * 3600): # pylint: disable=W0221
|
||||
"""
|
||||
Install an APK on the gem5 device
|
||||
|
||||
The APK is pushed to the device. Then the file and folder permissions
|
||||
are changed to ensure that the APK can be correctly installed. The APK
|
||||
is then installed on the device using 'pm'.
|
||||
"""
|
||||
self._check_ready()
|
||||
self.logger.info("Installing {}".format(filepath))
|
||||
ext = os.path.splitext(filepath)[1].lower()
|
||||
if ext == '.apk':
|
||||
filename = os.path.basename(filepath)
|
||||
on_device_path = os.path.join('/data/local/tmp', filename)
|
||||
self.push_file(filepath, on_device_path)
|
||||
# We need to make sure that the folder permissions are set
|
||||
# correctly, else the APK does not install correctly.
|
||||
self.gem5_shell('chmod 775 /data/local/tmp')
|
||||
self.gem5_shell('chmod 774 {}'.format(on_device_path))
|
||||
self.logger.debug("Actually installing the APK: {}".format(on_device_path))
|
||||
return self.gem5_shell("pm install {}".format(on_device_path))
|
||||
else:
|
||||
raise DeviceError('Can\'t install {}: unsupported format.'.format(filepath))
|
||||
|
||||
def install_executable(self, filepath, with_name=None):
|
||||
""" Install an executable """
|
||||
executable_name = os.path.basename(filepath)
|
||||
on_device_file = self.path.join(self.working_directory, executable_name)
|
||||
on_device_executable = self.path.join(self.binaries_directory, executable_name)
|
||||
self.push_file(filepath, on_device_file)
|
||||
if self.busybox:
|
||||
self.execute('{} cp {} {}'.format(self.busybox, on_device_file, on_device_executable))
|
||||
else:
|
||||
self.execute('cat {} > {}'.format(on_device_file, on_device_executable))
|
||||
self.execute('chmod 0777 {}'.format(on_device_executable))
|
||||
return on_device_executable
|
||||
|
||||
def uninstall(self, package):
|
||||
self._check_ready()
|
||||
self.gem5_shell("pm uninstall {}".format(package))
|
||||
|
||||
def dump_logcat(self, outfile, filter_spec=None):
|
||||
""" Extract logcat from simulation """
|
||||
self.logger.info("Extracting logcat from the simulated system")
|
||||
filename = outfile.split('/')[-1]
|
||||
command = 'logcat -d > {}'.format(filename)
|
||||
self.gem5_shell(command)
|
||||
self.pull_file("{}".format(filename), outfile)
|
||||
|
||||
def clear_logcat(self):
|
||||
"""Clear (flush) logcat log."""
|
||||
if self._logcat_poller:
|
||||
return self._logcat_poller.clear_buffer()
|
||||
else:
|
||||
return self.gem5_shell('logcat -c')
|
||||
|
||||
def disable_selinux(self):
|
||||
""" Disable SELinux. Overridden as parent implementation uses ADB """
|
||||
api_level = int(self.gem5_shell('getprop ro.build.version.sdk').strip())
|
||||
|
||||
# SELinux was added in Android 4.3 (API level 18). Trying to
|
||||
# 'getenforce' in earlier versions will produce an error.
|
||||
if api_level >= 18:
|
||||
se_status = self.execute('getenforce', as_root=True).strip()
|
||||
if se_status == 'Enforcing':
|
||||
self.execute('setenforce 0', as_root=True)
|
||||
|
||||
def get_properties(self, context): # pylint: disable=R0801
|
||||
""" Get the property files from the device """
|
||||
BaseGem5Device.get_properties(self, context)
|
||||
props = self._get_android_properties(context)
|
||||
return props
|
||||
|
||||
def disable_screen_lock(self):
|
||||
"""
|
||||
Attempts to disable the screen lock on the device.
|
||||
|
||||
Overridden here as otherwise we have issues with too many backslashes.
|
||||
"""
|
||||
lockdb = '/data/system/locksettings.db'
|
||||
sqlcommand = "update locksettings set value=\'0\' where name=\'screenlock.disabled\';"
|
||||
self.execute('sqlite3 {} "{}"'.format(lockdb, sqlcommand), as_root=True)
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
if BaseGem5Device.capture_screen(self, filepath):
|
||||
return
|
||||
|
||||
# If we didn't manage to do the above, call the parent class.
|
||||
self.logger.warning("capture_screen: falling back to parent class implementation")
|
||||
AndroidDevice.capture_screen(self, filepath)
|
||||
|
||||
def initialize(self, context):
|
||||
self.resize_shell()
|
||||
self.deploy_m5(context, force=False)
|
@@ -1,38 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class GenericDevice(AndroidDevice):
|
||||
name = 'generic_android'
|
||||
description = """
|
||||
A generic Android device interface. Use this if you do not have an interface
|
||||
for your device.
|
||||
|
||||
This should allow basic WA functionality on most Android devices using adb over
|
||||
USB. Some additional configuration may be required for some WA extensions
|
||||
(e.g. configuring ``core_names`` and ``core_clusters``).
|
||||
|
||||
"""
|
||||
|
||||
default_working_directory = '/storage/sdcard0/working'
|
||||
has_gpu = True
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
]
|
@@ -1,223 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
# pylint: disable=E1101
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
|
||||
import pexpect
|
||||
|
||||
from wlauto import BigLittleDevice, Parameter
|
||||
from wlauto.exceptions import DeviceError
|
||||
from wlauto.utils.serial_port import open_serial_connection, pulse_dtr
|
||||
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
|
||||
from wlauto.utils.uefi import UefiMenu, UefiConfig
|
||||
from wlauto.utils.uboot import UbootMenu
|
||||
|
||||
|
||||
AUTOSTART_MESSAGE = 'Press Enter to stop auto boot...'
|
||||
|
||||
|
||||
class Juno(BigLittleDevice):
|
||||
|
||||
name = 'juno'
|
||||
description = """
|
||||
ARM Juno next generation big.LITTLE development platform.
|
||||
"""
|
||||
|
||||
capabilities = ['reset_power']
|
||||
|
||||
has_gpu = True
|
||||
|
||||
core_modules = [
|
||||
'vexpress',
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('retries', kind=int, default=2,
|
||||
description="""Specifies the number of times the device will attempt to recover
|
||||
(normally, with a hard reset) if it detects that something went wrong."""),
|
||||
|
||||
Parameter('microsd_mount_point', default='/media/JUNO',
|
||||
description='Location at which the device\'s MicroSD card will be mounted.'),
|
||||
Parameter('port', default='/dev/ttyS0', description='Serial port on which the device is connected.'),
|
||||
Parameter('baudrate', kind=int, default=115200, description='Serial connection baud.'),
|
||||
Parameter('timeout', kind=int, default=300, description='Serial connection timeout.'),
|
||||
Parameter('core_names', default=['a53', 'a53', 'a53', 'a53', 'a57', 'a57'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1], override=True),
|
||||
|
||||
Parameter('bootloader', default='uefi', allowed_values=['uefi', 'u-boot'],
|
||||
description="""Bootloader used on the device."""),
|
||||
Parameter('actually_disconnect', kind=bool, default=False,
|
||||
description="""
|
||||
Actually perform "adb disconnect" on closing the connection to the device.
|
||||
"""),
|
||||
|
||||
# VExpress flasher expects a device to have these:
|
||||
Parameter('uefi_entry', default='WA',
|
||||
description='The name of the entry to use (will be created if does not exist).'),
|
||||
Parameter('uefi_config', kind=UefiConfig,
|
||||
description='''Specifies the configuration for the UEFI entry for this device. If the
entry specified by the ``uefi_entry`` parameter doesn't exist in the UEFI menu,
it will be created using this config. This configuration will also be
used when flashing new images.''',
|
||||
default={
|
||||
'image_name': 'Image',
|
||||
'image_args': None, # populated from bootargs if not specified
|
||||
'fdt_support': True,
|
||||
}
|
||||
),
|
||||
Parameter('bootargs', default='console=ttyAMA0,115200 earlyprintk=pl011,0x7ff80000 '
|
||||
'verbose debug init=/init root=/dev/sda1 rw ip=dhcp '
|
||||
'rootwait video=DVI-D-1:1920x1080R@60',
|
||||
description='''Default boot arguments to use when boot_arguments were not specified.'''),
|
||||
]
|
||||
|
||||
short_delay = 1
|
||||
firmware_prompt = 'Cmd>'
|
||||
|
||||
def validate(self):
|
||||
if not self.uefi_config.image_args:
|
||||
self.uefi_config.image_args = self.bootargs
|
||||
|
||||
def boot(self, hard=False, **kwargs):
|
||||
if kwargs:
|
||||
self.bootargs = kwargs # pylint: disable=attribute-defined-outside-init
|
||||
if hard:
|
||||
self.logger.debug('Performing a hard reset.')
|
||||
self.hard_reset()
|
||||
else:
|
||||
self.logger.debug('Resetting the device.')
|
||||
self.reset()
|
||||
if self.bootloader == 'uefi':
|
||||
self._boot_via_uefi()
|
||||
else:
|
||||
self._boot_via_uboot(bootargs=self.bootargs)
|
||||
|
||||
def _boot_via_uboot(self, **kwargs):
|
||||
if not kwargs:
|
||||
# Standard linaro configuration will proceed directly to the kernel
|
||||
return
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as target:
|
||||
menu = UbootMenu(target)
|
||||
self.logger.debug('Waiting for U-Boot prompt...')
|
||||
menu.open(timeout=120)
|
||||
for var, value in kwargs.iteritems():
|
||||
menu.setenv(var, value)
|
||||
menu.boot()
|
||||
|
||||
def _boot_via_uefi(self):
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=self.timeout,
|
||||
init_dtr=0) as target:
|
||||
menu = UefiMenu(target)
|
||||
self.logger.debug('Waiting for UEFI menu...')
|
||||
menu.open(timeout=120)
|
||||
try:
|
||||
menu.select(self.uefi_entry)
|
||||
except LookupError:
|
||||
self.logger.debug('{} UEFI entry not found.'.format(self.uefi_entry))
|
||||
self.logger.debug('Attempting to create one using default flasher configuration.')
|
||||
menu.create_entry(self.uefi_entry, self.uefi_config)
|
||||
menu.select(self.uefi_entry)
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
target.expect(self.android_prompt, timeout=self.timeout)
|
||||
|
||||
def connect(self):
|
||||
if not self._is_ready:
|
||||
if not self.adb_name: # pylint: disable=E0203
|
||||
with open_serial_connection(timeout=self.timeout,
|
||||
port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
init_dtr=0) as target:
|
||||
target.sendline('')
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
target.expect(self.android_prompt)
|
||||
|
||||
self.logger.debug('Waiting for IP address...')
|
||||
wait_start_time = time.time()
|
||||
while True:
|
||||
target.sendline('ip addr list eth0')
|
||||
time.sleep(1)
|
||||
try:
|
||||
target.expect(r'inet ([1-9]\d*\.\d+\.\d+\.\d+)', timeout=10)
|
||||
self.adb_name = target.match.group(1) + ':5555' # pylint: disable=W0201
|
||||
break
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if (time.time() - wait_start_time) > self.ready_timeout:
|
||||
raise DeviceError('Could not acquire IP address.')
|
||||
|
||||
if self.adb_name in adb_list_devices():
|
||||
adb_disconnect(self.adb_name)
|
||||
adb_connect(self.adb_name, timeout=self.timeout)
|
||||
super(Juno, self).connect() # wait for boot to complete etc.
|
||||
self._is_ready = True
|
||||
|
||||
def disconnect(self):
|
||||
if self._is_ready:
|
||||
super(Juno, self).disconnect()
|
||||
if self.actually_disconnect:
|
||||
adb_disconnect(self.adb_name)
|
||||
self._is_ready = False
|
||||
|
||||
def reset(self):
|
||||
# Currently, reboot is not working in Android on Juno, so
|
||||
# perform a hard reset instead
|
||||
self.hard_reset()
|
||||
|
||||
def hard_reset(self):
|
||||
self.disconnect()
|
||||
self.adb_name = None # Force re-acquire IP address on reboot. pylint: disable=attribute-defined-outside-init
|
||||
with open_serial_connection(port=self.port,
|
||||
baudrate=self.baudrate,
|
||||
timeout=300,
|
||||
init_dtr=0,
|
||||
get_conn=True) as (target, conn):
|
||||
pulse_dtr(conn, state=True, duration=0.1) # TRM specifies a pulse of >=100ms
|
||||
|
||||
i = target.expect([AUTOSTART_MESSAGE, self.firmware_prompt])
|
||||
if i:
|
||||
self.logger.debug('Saw firmware prompt.')
|
||||
time.sleep(self.short_delay)
|
||||
target.sendline('reboot')
|
||||
else:
|
||||
self.logger.debug('Saw auto boot message.')
|
||||
|
||||
def wait_for_microsd_mount_point(self, target, timeout=100):
|
||||
attempts = 1 + self.retries
|
||||
if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
|
||||
return
|
||||
|
||||
self.logger.debug('Waiting for VExpress MicroSD to mount...')
|
||||
for i in xrange(attempts):
|
||||
if i: # Do not reboot on the first attempt.
|
||||
target.sendline('reboot')
|
||||
for _ in xrange(timeout):
|
||||
time.sleep(self.short_delay)
|
||||
if os.path.exists(os.path.join(self.microsd_mount_point, 'config.txt')):
|
||||
return
|
||||
raise DeviceError('Did not detect MicroSD mount on {}'.format(self.microsd_mount_point))
|
||||
|
||||
def get_android_id(self):
|
||||
# Android ID is currently not set properly in Juno Android builds.
|
||||
return 'abad1deadeadbeef'
|
||||
|
@@ -1,48 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import time
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class Nexus10Device(AndroidDevice):
|
||||
|
||||
name = 'Nexus10'
|
||||
description = """
|
||||
Nexus 10 is a 10 inch tablet device with a dual-core A15.
|
||||
|
||||
To be able to use Nexus10 in WA, the following must be true:
|
||||
|
||||
- USB Debugging Mode is enabled.
|
||||
- Generate USB debugging authorisation for the host machine
|
||||
|
||||
"""
|
||||
|
||||
default_working_directory = '/sdcard/working'
|
||||
has_gpu = True
|
||||
max_cores = 2
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['A15', 'A15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0], override=True),
|
||||
]
|
||||
|
||||
def initialize(self, context):
|
||||
time.sleep(self.long_delay)
|
||||
self.execute('svc power stayon true', check_exit_code=False)
|
||||
time.sleep(self.long_delay)
|
||||
self.execute('input keyevent 82')
|
@@ -1,40 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class Nexus5Device(AndroidDevice):
|
||||
|
||||
name = 'Nexus5'
|
||||
description = """
|
||||
Adapter for Nexus 5.
|
||||
|
||||
To be able to use Nexus5 in WA, the following must be true:
|
||||
|
||||
- USB Debugging Mode is enabled.
|
||||
- Generate USB debugging authorisation for the host machine
|
||||
|
||||
"""
|
||||
|
||||
default_working_directory = '/storage/sdcard0/working'
|
||||
has_gpu = True
|
||||
max_cores = 4
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['krait400', 'krait400', 'krait400', 'krait400'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
|
||||
]
|
@@ -1,76 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import time
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
from wlauto.exceptions import TimeoutError
|
||||
from wlauto.utils.android import adb_shell
|
||||
|
||||
|
||||
class Note3Device(AndroidDevice):
|
||||
|
||||
name = 'Note3'
|
||||
description = """
|
||||
Adapter for Galaxy Note 3.
|
||||
|
||||
To be able to use Note3 in WA, the following must be true:
|
||||
|
||||
- USB Debugging Mode is enabled.
|
||||
- Generate USB debugging authorisation for the host machine
|
||||
|
||||
"""
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['A15', 'A15', 'A15', 'A15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
|
||||
Parameter('working_directory', default='/storage/sdcard0/wa-working', override=True),
|
||||
]
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(Note3Device, self).__init__(**kwargs)
|
||||
self._just_rebooted = False
|
||||
|
||||
def initialize(self, context):
|
||||
self.execute('svc power stayon true', check_exit_code=False)
|
||||
|
||||
def reset(self):
|
||||
super(Note3Device, self).reset()
|
||||
self._just_rebooted = True
|
||||
|
||||
def hard_reset(self):
|
||||
super(Note3Device, self).hard_reset()
|
||||
self._just_rebooted = True
|
||||
|
||||
def connect(self): # NOQA pylint: disable=R0912
|
||||
super(Note3Device, self).connect()
|
||||
if self._just_rebooted:
|
||||
self.logger.debug('Waiting for boot to complete...')
|
||||
# On the Note 3, adb connection gets reset some time after booting.
|
||||
# This causes errors during execution. To prevent this, open a shell
|
||||
# session and wait for it to be killed. Once its killed, give adb
|
||||
# enough time to restart, and then the device should be ready.
|
||||
try:
|
||||
adb_shell(self.adb_name, '', timeout=20) # pylint: disable=no-member
|
||||
time.sleep(5) # give adb time to re-initialize
|
||||
except TimeoutError:
|
||||
pass # timed out waiting for the session to be killed -- assume not going to be.
|
||||
|
||||
self.logger.debug('Boot completed.')
|
||||
self._just_rebooted = False
|
||||
# Swipe upwards to unlock the screen.
|
||||
time.sleep(self.long_delay)
|
||||
self.execute('input touchscreen swipe 540 1600 560 800 ')
|
@@ -1,38 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import AndroidDevice, Parameter
|
||||
|
||||
|
||||
class OdroidXU3(AndroidDevice):
|
||||
|
||||
name = "odroidxu3"
|
||||
description = 'HardKernel Odroid XU3 development board.'
|
||||
|
||||
core_modules = [
|
||||
'odroidxu3-fan',
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('adb_name', default='BABABEEFBABABEEF', override=True),
|
||||
Parameter('working_directory', default='/data/local/wa-working', override=True),
|
||||
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
|
||||
Parameter('port', default='/dev/ttyUSB0', kind=str,
|
||||
description='Serial port on which the device is connected'),
|
||||
Parameter('baudrate', default=115200, kind=int, description='Serial connection baud rate'),
|
||||
]
|
||||
|
@@ -1,850 +0,0 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import string
|
||||
import shutil
|
||||
import time
|
||||
from collections import Counter
|
||||
|
||||
import pexpect
|
||||
|
||||
from wlauto import BigLittleDevice, RuntimeParameter, Parameter, settings
|
||||
from wlauto.exceptions import ConfigError, DeviceError
|
||||
from wlauto.utils.android import adb_connect, adb_disconnect, adb_list_devices
|
||||
from wlauto.utils.serial_port import open_serial_connection
|
||||
from wlauto.utils.misc import merge_dicts
|
||||
from wlauto.utils.types import boolean
|
||||
|
||||
|
||||
BOOT_FIRMWARE = {
|
||||
'uefi': {
|
||||
'SCC_0x010': '0x000003E0',
|
||||
'reboot_attempts': 0,
|
||||
},
|
||||
'bootmon': {
|
||||
'SCC_0x010': '0x000003D0',
|
||||
'reboot_attempts': 2,
|
||||
},
|
||||
}
|
||||
|
||||
MODES = {
|
||||
'mp_a7_only': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a7',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x1032F003',
|
||||
'cpus': ['a7', 'a7', 'a7'],
|
||||
},
|
||||
'mp_a7_bootcluster': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a7bc',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x1032F003',
|
||||
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
},
|
||||
'mp_a15_only': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a15',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a15', 'a15'],
|
||||
},
|
||||
'mp_a15_bootcluster': {
|
||||
'images_file': 'images_mp.txt',
|
||||
'dtb': 'mp_a15bc',
|
||||
'initrd': 'init_mp',
|
||||
'kernel': 'kern_mp',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a15', 'a15', 'a7', 'a7', 'a7'],
|
||||
},
|
||||
'iks_cpu': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x1032F003',
|
||||
'cpus': ['a7', 'a7'],
|
||||
},
|
||||
'iks_a15': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a15', 'a15'],
|
||||
},
|
||||
'iks_a7': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a7', 'a7'],
|
||||
},
|
||||
'iks_ns_a15': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
},
|
||||
'iks_ns_a7': {
|
||||
'images_file': 'images_iks.txt',
|
||||
'dtb': 'iks',
|
||||
'initrd': 'init_iks',
|
||||
'kernel': 'kern_iks',
|
||||
'SCC_0x700': '0x0032F003',
|
||||
'cpus': ['a7', 'a7', 'a7', 'a15', 'a15'],
|
||||
},
|
||||
}
|
||||
|
||||
A7_ONLY_MODES = ['mp_a7_only', 'iks_a7', 'iks_cpu']
|
||||
A15_ONLY_MODES = ['mp_a15_only', 'iks_a15']
|
||||
|
||||
DEFAULT_A7_GOVERNOR_TUNABLES = {
|
||||
'interactive': {
|
||||
'above_hispeed_delay': 80000,
|
||||
'go_hispeed_load': 85,
|
||||
'hispeed_freq': 800000,
|
||||
'min_sample_time': 80000,
|
||||
'timer_rate': 20000,
|
||||
},
|
||||
'ondemand': {
|
||||
'sampling_rate': 50000,
|
||||
},
|
||||
}
|
||||
|
||||
DEFAULT_A15_GOVERNOR_TUNABLES = {
|
||||
'interactive': {
|
||||
'above_hispeed_delay': 80000,
|
||||
'go_hispeed_load': 85,
|
||||
'hispeed_freq': 1000000,
|
||||
'min_sample_time': 80000,
|
||||
'timer_rate': 20000,
|
||||
},
|
||||
'ondemand': {
|
||||
'sampling_rate': 50000,
|
||||
},
|
||||
}
|
||||
|
||||
ADB_SHELL_TIMEOUT = 30
|
||||
|
||||
|
||||
class _TC2DeviceConfig(object):
|
||||
|
||||
name = 'TC2 Configuration'
|
||||
device_name = 'TC2'
|
||||
|
||||
def __init__(self, # pylint: disable=R0914,W0613
|
||||
root_mount='/media/VEMSD',
|
||||
|
||||
disable_boot_configuration=False,
|
||||
boot_firmware=None,
|
||||
mode=None,
|
||||
|
||||
fs_medium='usb',
|
||||
|
||||
device_working_directory='/data/local/usecase',
|
||||
|
||||
bm_image='bm_v519r.axf',
|
||||
|
||||
serial_device='/dev/ttyS0',
|
||||
serial_baud=38400,
|
||||
serial_max_timeout=600,
|
||||
serial_log=sys.stdout,
|
||||
|
||||
init_timeout=120,
|
||||
|
||||
always_delete_uefi_entry=True,
|
||||
psci_enable=True,
|
||||
|
||||
host_working_directory=None,
|
||||
|
||||
a7_governor_tunables=None,
|
||||
a15_governor_tunables=None,
|
||||
|
||||
adb_name=None,
|
||||
# Compatibility with other android devices.
|
||||
enable_screen_check=None, # pylint: disable=W0613
|
||||
**kwargs
|
||||
):
|
||||
self.root_mount = root_mount
|
||||
self.disable_boot_configuration = disable_boot_configuration
|
||||
if not disable_boot_configuration:
|
||||
self.boot_firmware = boot_firmware or 'uefi'
|
||||
self.default_mode = mode or 'mp_a7_bootcluster'
|
||||
elif boot_firmware or mode:
|
||||
raise ConfigError('boot_firmware and/or mode cannot be specified when disable_boot_configuration is enabled.')
|
||||
|
||||
self.mode = self.default_mode
|
||||
self.working_directory = device_working_directory
|
||||
self.serial_device = serial_device
|
||||
self.serial_baud = serial_baud
|
||||
self.serial_max_timeout = serial_max_timeout
|
||||
self.serial_log = serial_log
|
||||
self.bootmon_prompt = re.compile('^([KLM]:\\\)?>', re.MULTILINE)
|
||||
|
||||
self.fs_medium = fs_medium.lower()
|
||||
|
||||
self.bm_image = bm_image
|
||||
|
||||
self.init_timeout = init_timeout
|
||||
|
||||
self.always_delete_uefi_entry = always_delete_uefi_entry
|
||||
self.psci_enable = psci_enable
|
||||
|
||||
self.resource_dir = os.path.join(os.path.dirname(__file__), 'resources')
|
||||
self.board_dir = os.path.join(self.root_mount, 'SITE1', 'HBI0249A')
|
||||
self.board_file = 'board.txt'
|
||||
self.board_file_bak = 'board.bak'
|
||||
self.images_file = 'images.txt'
|
||||
|
||||
self.host_working_directory = host_working_directory or settings.meta_directory
|
||||
|
||||
if not a7_governor_tunables:
|
||||
self.a7_governor_tunables = DEFAULT_A7_GOVERNOR_TUNABLES
|
||||
else:
|
||||
self.a7_governor_tunables = merge_dicts(DEFAULT_A7_GOVERNOR_TUNABLES, a7_governor_tunables)
|
||||
|
||||
if not a15_governor_tunables:
|
||||
self.a15_governor_tunables = DEFAULT_A15_GOVERNOR_TUNABLES
|
||||
else:
|
||||
self.a15_governor_tunables = merge_dicts(DEFAULT_A15_GOVERNOR_TUNABLES, a15_governor_tunables)
|
||||
|
||||
self.adb_name = adb_name
|
||||
|
||||
@property
|
||||
def src_images_template_file(self):
|
||||
return os.path.join(self.resource_dir, MODES[self.mode]['images_file'])
|
||||
|
||||
@property
|
||||
def src_images_file(self):
|
||||
return os.path.join(self.host_working_directory, 'images.txt')
|
||||
|
||||
@property
|
||||
def src_board_template_file(self):
|
||||
return os.path.join(self.resource_dir, 'board_template.txt')
|
||||
|
||||
@property
|
||||
def src_board_file(self):
|
||||
return os.path.join(self.host_working_directory, 'board.txt')
|
||||
|
||||
@property
|
||||
def kernel_arguments(self):
|
||||
kernel_args = ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
|
||||
if self.fs_medium == 'usb':
|
||||
kernel_args += ' androidboot.hardware=arm-versatileexpress-usb'
|
||||
if 'iks' in self.mode:
|
||||
kernel_args += ' no_bL_switcher=0'
|
||||
return kernel_args
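# Worked example (illustrative): with fs_medium='usb' and an IKS mode such
# as 'iks_cpu', the property above returns the single string
#   ' console=ttyAMA0,38400 androidboot.console=ttyAMA0 selinux=0'
#   ' androidboot.hardware=arm-versatileexpress-usb no_bL_switcher=0'
# (shown wrapped here; the parts are concatenated into one command line).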
@property
|
||||
def kernel(self):
|
||||
return MODES[self.mode]['kernel']
|
||||
|
||||
@property
|
||||
def initrd(self):
|
||||
return MODES[self.mode]['initrd']
|
||||
|
||||
@property
|
||||
def dtb(self):
|
||||
return MODES[self.mode]['dtb']
|
||||
|
||||
@property
|
||||
def SCC_0x700(self):
|
||||
return MODES[self.mode]['SCC_0x700']
|
||||
|
||||
@property
|
||||
def SCC_0x010(self):
|
||||
return BOOT_FIRMWARE[self.boot_firmware]['SCC_0x010']
|
||||
|
||||
@property
|
||||
def reboot_attempts(self):
|
||||
return BOOT_FIRMWARE[self.boot_firmware]['reboot_attempts']
|
||||
|
||||
def validate(self):
|
||||
valid_modes = MODES.keys()
|
||||
if self.mode not in valid_modes:
|
||||
message = 'Invalid mode: {}; must be in {}'.format(
|
||||
self.mode, valid_modes)
|
||||
raise ConfigError(message)
|
||||
|
||||
valid_boot_firmware = BOOT_FIRMWARE.keys()
|
||||
if self.boot_firmware not in valid_boot_firmware:
|
||||
message = 'Invalid boot_firmware: {}; must be in {}'.format(
|
||||
self.boot_firmware,
|
||||
valid_boot_firmware)
|
||||
raise ConfigError(message)
|
||||
|
||||
if self.fs_medium not in ['usb', 'sdcard']:
|
||||
message = 'Invalid filesystem medium: {} allowed values : usb, sdcard '.format(self.fs_medium)
|
||||
raise ConfigError(message)
|
||||
|
||||
|
||||
class TC2Device(BigLittleDevice):
|
||||
|
||||
name = 'TC2'
|
||||
description = """
|
||||
TC2 is a development board, which has three A7 cores and two A15 cores.
|
||||
|
||||
TC2 has a number of boot parameters which are:
|
||||
|
||||
:root_mount: Defaults to '/media/VEMSD'
|
||||
:boot_firmware: It has only two boot firmware options, which are
|
||||
uefi and bootmon. Defaults to 'uefi'.
|
||||
:fs_medium: Defaults to 'usb'.
|
||||
:device_working_directory: The directory that WA will be using to copy
files to. Defaults to '/data/local/usecase'.
|
||||
:serial_device: The serial device which TC2 is connected to. Defaults to
|
||||
'/dev/ttyS0'.
|
||||
:serial_baud: Defaults to 38400.
|
||||
:serial_max_timeout: Serial timeout value in seconds. Defaults to 600.
|
||||
:serial_log: Defaults to standard output.
|
||||
:init_timeout: The timeout in seconds to init the device. Defaults to 120.
|
||||
:always_delete_uefi_entry: If true, it will delete the UEFI entry.
|
||||
Defaults to True.
|
||||
:psci_enable: Enables PSCI. Defaults to True.
|
||||
:host_working_directory: The host working directory. Defaults to the WA meta directory.
|
||||
:disable_boot_configuration: Disables boot configuration through images.txt and board.txt. When
|
||||
this is ``True``, those two files will not be overwritten in VEMSD.
|
||||
This option may be necessary if the firmware version in the ``TC2``
|
||||
is not compatible with the templates in WA. Please note that enabling
|
||||
this will prevent you from being able to set ``boot_firmware`` and
|
||||
``mode`` parameters. Defaults to ``False``.
|
||||
|
||||
TC2 can also have a number of different booting modes, which are:
|
||||
|
||||
:mp_a7_only: Only the A7 cluster.
|
||||
:mp_a7_bootcluster: Both A7 and A15 clusters, but it boots on A7
|
||||
cluster.
|
||||
:mp_a15_only: Only the A15 cluster.
|
||||
:mp_a15_bootcluster: Both A7 and A15 clusters, but it boots on A15
|
||||
cluster.
|
||||
:iks_cpu: Only A7 cluster with only 2 cpus.
|
||||
:iks_a15: Only A15 cluster.
|
||||
:iks_a7: Same as iks_cpu.
|
||||
:iks_ns_a15: Both A7 and A15 clusters.
|
||||
:iks_ns_a7: Both A7 and A15 clusters.
|
||||
|
||||
The difference between mp and iks is the scheduling policy.
|
||||
|
||||
TC2 takes the following runtime parameters:
|
||||
|
||||
:a7_cores: Number of active A7 cores.
|
||||
:a15_cores: Number of active A15 cores.
|
||||
:a7_governor: CPUFreq governor for the A7 cluster.
|
||||
:a15_governor: CPUFreq governor for the A15 cluster.
|
||||
:a7_min_frequency: Minimum CPU frequency for the A7 cluster.
|
||||
:a15_min_frequency: Minimum CPU frequency for the A15 cluster.
|
||||
:a7_max_frequency: Maximum CPU frequency for the A7 cluster.
|
||||
:a15_max_frequency: Maximum CPU frequency for the A15 cluster.
|
||||
:irq_affinity: Which cluster will receive IRQs.
|
||||
:cpuidle: Whether idle states should be enabled.
|
||||
:sysfile_values: A dict mapping a complete file path to the value that
|
||||
should be echo'd into it. By default, the file will be
|
||||
subsequently read to verify that the value was written
|
||||
into it with DeviceError raised otherwise. For write-only
|
||||
files, this check can be disabled by appending a ``!`` to
|
||||
the end of the file path.
|
||||
|
||||
"""
|
||||
|
||||
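# Example (illustrative) of the sysfile_values runtime parameter described
# above; the governor path is hypothetical and only shows the ``!`` suffix
# convention for write-only files:
#   sysfile_values = {
#       '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor': 'userspace',
#       '/sys/kernel/debug/idle_debug/enable_idle!': 0xFF,
#   }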
has_gpu = False
|
||||
a15_only_modes = A15_ONLY_MODES
|
||||
a7_only_modes = A7_ONLY_MODES
|
||||
not_configurable_modes = ['iks_a7', 'iks_cpu', 'iks_a15']
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', mandatory=False, override=True,
|
||||
description='This parameter will be ignored for TC2'),
|
||||
Parameter('core_clusters', mandatory=False, override=True,
|
||||
description='This parameter will be ignored for TC2'),
|
||||
]
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('irq_affinity', lambda d, x: d.set_irq_affinity(x.lower()), lambda: None),
|
||||
RuntimeParameter('cpuidle', lambda d, x: d.enable_idle_states() if boolean(x) else d.disable_idle_states(),
|
||||
lambda d: d.get_cpuidle())
|
||||
]
|
||||
|
||||
def get_mode(self):
|
||||
return self.config.mode
|
||||
|
||||
def set_mode(self, mode):
|
||||
if self._has_booted:
|
||||
raise DeviceError('Attempting to set boot mode when already booted.')
|
||||
valid_modes = MODES.keys()
|
||||
if mode is None:
|
||||
mode = self.config.default_mode
|
||||
if mode not in valid_modes:
|
||||
message = 'Invalid mode: {}; must be in {}'.format(mode, valid_modes)
|
||||
raise ConfigError(message)
|
||||
self.config.mode = mode
|
||||
|
||||
mode = property(get_mode, set_mode)
|
||||
|
||||
def _get_core_names(self):
|
||||
return MODES[self.mode]['cpus']
|
||||
|
||||
def _set_core_names(self, value):
|
||||
pass
|
||||
|
||||
core_names = property(_get_core_names, _set_core_names)
|
||||
|
||||
def _get_core_clusters(self):
|
||||
seen = set([])
|
||||
core_clusters = []
|
||||
cluster_id = -1
|
||||
for core in MODES[self.mode]['cpus']:
|
||||
if core not in seen:
|
||||
seen.add(core)
|
||||
cluster_id += 1
|
||||
core_clusters.append(cluster_id)
|
||||
return core_clusters
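# Worked example (illustrative): in 'mp_a7_bootcluster' mode the cpus list
# is ['a7', 'a7', 'a7', 'a15', 'a15'], so the loop above assigns cluster
# IDs in order of first appearance and returns [0, 0, 0, 1, 1].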
def _set_core_clusters(self, value):
|
||||
pass
|
||||
|
||||
core_clusters = property(_get_core_clusters, _set_core_clusters)
|
||||
|
||||
@property
|
||||
def cpu_cores(self):
|
||||
return MODES[self.mode]['cpus']
|
||||
|
||||
@property
|
||||
def max_a7_cores(self):
|
||||
return Counter(MODES[self.mode]['cpus'])['a7']
|
||||
|
||||
@property
|
||||
def max_a15_cores(self):
|
||||
return Counter(MODES[self.mode]['cpus'])['a15']
|
||||
|
||||
@property
|
||||
def a7_governor_tunables(self):
|
||||
return self.config.a7_governor_tunables
|
||||
|
||||
@property
|
||||
def a15_governor_tunables(self):
|
||||
return self.config.a15_governor_tunables
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(TC2Device, self).__init__()
|
||||
self.config = _TC2DeviceConfig(**kwargs)
|
||||
self.working_directory = self.config.working_directory
|
||||
self._serial = None
|
||||
self._has_booted = None
|
||||
|
||||
def boot(self, **kwargs): # NOQA
|
||||
mode = kwargs.get('os_mode', None)
|
||||
self._is_ready = False
|
||||
self._has_booted = False
|
||||
|
||||
self.mode = mode
|
||||
self.logger.debug('Booting in {} mode'.format(self.mode))
|
||||
|
||||
with open_serial_connection(timeout=self.config.serial_max_timeout,
|
||||
port=self.config.serial_device,
|
||||
baudrate=self.config.serial_baud) as target:
|
||||
if self.config.boot_firmware == 'bootmon':
|
||||
self._boot_using_bootmon(target)
|
||||
elif self.config.boot_firmware == 'uefi':
|
||||
self._boot_using_uefi(target)
|
||||
else:
|
||||
message = 'Unexpected boot firmware: {}'.format(self.config.boot_firmware)
|
||||
raise ConfigError(message)
|
||||
|
||||
try:
|
||||
target.sendline('')
|
||||
self.logger.debug('Waiting for the Android prompt.')
|
||||
target.expect(self.android_prompt, timeout=40) # pylint: disable=E1101
|
||||
except pexpect.TIMEOUT:
|
||||
# Try a second time before giving up.
|
||||
self.logger.debug('Did not get Android prompt, retrying...')
|
||||
target.sendline('')
|
||||
target.expect(self.android_prompt, timeout=10) # pylint: disable=E1101
|
||||
|
||||
self.logger.debug('Waiting for OS to initialize...')
|
||||
started_waiting_time = time.time()
|
||||
time.sleep(20) # we know it's not going to take less time than this.
|
||||
boot_completed, got_ip_address = False, False
|
||||
while True:
|
||||
try:
|
||||
if not boot_completed:
|
||||
target.sendline('getprop sys.boot_completed')
|
||||
boot_completed = target.expect(['0.*', '1.*'], timeout=10)
|
||||
if not got_ip_address:
|
||||
target.sendline('getprop dhcp.eth0.ipaddress')
|
||||
# regexes are processed in order, so ip regex has to
|
||||
# come first (as we only want to match new line if we
|
||||
# don't match the IP). We use a "not" to make the logic
|
||||
# consistent with boot_completed.
|
||||
got_ip_address = not target.expect(['[1-9]\d*.\d+.\d+.\d+', '\n'], timeout=10)
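# Note: pexpect's expect() returns the index of the pattern that matched,
# so matching '1.*' above leaves boot_completed truthy, and matching the
# IP pattern (index 0) makes "not 0" evaluate to True here.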
|
||||
except pexpect.TIMEOUT:
|
||||
pass # We have our own timeout -- see below.
|
||||
if boot_completed and got_ip_address:
|
||||
break
|
||||
time.sleep(5)
|
||||
if (time.time() - started_waiting_time) > self.config.init_timeout:
|
||||
raise DeviceError('Timed out waiting for the device to initialize.')
|
||||
|
||||
self._has_booted = True
|
||||
|
||||
def connect(self):
|
||||
if not self._is_ready:
|
||||
if self.config.adb_name:
|
||||
self.adb_name = self.config.adb_name # pylint: disable=attribute-defined-outside-init
|
||||
else:
|
||||
with open_serial_connection(timeout=self.config.serial_max_timeout,
|
||||
port=self.config.serial_device,
|
||||
baudrate=self.config.serial_baud) as target:
|
||||
# Get IP address and push the Gator and PMU logger.
|
||||
target.sendline('su') # as of Android v5.0.2, Linux does not boot into root shell
|
||||
target.sendline('netcfg')
|
||||
ipaddr_re = re.compile('eth0 +UP +(.+)/.+', re.MULTILINE)
|
||||
target.expect(ipaddr_re)
|
||||
output = target.after
|
||||
match = re.search('eth0 +UP +(.+)/.+', output)
|
||||
if not match:
|
||||
raise DeviceError('Could not get adb IP address.')
|
||||
ipaddr = match.group(1)
|
||||
|
||||
# Connect to device using adb.
|
||||
target.expect(self.android_prompt) # pylint: disable=E1101
|
||||
self.adb_name = ipaddr + ":5555" # pylint: disable=W0201
|
||||
|
||||
if self.adb_name in adb_list_devices():
|
||||
adb_disconnect(self.adb_name)
|
||||
adb_connect(self.adb_name)
|
||||
self._is_ready = True
|
||||
self.execute("input keyevent 82", timeout=ADB_SHELL_TIMEOUT)
|
||||
self.execute("svc power stayon true", timeout=ADB_SHELL_TIMEOUT)
|
||||
|
||||
def disconnect(self):
|
||||
adb_disconnect(self.adb_name)
|
||||
self._is_ready = False
|
||||
|
||||
# TC2-specific methods. You should avoid calling these in
|
||||
# Workloads/Instruments as that would tie them to TC2 (and if that is
|
||||
# the case, then you should set the supported_devices parameter in the
|
||||
# Workload/Instrument accordingly). Most of these can be replaced with a
|
||||
# call to set_runtime_parameters.
|
||||
|
||||
def get_cpuidle(self):
|
||||
return self.get_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable')
|
||||
|
||||
def enable_idle_states(self):
|
||||
"""
|
||||
Fully enables idle states on TC2.
|
||||
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
|
||||
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
|
||||
|
||||
"""
|
||||
# Enable C1 (cluster shutdown).
|
||||
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 0, verify=False)
|
||||
# Enable C0 on A15 cluster.
|
||||
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0, verify=False)
|
||||
# Enable C0 on A7 cluster.
|
||||
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 1, verify=False)
|
||||
|
||||
def disable_idle_states(self):
|
||||
"""
|
||||
Disable idle states on TC2.
|
||||
See http://wiki.arm.com/Research/TC2SetupAndUsage ("Enabling Idle Modes" section)
|
||||
and http://wiki.arm.com/ASD/ControllingPowerManagementInLinaroKernels
|
||||
|
||||
"""
|
||||
# Disable C1 (cluster shutdown).
|
||||
self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False)
|
||||
# Disable C0.
|
||||
self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False)
|
||||
|
||||
def set_irq_affinity(self, cluster):
|
||||
"""
|
||||
Sets IRQ affinity to the specified cluster.
|
||||
|
||||
This method will only work if the device mode is mp_a7_bootcluster or
|
||||
mp_a15_bootcluster. This operation does not make sense if there is only one
|
||||
cluster active (all IRQs will obviously go to that), and it will not work for
|
||||
IKS kernel because clusters are not exposed to sysfs.
|
||||
|
||||
:param cluster: must be either 'a15' or 'a7'.
|
||||
|
||||
"""
|
||||
if self.config.mode not in ('mp_a7_bootcluster', 'mp_a15_bootcluster'):
|
||||
raise ConfigError('Cannot set IRQ affinity with mode {}'.format(self.config.mode))
|
||||
if cluster == 'a7':
|
||||
self.execute('/sbin/set_irq_affinity.sh 0xc07', check_exit_code=False)
|
||||
elif cluster == 'a15':
|
||||
self.execute('/sbin/set_irq_affinity.sh 0xc0f', check_exit_code=False)
|
||||
else:
|
||||
raise ConfigError('cluster must be either "a15" or "a7"; got {}'.format(cluster))
|
||||
|
||||
def _boot_using_uefi(self, target):
|
||||
self.logger.debug('Booting using UEFI.')
|
||||
self._wait_for_vemsd_mount(target)
|
||||
self._setup_before_reboot()
|
||||
self._perform_uefi_reboot(target)
|
||||
|
||||
# Get to the UEFI menu.
|
||||
self.logger.debug('Waiting for UEFI default selection.')
|
||||
target.sendline('reboot')
|
||||
target.expect('The default boot selection will start in'.rstrip())
|
||||
time.sleep(1)
|
||||
target.sendline(''.rstrip())
|
||||
|
||||
# If delete every time is specified, try to delete entry.
|
||||
if self.config.always_delete_uefi_entry:
|
||||
self._delete_uefi_entry(target, entry='workload_automation_MP')
|
||||
self.config.always_delete_uefi_entry = False
|
||||
|
||||
# Specify argument to be passed specifying that psci is (or is not) enabled
|
||||
if self.config.psci_enable:
|
||||
psci_enable = ' psci=enable'
|
||||
else:
|
||||
psci_enable = ''
|
||||
|
||||
# Identify the workload automation entry.
|
||||
selection_pattern = r'\[([0-9]*)\] '
|
||||
|
||||
try:
|
||||
target.expect(re.compile(selection_pattern + 'workload_automation_MP'), timeout=5)
|
||||
wl_menu_item = target.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
self._create_uefi_entry(target, psci_enable, entry_name='workload_automation_MP')
|
||||
# At this point the board should be rebooted so we need to retry to boot
|
||||
self._boot_using_uefi(target)
|
||||
else: # Did not time out.
|
||||
try:
|
||||
#Identify the boot manager menu item
|
||||
target.expect(re.compile(selection_pattern + 'Boot Manager'))
|
||||
boot_manager_menu_item = target.match.group(1)
|
||||
|
||||
#Update FDT
|
||||
target.sendline(boot_manager_menu_item)
|
||||
target.expect(re.compile(selection_pattern + 'Update FDT path'), timeout=15)
|
||||
update_fdt_menu_item = target.match.group(1)
|
||||
target.sendline(update_fdt_menu_item)
|
||||
target.expect(re.compile(selection_pattern + 'NOR Flash .*'), timeout=15)
|
||||
bootmonfs_menu_item = target.match.group(1)
|
||||
target.sendline(bootmonfs_menu_item)
|
||||
target.expect('File path of the FDT blob:')
|
||||
target.sendline(self.config.dtb)
|
||||
|
||||
#Return to main menu and boot from wl automation
|
||||
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
|
||||
return_to_main_menu_item = target.match.group(1)
|
||||
target.sendline(return_to_main_menu_item)
|
||||
target.sendline(wl_menu_item)
|
||||
except pexpect.TIMEOUT:
|
||||
raise DeviceError('Timed out')
|
||||
|
||||
def _setup_before_reboot(self):
|
||||
if not self.config.disable_boot_configuration:
|
||||
self.logger.debug('Performing pre-boot setup.')
|
||||
substitution = {
|
||||
'SCC_0x010': self.config.SCC_0x010,
|
||||
'SCC_0x700': self.config.SCC_0x700,
|
||||
}
|
||||
with open(self.config.src_board_template_file, 'r') as fh:
|
||||
template_board_txt = string.Template(fh.read())
|
||||
with open(self.config.src_board_file, 'w') as wfh:
|
||||
wfh.write(template_board_txt.substitute(substitution))
|
||||
|
||||
with open(self.config.src_images_template_file, 'r') as fh:
|
||||
template_images_txt = string.Template(fh.read())
|
||||
with open(self.config.src_images_file, 'w') as wfh:
|
||||
wfh.write(template_images_txt.substitute({'bm_image': self.config.bm_image}))
|
||||
|
||||
shutil.copyfile(self.config.src_board_file,
|
||||
os.path.join(self.config.board_dir, self.config.board_file))
|
||||
shutil.copyfile(self.config.src_images_file,
|
||||
os.path.join(self.config.board_dir, self.config.images_file))
|
||||
os.system('sync') # make sure everything is flushed to microSD
|
||||
else:
|
||||
self.logger.debug('Boot configuration disabled proceeding with existing board.txt and images.txt.')
|
||||
|
||||
def _delete_uefi_entry(self, target, entry): # pylint: disable=R0201
|
||||
"""
|
||||
This method deletes the entry specified as a parameter.
As a precondition, serial port input needs to be parsed AT MOST up to
the point BEFORE recognizing this entry (both the entry and the boot
manager have not yet been parsed).
|
||||
|
||||
"""
|
||||
try:
|
||||
selection_pattern = r'\[([0-9]+)\] *'
|
||||
|
||||
try:
|
||||
target.expect(re.compile(selection_pattern + entry), timeout=5)
|
||||
wl_menu_item = target.match.group(1)
|
||||
except pexpect.TIMEOUT:
|
||||
return # Entry does not exist, nothing to delete here...
|
||||
|
||||
# Identify and select boot manager menu item
|
||||
target.expect(selection_pattern + 'Boot Manager', timeout=15)
|
||||
bootmanager_item = target.match.group(1)
|
||||
target.sendline(bootmanager_item)
|
||||
|
||||
# Identify and select 'Remove entry'
|
||||
target.expect(selection_pattern + 'Remove Boot Device Entry', timeout=15)
|
||||
new_entry_item = target.match.group(1)
|
||||
target.sendline(new_entry_item)
|
||||
|
||||
# Delete entry
|
||||
target.expect(re.compile(selection_pattern + entry), timeout=5)
|
||||
wl_menu_item = target.match.group(1)
|
||||
target.sendline(wl_menu_item)
|
||||
|
||||
# Return to main menu
|
||||
target.expect(re.compile(selection_pattern + 'Return to main menu'), timeout=15)
|
||||
return_to_main_menu_item = target.match.group(1)
|
||||
target.sendline(return_to_main_menu_item)
|
||||
except pexpect.TIMEOUT:
|
||||
raise DeviceError('Timed out while deleting UEFI entry.')
|
||||
|
||||
def _create_uefi_entry(self, target, psci_enable, entry_name):
|
||||
"""
|
||||
Creates the default boot entry that is expected when booting in uefi mode.
|
||||
|
||||
"""
|
||||
self._wait_for_vemsd_mount(target)
|
||||
try:
|
||||
selection_pattern = '\[([0-9]+)\] *'
|
||||
|
||||
# Identify and select boot manager menu item.
|
||||
target.expect(selection_pattern + 'Boot Manager', timeout=15)
|
||||
bootmanager_item = target.match.group(1)
|
||||
target.sendline(bootmanager_item)
|
||||
|
||||
# Identify and select 'add new entry'.
|
||||
target.expect(selection_pattern + 'Add Boot Device Entry', timeout=15)
|
||||
new_entry_item = target.match.group(1)
|
||||
target.sendline(new_entry_item)
|
||||
|
||||
# Identify and select BootMonFs.
|
||||
target.expect(selection_pattern + 'NOR Flash .*', timeout=15)
|
||||
BootMonFs_item = target.match.group(1)
|
||||
target.sendline(BootMonFs_item)
|
||||
|
||||
# Specify the parameters of the new entry.
|
||||
target.expect('.+the kernel', timeout=5)
|
||||
target.sendline(self.config.kernel) # kernel path
|
||||
target.expect('Has FDT support\?.*\[y\/n\].*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
target.sendline('y') # Has Fdt support? -> y
|
||||
target.expect('Add an initrd.*\[y\/n\].*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
target.sendline('y') # add an initrd? -> y
|
||||
target.expect('.+the initrd.*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
target.sendline(self.config.initrd) # initrd path
|
||||
target.expect('.+to the binary.*', timeout=5)
|
||||
time.sleep(0.5)
|
||||
_slow_sendline(target, self.config.kernel_arguments + psci_enable) # arguments to pass to binary
|
||||
time.sleep(0.5)
|
||||
target.expect('.+new Entry.+', timeout=5)
|
||||
_slow_sendline(target, entry_name) # Entry name
|
||||
target.expect('Choice.+', timeout=15)
|
||||
time.sleep(2)
|
||||
except pexpect.TIMEOUT:
|
||||
raise DeviceError('Timed out while creating UEFI entry.')
|
||||
self._perform_uefi_reboot(target)
|
||||
|
||||
def _perform_uefi_reboot(self, target):
|
||||
self._wait_for_vemsd_mount(target)
|
||||
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
|
||||
|
||||
def _wait_for_vemsd_mount(self, target, timeout=100):
|
||||
attempts = 1 + self.config.reboot_attempts
|
||||
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
|
||||
return
|
||||
|
||||
self.logger.debug('Waiting for VEMSD to mount...')
|
||||
for i in xrange(attempts):
|
||||
if i: # Do not reboot on the first attempt.
|
||||
target.sendline('reboot')
|
||||
target.sendline('usb_on')
|
||||
for _ in xrange(timeout):
|
||||
time.sleep(1)
|
||||
if os.path.exists(os.path.join(self.config.root_mount, 'config.txt')):
|
||||
return
|
||||
|
||||
raise DeviceError('Timed out waiting for VEMSD to mount.')
|
||||
|
||||
def _boot_using_bootmon(self, target):
|
||||
"""
|
||||
This method boots TC2 using the bootmon interface.
|
||||
"""
|
||||
self.logger.debug('Booting using bootmon.')
|
||||
|
||||
try:
|
||||
self._wait_for_vemsd_mount(target, timeout=20)
|
||||
except DeviceError:
|
||||
# OK, something's wrong. Reboot the board and try again.
|
||||
self.logger.debug('VEMSD not mounted, attempting to power cycle device.')
|
||||
target.sendline(' ')
|
||||
state = target.expect(['Cmd> ', self.config.bootmon_prompt, self.android_prompt]) # pylint: disable=E1101
|
||||
|
||||
if state == 0 or state == 1:
|
||||
# Reboot - Bootmon
|
||||
target.sendline('reboot')
|
||||
target.expect('Powering up system...')
|
||||
elif state == 2:
|
||||
target.sendline('reboot -n')
|
||||
target.expect('Powering up system...')
|
||||
else:
|
||||
raise DeviceError('Unexpected board state {}; should be 0, 1 or 2'.format(state))
|
||||
|
||||
self._wait_for_vemsd_mount(target)
|
||||
|
||||
self._setup_before_reboot()
|
||||
|
||||
# Reboot - Bootmon
|
||||
self.logger.debug('Rebooting into bootloader...')
|
||||
open(os.path.join(self.config.root_mount, 'reboot.txt'), 'a').close()
|
||||
target.expect('Powering up system...')
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
|
||||
# Wait for VEMSD to mount
|
||||
self._wait_for_vemsd_mount(target)
|
||||
|
||||
#Boot Linux - Bootmon
|
||||
target.sendline('fl linux fdt ' + self.config.dtb)
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
target.sendline('fl linux initrd ' + self.config.initrd)
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
#Workaround TC2 bootmon serial issue for loading large initrd blob
|
||||
target.sendline(' ')
|
||||
target.expect(self.config.bootmon_prompt)
|
||||
target.sendline('fl linux boot ' + self.config.kernel + self.config.kernel_arguments)
|
||||
|
||||
|
||||
# Utility functions.
|
||||
|
||||
def _slow_sendline(target, line):
|
||||
for c in line:
|
||||
target.send(c)
|
||||
time.sleep(0.1)
|
||||
target.sendline('')
|
||||
|
@@ -1,96 +0,0 @@
|
||||
BOARD: HBI0249
|
||||
TITLE: V2P-CA15_A7 Configuration File
|
||||
|
||||
[DCCS]
|
||||
TOTALDCCS: 1 ;Total Number of DCCS
|
||||
M0FILE: dbb_v110.ebf ;DCC0 Filename
|
||||
M0MODE: MICRO ;DCC0 Programming Mode
|
||||
|
||||
[FPGAS]
|
||||
TOTALFPGAS: 0 ;Total Number of FPGAs
|
||||
|
||||
[TAPS]
|
||||
TOTALTAPS: 3 ;Total Number of TAPs
|
||||
T0NAME: STM32TMC ;TAP0 Device Name
|
||||
T0FILE: NONE ;TAP0 Filename
|
||||
T0MODE: NONE ;TAP0 Programming Mode
|
||||
T1NAME: STM32CM3 ;TAP1 Device Name
|
||||
T1FILE: NONE ;TAP1 Filename
|
||||
T1MODE: NONE ;TAP1 Programming Mode
|
||||
T2NAME: CORTEXA15 ;TAP2 Device Name
|
||||
T2FILE: NONE ;TAP2 Filename
|
||||
T2MODE: NONE ;TAP2 Programming Mode
|
||||
|
||||
[OSCCLKS]
|
||||
TOTALOSCCLKS: 9 ;Total Number of OSCCLKS
|
||||
OSC0: 50.0 ;CPUREFCLK0 A15 CPU (20:1 - 1.0GHz)
|
||||
OSC1: 50.0 ;CPUREFCLK1 A15 CPU (20:1 - 1.0GHz)
|
||||
OSC2: 40.0 ;CPUREFCLK0 A7 CPU (20:1 - 800MHz)
|
||||
OSC3: 40.0 ;CPUREFCLK1 A7 CPU (20:1 - 800MHz)
|
||||
OSC4: 40.0 ;HSBM AXI (40MHz)
|
||||
OSC5: 23.75 ;HDLCD (23.75MHz - TC PLL is in bypass)
|
||||
OSC6: 50.0 ;SMB (50MHz)
|
||||
OSC7: 50.0 ;SYSREFCLK (20:1 - 1.0GHz, ACLK - 500MHz)
|
||||
OSC8: 50.0 ;DDR2 (8:1 - 400MHz)
|
||||
|
||||
[SCC REGISTERS]
|
||||
TOTALSCCS: 33 ;Total Number of SCC registers
|
||||
|
||||
;SCC: 0x010 0x000003D0 ;Remap to NOR0
|
||||
SCC: 0x010 $SCC_0x010 ;Switch between NOR0/NOR1
|
||||
SCC: 0x01C 0xFF00FF00 ;CFGRW3 - SMC CS6/7 N/U
|
||||
SCC: 0x118 0x01CD1011 ;CFGRW17 - HDLCD PLL external bypass
|
||||
;SCC: 0x700 0x00320003 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
|
||||
SCC: 0x700 $SCC_0x700 ;CFGRW48 - [25:24]Boot CPU [28]Boot Cluster (default CA7_0)
|
||||
; Bootmon configuration:
|
||||
; [15]: A7 Event stream generation (default: disabled)
|
||||
; [14]: A15 Event stream generation (default: disabled)
|
||||
; [13]: Power down the non-boot cluster (default: disabled)
|
||||
; [12]: Use per-cpu mailboxes for power management (default: disabled)
|
||||
; [11]: A15 executes WFEs as nops (default: disabled)
|
||||
|
||||
SCC: 0x400 0x33330c00 ;CFGREG41 - A15 configuration register 0 (Default 0x33330c80)
|
||||
; [29:28] SPNIDEN
|
||||
; [25:24] SPIDEN
|
||||
; [21:20] NIDEN
|
||||
; [17:16] DBGEN
|
||||
; [13:12] CFGTE
|
||||
; [9:8] VINITHI_CORE
|
||||
; [7] IMINLN
|
||||
; [3:0] CLUSTER_ID
|
||||
|
||||
;Set the CPU clock PLLs
|
||||
SCC: 0x120 0x022F1010 ;CFGRW19 - CA15_0 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x124 0x0011710D ;CFGRW20 - CA15_0 PLL value
|
||||
SCC: 0x128 0x022F1010 ;CFGRW21 - CA15_1 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x12C 0x0011710D ;CFGRW22 - CA15_1 PLL value
|
||||
SCC: 0x130 0x022F1010 ;CFGRW23 - CA7_0 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x134 0x0011710D ;CFGRW24 - CA7_0 PLL value
|
||||
SCC: 0x138 0x022F1010 ;CFGRW25 - CA7_1 PLL control - 20:1 (lock OFF)
|
||||
SCC: 0x13C 0x0011710D ;CFGRW26 - CA7_1 PLL value
|
||||
|
||||
;Power management interface
|
||||
SCC: 0xC00 0x00000005 ;Control: [0]PMI_EN [1]DBG_EN [2]SPC_SYSCFG
|
||||
SCC: 0xC04 0x060E0356 ;Latency in uS max: [15:0]DVFS [31:16]PWRUP
|
||||
SCC: 0xC08 0x00000000 ;Reserved
|
||||
SCC: 0xC0C 0x00000000 ;Reserved
|
||||
|
||||
;CA15 performance values: 0xVVVFFFFF
|
||||
SCC: 0xC10 0x384061A8 ;CA15 PERFVAL0, 900mV, 20,000*20= 500MHz
|
||||
SCC: 0xC14 0x38407530 ;CA15 PERFVAL1, 900mV, 25,000*20= 600MHz
|
||||
SCC: 0xC18 0x384088B8 ;CA15 PERFVAL2, 900mV, 30,000*20= 700MHz
|
||||
SCC: 0xC1C 0x38409C40 ;CA15 PERFVAL3, 900mV, 35,000*20= 800MHz
|
||||
SCC: 0xC20 0x3840AFC8 ;CA15 PERFVAL4, 900mV, 40,000*20= 900MHz
|
||||
SCC: 0xC24 0x3840C350 ;CA15 PERFVAL5, 900mV, 45,000*20=1000MHz
|
||||
SCC: 0xC28 0x3CF0D6D8 ;CA15 PERFVAL6, 975mV, 50,000*20=1100MHz
|
||||
SCC: 0xC2C 0x41A0EA60 ;CA15 PERFVAL7, 1050mV, 55,000*20=1200MHz
|
||||
|
||||
;CA7 performance values: 0xVVVFFFFF
|
||||
SCC: 0xC30 0x3840445C ;CA7 PERFVAL0, 900mV, 10,000*20= 350MHz
|
||||
SCC: 0xC34 0x38404E20 ;CA7 PERFVAL1, 900mV, 15,000*20= 400MHz
|
||||
SCC: 0xC38 0x384061A8 ;CA7 PERFVAL2, 900mV, 20,000*20= 500MHz
|
||||
SCC: 0xC3C 0x38407530 ;CA7 PERFVAL3, 900mV, 25,000*20= 600MHz
|
||||
SCC: 0xC40 0x384088B8 ;CA7 PERFVAL4, 900mV, 30,000*20= 700MHz
|
||||
SCC: 0xC44 0x38409C40 ;CA7 PERFVAL5, 900mV, 35,000*20= 800MHz
|
||||
SCC: 0xC48 0x3CF0AFC8 ;CA7 PERFVAL6, 975mV, 40,000*20= 900MHz
|
||||
SCC: 0xC4C 0x41A0C350 ;CA7 PERFVAL7, 1050mV, 45,000*20=1000MHz
|
@@ -1,25 +0,0 @@
|
||||
TITLE: Versatile Express Images Configuration File
|
||||
|
||||
[IMAGES]
|
||||
TOTALIMAGES: 4 ;Number of Images (Max : 32)
|
||||
NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
|
||||
NOR0ADDRESS: BOOT ;Image Flash Address
|
||||
NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
|
||||
|
||||
NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR1ADDRESS: 0x00000000 ;Image Flash Address
|
||||
NOR1FILE: \SOFTWARE\kern_iks.bin ;Image File Name
|
||||
NOR1LOAD: 0x80008000
|
||||
NOR1ENTRY: 0x80008000
|
||||
|
||||
NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR2ADDRESS: 0x00000000 ;Image Flash Address
|
||||
NOR2FILE: \SOFTWARE\iks.dtb ;Image File Name for booting in A7 cluster
|
||||
NOR2LOAD: 0x84000000
|
||||
NOR2ENTRY: 0x84000000
|
||||
|
||||
NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR3ADDRESS: 0x00000000 ;Image Flash Address
|
||||
NOR3FILE: \SOFTWARE\init_iks.bin ;Image File Name
|
||||
NOR3LOAD: 0x90100000
|
||||
NOR3ENTRY: 0x90100000
|
@@ -1,55 +0,0 @@
|
||||
TITLE: Versatile Express Images Configuration File
|
||||
[IMAGES]
|
||||
TOTALIMAGES: 9 ;Number of Images (Max: 32)
|
||||
NOR0UPDATE: AUTO ;Image Update:NONE/AUTO/FORCE
|
||||
NOR0ADDRESS: BOOT ;Image Flash Address
|
||||
NOR0FILE: \SOFTWARE\$bm_image ;Image File Name
|
||||
|
||||
NOR1UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR1ADDRESS: 0x0E000000 ;Image Flash Address
|
||||
NOR1FILE: \SOFTWARE\kern_mp.bin ;Image File Name
|
||||
NOR1LOAD: 0x80008000
|
||||
NOR1ENTRY: 0x80008000
|
||||
|
||||
NOR2UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR2ADDRESS: 0x0E800000 ;Image Flash Address
|
||||
NOR2FILE: \SOFTWARE\mp_a7.dtb ;Image File Name for booting in A7 cluster
|
||||
NOR2LOAD: 0x84000000
|
||||
NOR2ENTRY: 0x84000000
|
||||
|
||||
NOR3UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR3ADDRESS: 0x0E900000 ;Image Flash Address
|
||||
NOR3FILE: \SOFTWARE\mp_a15.dtb ;Image File Name
|
||||
NOR3LOAD: 0x84000000
|
||||
NOR3ENTRY: 0x84000000
|
||||
|
||||
NOR4UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR4ADDRESS: 0x0EA00000 ;Image Flash Address
|
||||
NOR4FILE: \SOFTWARE\mp_a7bc.dtb ;Image File Name
|
||||
NOR4LOAD: 0x84000000
|
||||
NOR4ENTRY: 0x84000000
|
||||
|
||||
NOR5UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR5ADDRESS: 0x0EB00000 ;Image Flash Address
|
||||
NOR5FILE: \SOFTWARE\mp_a15bc.dtb ;Image File Name
|
||||
NOR5LOAD: 0x84000000
|
||||
NOR5ENTRY: 0x84000000
|
||||
|
||||
NOR6UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR6ADDRESS: 0x0EC00000 ;Image Flash Address
|
||||
NOR6FILE: \SOFTWARE\init_mp.bin ;Image File Name
|
||||
NOR6LOAD: 0x85000000
|
||||
NOR6ENTRY: 0x85000000
|
||||
|
||||
NOR7UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR7ADDRESS: 0x0C000000 ;Image Flash Address
|
||||
NOR7FILE: \SOFTWARE\tc2_sec.bin ;Image File Name
|
||||
NOR7LOAD: 0
|
||||
NOR7ENTRY: 0
|
||||
|
||||
NOR8UPDATE: AUTO ;IMAGE UPDATE:NONE/AUTO/FORCE
|
||||
NOR8ADDRESS: 0x0D000000 ;Image Flash Address
|
||||
NOR8FILE: \SOFTWARE\tc2_uefi.bin ;Image File Name
|
||||
NOR8LOAD: 0
|
||||
NOR8ENTRY: 0
|
||||
|
@@ -1,36 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
|
||||
|
||||
class Xe503c12Chormebook(LinuxDevice):
|
||||
|
||||
name = "XE503C12"
|
||||
description = 'A developer-unlocked Samsung XE503C12 running sshd.'
|
||||
platform = 'chromeos'
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['a15', 'a15', 'a15', 'a15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0], override=True),
|
||||
Parameter('username', default='chronos', override=True),
|
||||
Parameter('password', default='', override=True),
|
||||
Parameter('password_prompt', default='Password:', override=True),
|
||||
Parameter('binaries_directory', default='/home/chronos/bin', override=True),
|
||||
]
|
||||
|
||||
abi = 'armeabi'
|
||||
|
@@ -1,16 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
@@ -1,100 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import re
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
from wlauto.exceptions import DeviceError
|
||||
from wlauto.core.device import RuntimeParameter
|
||||
from wlauto.utils.misc import convert_new_lines
|
||||
from wlauto.utils.types import boolean
|
||||
|
||||
|
||||
class ChromeOsDevice(LinuxDevice):
|
||||
|
||||
name = "chromeos_test_image"
|
||||
description = """
|
||||
Chrome OS test image device. Use this if you are working on a Chrome OS device with a test
|
||||
image. An off-the-shelf device will not work with this device interface.
|
||||
|
||||
More information on how to build a Chrome OS test image can be found here:
|
||||
|
||||
https://www.chromium.org/chromium-os/developer-guide#TOC-Build-a-disk-image-for-your-board
|
||||
|
||||
"""
|
||||
|
||||
platform = 'chromeos'
|
||||
abi = 'armeabi'
|
||||
has_gpu = True
|
||||
default_timeout = 100
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
Parameter('username', default='root', override=True),
|
||||
Parameter('password_prompt', default='Password:', override=True),
|
||||
Parameter('binaries_directory', default='/usr/local/bin', override=True),
|
||||
Parameter('working_directory', default='/home/root/wa-working', override=True),
|
||||
]
|
||||
|
||||
runtime_parameters = [
|
||||
RuntimeParameter('ui', 'get_ui_status', 'set_ui_status', value_name='status'),
|
||||
]
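# Example (illustrative): setting the 'ui' runtime parameter to 'false' in
# an agenda is routed through set_ui_status() below, which issues 'stop ui'
# for the duration of the run; stop() then restores the UI with 'start ui'.
# (Assumes the boolean() conversion treats 'false' as False.)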
def __init__(self, **kwargs):
|
||||
super(ChromeOsDevice, self).__init__(**kwargs)
|
||||
self.ui_status = None
|
||||
|
||||
def validate(self):
|
||||
# pylint: disable=access-member-before-definition,attribute-defined-outside-init
|
||||
if self.password is None and not self.keyfile:
|
||||
self.password = 'test0000'
|
||||
|
||||
def initialize(self, context, *args, **kwargs):
|
||||
if self.busybox == 'busybox':
|
||||
self.logger.debug('Busybox already installed on the device: replacing with wa version')
|
||||
self.uninstall('busybox')
|
||||
self.busybox = self.deploy_busybox(context)
|
||||
|
||||
def get_ui_status(self):
|
||||
return self.ui_status
|
||||
|
||||
def set_ui_status(self, status):
|
||||
self.ui_status = boolean(status)
|
||||
if self.ui_status is None:
|
||||
pass
|
||||
elif self.ui_status:
|
||||
try:
|
||||
self.execute('start ui')
|
||||
except DeviceError:
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
self.execute('stop ui')
|
||||
except DeviceError:
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
if self.ui_status is None:
|
||||
pass
|
||||
elif not self.ui_status:
|
||||
try:
|
||||
self.execute('start ui')
|
||||
except DeviceError:
|
||||
pass
|
||||
else:
|
||||
pass
|
||||
self.ui_status = None
|
||||
|
@@ -1,120 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
|
||||
|
||||
import logging
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
from wlauto.common.gem5.device import BaseGem5Device
|
||||
from wlauto.utils import types
|
||||
|
||||
|
||||
class Gem5LinuxDevice(BaseGem5Device, LinuxDevice):
|
||||
"""
|
||||
Implements gem5 Linux device.
|
||||
|
||||
This class allows a user to connect WA to a simulation using gem5. The
|
||||
connection to the device is made using the telnet connection of the
|
||||
simulator, and is used for all commands. The simulator does not have ADB
|
||||
support, and therefore we need to fall back to using standard shell
|
||||
commands.
|
||||
|
||||
Files are copied into the simulation using a VirtIO 9P device in gem5. Files
|
||||
are copied out of the simulated environment using the m5 writefile command
|
||||
within the simulated system.
|
||||
|
||||
When starting the workload run, the simulator is automatically started by
|
||||
Workload Automation, and a connection to the simulator is established. WA
|
||||
will then wait for the target OS to boot on the simulated system (which can take
|
||||
hours), prior to executing any other commands on the device. It is also
|
||||
possible to resume from a checkpoint when starting the simulation. To do
|
||||
this, please append the relevant checkpoint commands from the gem5
|
||||
simulation script to the gem5_discription argument in the agenda.
|
||||
|
||||
Host system requirements:
|
||||
* VirtIO support. We rely on diod on the host system. This can be
|
||||
installed on Ubuntu using the following command:
|
||||
|
||||
sudo apt-get install diod
|
||||
|
||||
Guest requirements:
|
||||
* VirtIO support. We rely on VirtIO to move files into the simulation.
|
||||
Please make sure that the following are set in the kernel
|
||||
configuration:
|
||||
|
||||
CONFIG_NET_9P=y
|
||||
|
||||
CONFIG_NET_9P_VIRTIO=y
|
||||
|
||||
CONFIG_9P_FS=y
|
||||
|
||||
CONFIG_9P_FS_POSIX_ACL=y
|
||||
|
||||
CONFIG_9P_FS_SECURITY=y
|
||||
|
||||
CONFIG_VIRTIO_BLK=y
|
||||
|
||||
* m5 binary. Please make sure that the m5 binary is on the device and
|
||||
can be found in the path.
|
||||
"""
|
||||
|
||||
name = 'gem5_linux'
|
||||
platform = 'linux'
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
Parameter('host', default='localhost', override=True,
|
||||
description='Host name or IP address for the device.'),
|
||||
Parameter('login_prompt', kind=types.list_of_strs,
|
||||
default=['login:', 'AEL login:', 'username:'],
|
||||
mandatory=False),
|
||||
Parameter('login_password_prompt', kind=types.list_of_strs,
|
||||
default=['password:'], mandatory=False),
|
||||
]
|
||||
|
||||
# Overwritten from Device. For documentation, see corresponding method in
|
||||
# Device.
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.logger = logging.getLogger('Gem5LinuxDevice')
|
||||
LinuxDevice.__init__(self, **kwargs)
|
||||
BaseGem5Device.__init__(self)
|
||||
|
||||
def login_to_device(self):
|
||||
# Wait for the login prompt
|
||||
prompt = self.login_prompt + [self.sckt.UNIQUE_PROMPT]
|
||||
i = self.sckt.expect(prompt, timeout=10)
|
||||
# Check if we are already at a prompt, or if we need to log in.
|
||||
if i < len(prompt) - 1:
|
||||
self.sckt.sendline("{}".format(self.username))
|
||||
password_prompt = self.login_password_prompt + [r'# ', self.sckt.UNIQUE_PROMPT]
|
||||
j = self.sckt.expect(password_prompt, timeout=self.delay)
|
||||
if j < len(password_prompt) - 2:
|
||||
self.sckt.sendline("{}".format(self.password))
|
||||
self.sckt.expect([r'# ', self.sckt.UNIQUE_PROMPT], timeout=self.delay)
|
||||
|
||||
def capture_screen(self, filepath):
|
||||
if BaseGem5Device.capture_screen(self, filepath):
|
||||
return
|
||||
|
||||
# If we didn't manage to do the above, call the parent class.
|
||||
self.logger.warning("capture_screen: falling back to parent class implementation")
|
||||
LinuxDevice.capture_screen(self, filepath)
|
||||
|
||||
def initialize(self, context):
|
||||
self.resize_shell()
|
||||
self.deploy_m5(context, force=False)
|
@@ -1,37 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
|
||||
|
||||
class GenericDevice(LinuxDevice):
|
||||
name = 'generic_linux'
|
||||
description = """
|
||||
A generic Linux device interface. Use this if you do not have an interface
|
||||
for your device.
|
||||
|
||||
This should allow basic WA functionality on most Linux devices with SSH access
|
||||
configured. Some additional configuration may be required for some WA extensions
|
||||
(e.g. configuring ``core_names`` and ``core_clusters``).
|
||||
|
||||
"""
|
||||
|
||||
has_gpu = True
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=[], override=True),
|
||||
Parameter('core_clusters', default=[], override=True),
|
||||
]
|
@@ -1,35 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import LinuxDevice, Parameter
|
||||
|
||||
|
||||
class OdroidXU3LinuxDevice(LinuxDevice):
|
||||
|
||||
name = "odroidxu3_linux"
|
||||
description = 'HardKernel Odroid XU3 development board (Ubuntu image).'
|
||||
|
||||
core_modules = [
|
||||
'odroidxu3-fan',
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('core_names', default=['a7', 'a7', 'a7', 'a7', 'a15', 'a15', 'a15', 'a15'], override=True),
|
||||
Parameter('core_clusters', default=[0, 0, 0, 0, 1, 1, 1, 1], override=True),
|
||||
]
|
||||
|
||||
abi = 'armeabi'
|
||||
|
@@ -141,3 +141,20 @@ class WorkerThreadError(WAError):
|
||||
message = 'Exception of type {} occurred on thread {}:\n'.format(orig_name, thread)
|
||||
message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
|
||||
super(WorkerThreadError, self).__init__(message)
|
||||
|
||||
|
||||
class SerializerSyntaxError(Exception):
|
||||
"""
|
||||
Error loading a serialized structure from/to a file handle.
|
||||
"""
|
||||
|
||||
def __init__(self, message, line=None, column=None):
|
||||
super(SerializerSyntaxError, self).__init__(message)
|
||||
self.line = line
|
||||
self.column = column
|
||||
|
||||
def __str__(self):
|
||||
linestring = ' on line {}'.format(self.line) if self.line else ''
|
||||
colstring = ' in column {}'.format(self.column) if self.column else ''
|
||||
message = 'Syntax Error{}: {}'
|
||||
return message.format(''.join([linestring, colstring]), self.message)
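# Minimal usage sketch (illustrative): a loader would raise this with the
# position of the offending token, and the caller can simply print it:
#
#     try:
#         raise SerializerSyntaxError('unexpected token', line=3, column=14)
#     except SerializerSyntaxError as e:
#         print e  # Syntax Error on line 3 in column 14: unexpected token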
@@ -197,12 +197,12 @@ class Daq(Instrument):
|
||||
raise InstrumentError('GPIO sysfs not enabled on the device.')
|
||||
try:
|
||||
export_path = self.device.path.join(GPIO_ROOT, 'export')
|
||||
self.device.set_sysfile_value(export_path, self.gpio_sync, verify=False)
|
||||
self.device.write_value(export_path, self.gpio_sync, verify=False)
|
||||
pin_root = self.device.path.join(GPIO_ROOT, 'gpio{}'.format(self.gpio_sync))
|
||||
direction_path = self.device.path.join(pin_root, 'direction')
|
||||
self.device.set_sysfile_value(direction_path, 'out')
|
||||
self.device.write_value(direction_path, 'out')
|
||||
self.gpio_path = self.device.path.join(pin_root, 'value')
|
||||
self.device.set_sysfile_value(self.gpio_path, 0, verify=False)
|
||||
self.device.write_value(self.gpio_path, 0, verify=False)
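# Note on the sequence above (assuming GPIO_ROOT points at the usual
# /sys/class/gpio): the three writes export the sync pin, configure it as
# an output, and drive it low, i.e. export -> gpioN/direction -> gpioN/value.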
|
||||
signal.connect(self.insert_start_marker, signal.BEFORE_WORKLOAD_EXECUTION, priority=11)
|
||||
signal.connect(self.insert_stop_marker, signal.AFTER_WORKLOAD_EXECUTION, priority=11)
|
||||
except DeviceError as e:
|
||||
@@ -276,7 +276,7 @@ class Daq(Instrument):
|
||||
def finalize(self, context):
|
||||
if self.gpio_path:
|
||||
unexport_path = self.device.path.join(GPIO_ROOT, 'unexport')
|
||||
self.device.set_sysfile_value(unexport_path, self.gpio_sync, verify=False)
|
||||
self.device.write_value(unexport_path, self.gpio_sync, verify=False)
|
||||
|
||||
def validate(self): # pylint: disable=too-many-branches
|
||||
if not daq:
|
||||
|
@@ -39,7 +39,7 @@ class DmesgInstrument(Instrument):
|
||||
def setup(self, context):
|
||||
if self.loglevel:
|
||||
self.old_loglevel = self.device.get_sysfile_value(self.loglevel_file)
|
||||
self.device.set_sysfile_value(self.loglevel_file, self.loglevel, verify=False)
|
||||
self.device.write_value(self.loglevel_file, self.loglevel, verify=False)
|
||||
self.before_file = _f(os.path.join(context.output_directory, 'dmesg', 'before'))
|
||||
self.after_file = _f(os.path.join(context.output_directory, 'dmesg', 'after'))
|
||||
|
||||
@@ -57,6 +57,6 @@ class DmesgInstrument(Instrument):
|
||||
|
||||
def teardown(self, context): # pylint: disable=unused-argument
|
||||
if self.loglevel:
|
||||
self.device.set_sysfile_value(self.loglevel_file, self.old_loglevel, verify=False)
|
||||
self.device.write_value(self.loglevel_file, self.old_loglevel, verify=False)
|
||||
|
||||
|
||||
|
@@ -634,7 +634,7 @@ class EnergyModelInstrument(Instrument):
|
||||
self.enable_all_idle_states()
|
||||
self.reset_cgroups()
|
||||
self.cpuset.move_all_tasks_to(self.measuring_cluster)
|
||||
server_process = 'adbd' if self.device.platform == 'android' else 'sshd'
|
||||
server_process = 'adbd' if self.device.os == 'android' else 'sshd'
|
||||
server_pids = self.device.get_pids_of(server_process)
|
||||
children_ps = [e for e in self.device.ps()
|
||||
if e.ppid in server_pids and e.name != 'sshd']
|
||||
@@ -769,7 +769,7 @@ class EnergyModelInstrument(Instrument):
|
||||
for tzpath in tzone_paths.strip().split():
|
||||
mode_file = '{}/mode'.format(tzpath)
|
||||
if self.device.file_exists(mode_file):
|
||||
self.device.set_sysfile_value(mode_file, 'disabled')
|
||||
self.device.write_value(mode_file, 'disabled')
|
||||
|
||||
def get_device_idle_states(self, cluster):
|
||||
if cluster == 'big':
|
||||
|
@@ -81,7 +81,7 @@ class JunoEnergy(Instrument):
|
||||
self.device.killall('readenergy', signal='TERM', as_root=True)
|
||||
|
||||
def update_result(self, context):
|
||||
self.device.pull_file(self.device_output_file, self.host_output_file)
|
||||
self.device.pull(self.device_output_file, self.host_output_file)
|
||||
context.add_artifact('junoenergy', self.host_output_file, 'data')
|
||||
|
||||
with open(self.host_output_file) as fh:
|
||||
@@ -99,7 +99,7 @@ class JunoEnergy(Instrument):
|
||||
context.add_metric(header, value, UNIT_MAP[header.split('_')[-1]])
|
||||
|
||||
def teardown(self, context):
|
||||
self.device.delete_file(self.device_output_file)
|
||||
self.device.remove(self.device_output_file)
|
||||
|
||||
def validate(self):
|
||||
if self.strict:
|
||||
|
@@ -33,9 +33,11 @@ import tarfile
|
||||
from itertools import izip, izip_longest
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
from devlib.exception import TargetError
|
||||
|
||||
from wlauto import Instrument, Parameter
|
||||
from wlauto.core import signal
|
||||
from wlauto.exceptions import DeviceError, ConfigError
|
||||
from wlauto.exceptions import ConfigError
|
||||
from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
|
||||
from wlauto.utils.misc import ensure_file_directory_exists as _f
|
||||
from wlauto.utils.misc import ensure_directory_exists as _d
|
||||
@@ -134,7 +136,7 @@ class SysfsExtractor(Instrument):
|
||||
as_root=True, check_exit_code=False)
|
||||
else: # not rooted
|
||||
for dev_dir, before_dir, _, _ in self.device_and_host_paths:
|
||||
self.device.pull_file(dev_dir, before_dir)
|
||||
self.device.pull(dev_dir, before_dir)
|
||||
|
||||
def slow_stop(self, context):
|
||||
if self.use_tmpfs:
|
||||
@@ -146,7 +148,7 @@ class SysfsExtractor(Instrument):
|
||||
as_root=True, check_exit_code=False)
|
||||
else: # not using tmpfs
|
||||
for dev_dir, _, after_dir, _ in self.device_and_host_paths:
|
||||
self.device.pull_file(dev_dir, after_dir)
|
||||
self.device.pull(dev_dir, after_dir)
|
||||
|
||||
def update_result(self, context):
|
||||
if self.use_tmpfs:
|
||||
@@ -157,10 +159,10 @@ class SysfsExtractor(Instrument):
|
||||
self.tmpfs_mount_point),
|
||||
as_root=True)
|
||||
self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
|
||||
self.device.pull_file(on_device_tarball, on_host_tarball)
|
||||
self.device.pull(on_device_tarball, on_host_tarball)
|
||||
with tarfile.open(on_host_tarball, 'r:gz') as tf:
|
||||
tf.extractall(context.output_directory)
|
||||
self.device.delete_file(on_device_tarball)
|
||||
self.device.remove(on_device_tarball)
|
||||
os.remove(on_host_tarball)
|
||||
|
||||
for paths in self.device_and_host_paths:
|
||||
@@ -181,7 +183,7 @@ class SysfsExtractor(Instrument):
|
||||
if self.use_tmpfs:
|
||||
try:
|
||||
self.device.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
|
||||
except (DeviceError, CalledProcessError):
|
||||
except (TargetError, CalledProcessError):
|
||||
# assume a directory but not mount point
|
||||
pass
|
||||
self.device.execute('rm -rf {}'.format(self.tmpfs_mount_point),
|
||||
@@ -386,4 +388,3 @@ def _diff_sysfs_dirs(before, after, result): # pylint: disable=R0914
|
||||
else:
|
||||
dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
|
||||
dfh.write(''.join(dchunks))
|
||||
|
||||
|
@@ -159,7 +159,7 @@ class NetstatsInstrument(Instrument):
|
||||
]
|
||||
|
||||
def initialize(self, context):
|
||||
if self.device.platform != 'android':
|
||||
if self.device.os != 'android':
|
||||
raise DeviceError('netstats instrument only supports Android devices.')
|
||||
apk = context.resolver.get(ApkFile(self))
|
||||
self.collector = NetstatsCollector(self.device, apk) # pylint: disable=attribute-defined-outside-init
|
||||
|
@@ -106,7 +106,7 @@ class PerfInstrument(Instrument):
|
||||
self.device.kick_off(command)
|
||||
|
||||
def stop(self, context):
|
||||
as_root = self.device.platform == 'android'
|
||||
as_root = self.device.os == 'android'
|
||||
self.device.killall('sleep', as_root=as_root)
|
||||
|
||||
def update_result(self, context):
|
||||
@@ -114,7 +114,7 @@ class PerfInstrument(Instrument):
|
||||
device_file = self._get_device_outfile(label)
|
||||
host_relpath = os.path.join('perf', os.path.basename(device_file))
|
||||
host_file = _f(os.path.join(context.output_directory, host_relpath))
|
||||
self.device.pull_file(device_file, host_file)
|
||||
self.device.pull(device_file, host_file)
|
||||
context.add_iteration_artifact(label, kind='raw', path=host_relpath)
|
||||
with open(host_file) as fh:
|
||||
in_results_section = False
|
||||
@@ -165,7 +165,7 @@ class PerfInstrument(Instrument):
|
||||
def _clean_device(self):
|
||||
for label in self.labels:
|
||||
filepath = self._get_device_outfile(label)
|
||||
self.device.delete_file(filepath)
|
||||
self.device.remove(filepath)
|
||||
|
||||
def _get_device_outfile(self, label):
|
||||
return self.device.path.join(self.device.working_directory, '{}.out'.format(label))
|
||||
|
@@ -91,21 +91,21 @@ class CciPmuLogger(Instrument):
|
||||
if self.install_module:
|
||||
self.device_driver_file = self.device.path.join(self.device.working_directory, DRIVER)
|
||||
host_driver_file = os.path.join(settings.dependencies_directory, DRIVER)
|
||||
self.device.push_file(host_driver_file, self.device_driver_file)
|
||||
self.device.push(host_driver_file, self.device_driver_file)
|
||||
|
||||
def setup(self, context):
|
||||
if self.install_module:
|
||||
self.device.execute('insmod {}'.format(self.device_driver_file), check_exit_code=False)
|
||||
self.device.set_sysfile_value(CPL_PERIOD_FILE, self.period)
|
||||
self.device.write_value(CPL_PERIOD_FILE, self.period)
|
||||
for i, event in enumerate(self.events):
|
||||
counter = CPL_BASE + 'counter{}'.format(i)
|
||||
self.device.set_sysfile_value(counter, event, verify=False)
|
||||
self.device.write_value(counter, event, verify=False)
|
||||
|
||||
def start(self, context):
|
||||
self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
|
||||
self.device.write_value(CPL_CONTROL_FILE, 1, verify=False)
|
||||
|
||||
def stop(self, context):
|
||||
self.device.set_sysfile_value(CPL_CONTROL_FILE, 1, verify=False)
|
||||
self.device.write_value(CPL_CONTROL_FILE, 1, verify=False)
|
||||
|
||||
# Doing result processing inside teardown because need to make sure that
|
||||
# trace-cmd has processed its results and generated the trace.txt
|
||||
|
@@ -65,7 +65,7 @@ class ScreenOnInstrument(Instrument):
|
||||
|
||||
def initialize(self, context):
|
||||
self.monitor = None
|
||||
if self.device.platform != 'android':
|
||||
if self.device.os != 'android':
|
||||
raise InstrumentError('screenon instrument currently only supports Android devices.')
|
||||
|
||||
def slow_setup(self, context): # slow to run before most other setups
|
||||
|
@@ -182,13 +182,13 @@ class StreamlineInstrument(Instrument):
|
||||
caiman_path = subprocess.check_output('which caiman', shell=True).strip() # pylint: disable=E1103
|
||||
self.session_file = os.path.join(context.host_working_directory, 'streamline_session.xml')
|
||||
with open(self.session_file, 'w') as wfh:
|
||||
if self.device.platform == "android":
|
||||
if self.device.os == "android":
|
||||
wfh.write(SESSION_TEXT_TEMPLATE.format('127.0.0.1', self.port, caiman_path))
|
||||
else:
|
||||
wfh.write(SESSION_TEXT_TEMPLATE.format(self.device.host, self.port, caiman_path))
|
||||
|
||||
if self.configuration_file:
|
||||
self.device.push_file(self.configuration_file, self.on_device_config)
|
||||
self.device.push(self.configuration_file, self.on_device_config)
|
||||
self._initialize_daemon()
|
||||
|
||||
def setup(self, context):
|
||||
@@ -220,7 +220,7 @@ class StreamlineInstrument(Instrument):
|
||||
|
||||
def teardown(self, context):
|
||||
self._kill_daemon()
|
||||
self.device.delete_file(self.on_device_config)
|
||||
self.device.remove(self.on_device_config)
|
||||
|
||||
def _check_has_valid_display(self): # pylint: disable=R0201
|
||||
reason = None
|
||||
@@ -243,7 +243,7 @@ class StreamlineInstrument(Instrument):
|
||||
raise
|
||||
self.logger.debug('Driver was already installed.')
|
||||
self._start_daemon()
|
||||
if self.device.platform == "android":
|
||||
if self.device.os == "android":
|
||||
port_spec = 'tcp:{}'.format(self.port)
|
||||
self.device.forward_port(port_spec, port_spec)
|
||||
|
||||
|
@@ -166,7 +166,7 @@ class TraceCmdInstrument(Instrument):
|
||||
host_file = context.resolver.get(Executable(self, self.device.abi, 'trace-cmd'))
|
||||
self.trace_cmd = self.device.install(host_file)
|
||||
else:
|
||||
self.trace_cmd = self.device.get_binary_path("trace-cmd")
|
||||
self.trace_cmd = self.device.get_installed("trace-cmd")
|
||||
if not self.trace_cmd:
|
||||
raise ConfigError('No trace-cmd found on device and no_install=True is specified.')
|
||||
|
||||
@@ -233,7 +233,7 @@ class TraceCmdInstrument(Instrument):
|
||||
# Therefore the timeout for the pull command must also be adjusted
|
||||
# accordingly.
|
||||
self._pull_timeout = (self.stop_time - self.start_time) # pylint: disable=attribute-defined-outside-init
|
||||
self.device.pull_file(self.output_file, context.output_directory, timeout=self._pull_timeout)
|
||||
self.device.pull(self.output_file, context.output_directory, timeout=self._pull_timeout)
|
||||
context.add_iteration_artifact('bintrace', OUTPUT_TRACE_FILE, kind='data',
|
||||
description='trace-cmd generated ftrace dump.')
|
||||
|
||||
@@ -263,7 +263,7 @@ class TraceCmdInstrument(Instrument):
|
||||
self.logger.warning('Could not generate trace.txt.')
|
||||
|
||||
def teardown(self, context):
|
||||
self.device.delete_file(os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))
|
||||
self.device.remove(os.path.join(self.device.working_directory, OUTPUT_TRACE_FILE))
|
||||
|
||||
def on_run_end(self, context):
|
||||
pass
|
||||
@@ -282,11 +282,11 @@ class TraceCmdInstrument(Instrument):
|
||||
|
||||
def insert_start_mark(self, context):
|
||||
# trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_START in info field
|
||||
self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)
|
||||
self.device.write_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_START", verify=False)
|
||||
|
||||
def insert_end_mark(self, context):
|
||||
# trace marker appears in ftrace as an ftrace/print event with TRACE_MARKER_STOP in info field
|
||||
self.device.set_sysfile_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)
|
||||
self.device.write_value("/sys/kernel/debug/tracing/trace_marker", "TRACE_MARKER_STOP", verify=False)
|
||||
|
||||
def _set_buffer_size(self):
|
||||
target_buffer_size = self.buffer_size
|
||||
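For reference, writes to trace_marker like the ones above surface in the generated trace.txt as print events ("tracing_mark_write" on most kernels), so the workload window can be recovered from a pulled trace. A minimal sketch, assuming a trace.txt already pulled to the host:

    # Hedged sketch: trim a pulled trace.txt to the region between the
    # start and stop markers inserted above. The field layout of trace
    # lines varies between kernels, so only the marker strings are used.
    with open('trace.txt') as fh:
        lines = fh.readlines()
    start = next(i for i, line in enumerate(lines) if 'TRACE_MARKER_START' in line)
    stop = next(i for i, line in enumerate(lines) if 'TRACE_MARKER_STOP' in line)
    workload_window = lines[start:stop + 1]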
@@ -294,7 +294,7 @@ class TraceCmdInstrument(Instrument):
|
||||
buffer_size = 0
|
||||
floor = 1000 if target_buffer_size > 1000 else target_buffer_size
|
||||
while attempt_buffer_size >= floor:
|
||||
self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
|
||||
self.device.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
|
||||
buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
|
||||
if buffer_size == attempt_buffer_size:
|
||||
break
|
||||
@@ -304,7 +304,7 @@ class TraceCmdInstrument(Instrument):
|
||||
return
|
||||
while attempt_buffer_size < target_buffer_size:
|
||||
attempt_buffer_size += self.buffer_size_step
|
||||
self.device.set_sysfile_value(self.buffer_size_file, attempt_buffer_size, verify=False)
|
||||
self.device.write_value(self.buffer_size_file, attempt_buffer_size, verify=False)
|
||||
buffer_size = self.device.get_sysfile_value(self.buffer_size_file, kind=int)
|
||||
if attempt_buffer_size != buffer_size:
|
||||
self.logger.warning('Failed to set trace buffer size to {}, value set was {}'.format(target_buffer_size, buffer_size))
|
||||
@@ -316,7 +316,7 @@ class TraceCmdInstrument(Instrument):
|
||||
txt_trace_file = os.path.join(self.device.working_directory, OUTPUT_TEXT_FILE)
|
||||
command = 'trace-cmd report {} > {}'.format(trace_file, txt_trace_file)
|
||||
self.device.execute(command)
|
||||
self.device.pull_file(txt_trace_file, context.output_directory, timeout=self._pull_timeout)
|
||||
self.device.pull(txt_trace_file, context.output_directory, timeout=self._pull_timeout)
|
||||
except DeviceError:
|
||||
raise InstrumentError('Could not generate TXT report on target.')
|
||||
|
||||
|
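Taken together, the instrument changes above are one mechanical renaming from the old Device interface to the devlib-backed one: push_file/pull_file/delete_file become push/pull/remove, set_sysfile_value becomes write_value, get_binary_path becomes get_installed, device.platform checks become device.os, and target-side failures raise devlib's TargetError rather than DeviceError. A hedged compatibility sketch (covering only the renames visible in this diff) that an out-of-tree instrument could use while straddling both APIs:

    # Sketch only: forward old-style calls to the new method names when the
    # new names exist on the device object; otherwise fall back to the old ones.
    OLD_TO_NEW = {
        'push_file': 'push',
        'pull_file': 'pull',
        'delete_file': 'remove',
        'set_sysfile_value': 'write_value',
        'get_binary_path': 'get_installed',
    }

    def compat_call(device, name, *args, **kwargs):
        new_name = OLD_TO_NEW.get(name, name)
        method = getattr(device, new_name, None) or getattr(device, name)
        return method(*args, **kwargs)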
0
wlauto/managers/__init__.py
Normal file
189
wlauto/managers/android.py
Normal file
@@ -0,0 +1,189 @@
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import time
|
||||
import tempfile
|
||||
import shutil
|
||||
import threading
|
||||
|
||||
from wlauto.core.device_manager import DeviceManager
|
||||
from wlauto import Parameter, Alias
|
||||
from wlauto.utils.types import boolean, regex
|
||||
from wlauto.utils.android import adb_command
|
||||
from wlauto.exceptions import WorkerThreadError
|
||||
|
||||
from devlib.target import AndroidTarget
|
||||
|
||||
SCREEN_STATE_REGEX = re.compile('(?:mPowerState|mScreenOn|Display Power: state)=([0-9]+|true|false|ON|OFF)', re.I)
|
||||
SCREEN_SIZE_REGEX = re.compile(r'mUnrestrictedScreen=\(\d+,\d+\)\s+(?P<width>\d+)x(?P<height>\d+)')
|
||||
|
||||
|
||||
class AndroidDevice(DeviceManager):
|
||||
|
||||
name = "android"
|
||||
target_type = AndroidTarget
|
||||
|
||||
aliases = [
|
||||
Alias('generic_android'),
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('adb_name', default=None, kind=str,
|
||||
description='The unique ID of the device as output by "adb devices".'),
|
||||
Parameter('android_prompt', kind=regex, default=re.compile('^.*(shell|root)@.*:/\S* [#$] ', re.MULTILINE), # ##
|
||||
description='The regex used to match the shell prompt on Android.'),
|
||||
Parameter('working_directory', default='/sdcard/wa-working', override=True),
|
||||
Parameter('binaries_directory', default='/data/local/tmp', override=True),
|
||||
Parameter('package_data_directory', default='/data/data',
|
||||
description='Location of data for an installed package (APK).'),
|
||||
Parameter('external_storage_directory', default='/sdcard',
|
||||
description='Mount point for external storage.'),
|
||||
Parameter('logcat_poll_period', kind=int,
|
||||
description="""
|
||||
If specified and is not ``0``, logcat will be polled every
|
||||
``logcat_poll_period`` seconds, and buffered on the host. This
|
||||
can be used if a lot of output is expected in logcat and the fixed
|
||||
logcat buffer on the device is not big enough. The trade off is that
|
||||
this introduces some minor runtime overhead. Not set by default.
|
||||
"""), # ##
|
||||
Parameter('enable_screen_check', kind=boolean, default=False,
|
||||
description="""
|
||||
Specifies whether the device should make sure that the screen is on
|
||||
during initialization.
|
||||
"""),
|
||||
Parameter('swipe_to_unlock', kind=str, default=None,
|
||||
allowed_values=[None, "horizontal", "vertical"],
|
||||
description="""
|
||||
If set, a swipe in the specified direction will be performed.
|
||||
This should unlock the screen.
|
||||
"""), # ##
|
||||
]
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(AndroidDevice, self).__init__(**kwargs)
|
||||
self.connection_settings = self._make_connection_settings()
|
||||
|
||||
self.platform = self.platform_type(core_names=self.core_names, # pylint: disable=E1102
|
||||
core_clusters=self.core_clusters)
|
||||
|
||||
self.target = self.target_type(connection_settings=self.connection_settings,
|
||||
connect=False,
|
||||
platform=self.platform,
|
||||
working_directory=self.working_directory,
|
||||
executables_directory=self.binaries_directory,)
|
||||
self._logcat_poller = None
|
||||
|
||||
def connect(self):
|
||||
self.target.connect()
|
||||
|
||||
def initialize(self, context):
|
||||
super(AndroidDevice, self).initialize(context)
|
||||
if self.enable_screen_check:
|
||||
self.target.ensure_screen_is_on()
|
||||
if self.swipe_to_unlock:
|
||||
self.target.swipe_to_unlock(direction=self.swipe_to_unlock)
|
||||
|
||||
def start(self):
|
||||
if self.logcat_poll_period:
|
||||
if self._logcat_poller:
|
||||
self._logcat_poller.close()
|
||||
self._logcat_poller = _LogcatPoller(self, self.logcat_poll_period,
|
||||
timeout=self.default_timeout)
|
||||
self._logcat_poller.start()
|
||||
else:
|
||||
self.target.clear_logcat()
|
||||
|
||||
def _make_connection_settings(self):
|
||||
connection_settings = {}
|
||||
connection_settings['device'] = self.adb_name
|
||||
return connection_settings
|
||||
|
||||
def dump_logcat(self, outfile, filter_spec=None):
|
||||
"""
|
||||
Dump the contents of logcat, for the specified filter spec to the
|
||||
specified output file.
|
||||
See http://developer.android.com/tools/help/logcat.html
|
||||
|
||||
:param outfile: Output file on the host into which the contents of the
|
||||
log will be written.
|
||||
:param filter_spec: Logcat filter specification.
|
||||
see http://developer.android.com/tools/debugging/debugging-log.html#filteringOutput
|
||||
|
||||
"""
|
||||
if self._logcat_poller:
|
||||
return self._logcat_poller.write_log(outfile)
|
||||
else:
|
||||
if filter_spec:
|
||||
command = 'logcat -d -s {} > {}'.format(filter_spec, outfile)
|
||||
else:
|
||||
command = 'logcat -d > {}'.format(outfile)
|
||||
return adb_command(self.adb_name, command)
|
||||
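A usage sketch of the method above, assuming `device` is an initialized instance of this manager (the output path and filter spec are hypothetical):

    # Dump only Activity Manager messages at Info level, silencing the rest;
    # the filter spec syntax follows the logcat documentation linked above.
    device.dump_logcat('/tmp/wa-output/logcat.log', filter_spec='ActivityManager:I *:S')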
|
||||
|
||||
class _LogcatPoller(threading.Thread):
|
||||
|
||||
join_timeout = 5
|
||||
|
||||
def __init__(self, target, period, timeout=None):
|
||||
super(_LogcatPoller, self).__init__()
|
||||
self.target = target
|
||||
self.logger = target.logger
|
||||
self.period = period
|
||||
self.timeout = timeout
|
||||
self.stop_signal = threading.Event()
|
||||
self.lock = threading.RLock()
|
||||
self.buffer_file = tempfile.mktemp()
|
||||
self.last_poll = 0
|
||||
self.daemon = True
|
||||
self.exc = None
|
||||
|
||||
def run(self):
|
||||
self.logger.debug('Starting logcat polling.')
|
||||
try:
|
||||
while True:
|
||||
if self.stop_signal.is_set():
|
||||
break
|
||||
with self.lock:
|
||||
current_time = time.time()
|
||||
if (current_time - self.last_poll) >= self.period:
|
||||
self._poll()
|
||||
time.sleep(0.5)
|
||||
except Exception: # pylint: disable=W0703
|
||||
self.exc = WorkerThreadError(self.name, sys.exc_info())
|
||||
self.logger.debug('Logcat polling stopped.')
|
||||
|
||||
def stop(self):
|
||||
self.logger.debug('Stopping logcat polling.')
|
||||
self.stop_signal.set()
|
||||
self.join(self.join_timeout)
|
||||
if self.is_alive():
|
||||
self.logger.error('Could not join logcat poller thread.')
|
||||
if self.exc:
|
||||
raise self.exc # pylint: disable=E0702
|
||||
|
||||
def clear_buffer(self):
|
||||
self.logger.debug('Clearing logcat buffer.')
|
||||
with self.lock:
|
||||
self.target.clear_logcat()
|
||||
with open(self.buffer_file, 'w') as _: # NOQA
|
||||
pass
|
||||
|
||||
def write_log(self, outfile):
|
||||
self.logger.debug('Writing logbuffer to {}.'.format(outfile))
|
||||
with self.lock:
|
||||
self._poll()
|
||||
if os.path.isfile(self.buffer_file):
|
||||
shutil.copy(self.buffer_file, outfile)
|
||||
else: # there was no logcat trace at this time
|
||||
with open(outfile, 'w') as _: # NOQA
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
self.logger.debug('Closing logcat poller.')
|
||||
if os.path.isfile(self.buffer_file):
|
||||
os.remove(self.buffer_file)
|
||||
|
||||
def _poll(self):
|
||||
with self.lock:
|
||||
self.last_poll = time.time()
|
||||
self.target.dump_logcat(self.buffer_file, append=True)
|
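For context, a hedged sketch of how this manager might be selected from a WA config.py (the 'generic_android' alias comes from the class above; the serial number and values are hypothetical):

    device = 'generic_android'
    device_config = dict(
        adb_name='0123456789ABCDEF',   # serial as reported by "adb devices"
        logcat_poll_period=10,         # buffer logcat on the host every 10 seconds
        enable_screen_check=True,
        swipe_to_unlock='horizontal',
    )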
65
wlauto/managers/linux.py
Normal file
@@ -0,0 +1,65 @@
|
||||
from wlauto.core.device_manager import DeviceManager
|
||||
from wlauto import Parameter, Alias
|
||||
from wlauto.utils.types import boolean
|
||||
from wlauto.exceptions import ConfigError
|
||||
|
||||
from devlib.target import LinuxTarget
|
||||
|
||||
|
||||
class LinuxManager(DeviceManager):
|
||||
|
||||
name = "linux"
|
||||
target_type = LinuxTarget
|
||||
|
||||
aliases = [
|
||||
Alias('generic_linux'),
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('host', mandatory=True, description='Host name or IP address for the device.'),
|
||||
Parameter('username', mandatory=True, description='User name for the account on the device.'),
|
||||
Parameter('password', description='Password for the account on the device (for password-based auth).'),
|
||||
Parameter('keyfile', description='Keyfile to be used for key-based authentication.'),
|
||||
Parameter('port', kind=int, default=22, description='SSH port number on the device.'),
|
||||
Parameter('password_prompt', default='[sudo] password',
|
||||
description='Prompt presented by sudo when requesting the password.'),
|
||||
Parameter('use_telnet', kind=boolean, default=False,
|
||||
description='Optionally, telnet may be used instead of ssh, though this is discouraged.'),
|
||||
Parameter('boot_timeout', kind=int, default=120,
|
||||
description='How long to try to connect to the device after a reboot.'),
|
||||
Parameter('working_directory', default="/root/wa", override=True),
|
||||
Parameter('binaries_directory', default="/root/wa/bin", override=True),
|
||||
]
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(LinuxManager, self).__init__(**kwargs)
|
||||
self.platform = self.platform_type(core_names=self.core_names, # pylint: disable=E1102
|
||||
core_clusters=self.core_clusters,
|
||||
modules=self.modules)
|
||||
self.target = self.target_type(connection_settings=self._make_connection_settings(),
|
||||
connect=False,
|
||||
platform=self.platform,
|
||||
working_directory=self.working_directory,
|
||||
executables_directory=self.binaries_directory,)
|
||||
|
||||
def validate(self):
|
||||
if self.password and self.keyfile:
|
||||
raise ConfigError("Either `password` or `keyfile` must be given but not both")
|
||||
|
||||
def connect(self):
|
||||
self.target.connect(self.boot_timeout)
|
||||
|
||||
def _make_connection_settings(self):
|
||||
connection_settings = {}
|
||||
connection_settings['host'] = self.host
|
||||
connection_settings['username'] = self.username
|
||||
connection_settings['port'] = self.port
|
||||
connection_settings['telnet'] = self.use_telnet
|
||||
connection_settings['password_prompt'] = self.password_prompt
|
||||
|
||||
if self.keyfile:
|
||||
connection_settings['keyfile'] = self.keyfile
|
||||
elif self.password:
|
||||
connection_settings['password'] = self.password
|
||||
|
||||
return connection_settings
|
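Similarly, a hedged config.py sketch for the 'generic_linux' alias above; the host and credentials are hypothetical, and validate() above rejects supplying both a password and a keyfile:

    device = 'generic_linux'
    device_config = dict(
        host='192.168.0.10',
        username='root',
        keyfile='/home/user/.ssh/id_rsa',   # or password='...', but not both
        port=22,
    )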
30
wlauto/managers/locallinux.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from wlauto.core.device_manager import DeviceManager
|
||||
from wlauto import Parameter
|
||||
|
||||
from devlib.target import LocalLinuxTarget
|
||||
|
||||
|
||||
class LocalLinuxManager(DeviceManager):
|
||||
|
||||
name = "local_linux"
|
||||
target_type = LocalLinuxTarget
|
||||
|
||||
parameters = [
|
||||
Parameter('password',
|
||||
description='Password for the user.'),
|
||||
]
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(LocalLinuxManager, self).__init__(**kwargs)
|
||||
self.platform = self.platform_type(core_names=self.core_names, # pylint: disable=E1102
|
||||
core_clusters=self.core_clusters,
|
||||
modules=self.modules)
|
||||
self.target = self.target_type(connection_settings=self._make_connection_settings())
|
||||
|
||||
def connect(self):
|
||||
self.target.connect()
|
||||
|
||||
def _make_connection_settings(self):
|
||||
connection_settings = {}
|
||||
connection_settings['password'] = self.password
|
||||
return connection_settings
|
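And a minimal sketch for running against the host itself via the 'local_linux' manager above (the password is only needed if sudo prompts for one; the value is hypothetical):

    device = 'local_linux'
    device_config = dict(password='hypothetical-sudo-password')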
@@ -1,16 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
@@ -1,73 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wlauto import Module, Parameter
|
||||
from wlauto.utils.serial_port import open_serial_connection
|
||||
|
||||
|
||||
class MbedFanActiveCooling(Module):
|
||||
|
||||
name = 'mbed-fan'
|
||||
description = 'Controls a cooling fan via an mbed connected to a serial port.'
|
||||
|
||||
capabilities = ['active_cooling']
|
||||
|
||||
parameters = [
|
||||
Parameter('port', default='/dev/ttyACM0',
|
||||
description="""The serial port for the active cooling solution (see above)."""),
|
||||
Parameter('buad', kind=int, default=115200,
|
||||
description="""Baud for the serial port (see above)."""),
|
||||
Parameter('fan_pin', kind=int, default=0,
|
||||
description="""Which controller pin on the mbed the fan for the active cooling solution is
|
||||
connected to (controller pin 0 is physical pin 22 on the mbed)."""),
|
||||
]
|
||||
|
||||
timeout = 30
|
||||
|
||||
def start_active_cooling(self):
|
||||
with open_serial_connection(timeout=self.timeout,
|
||||
port=self.port,
|
||||
baudrate=self.buad) as target:
|
||||
target.sendline('motor_{}_1'.format(self.fan_pin))
|
||||
|
||||
def stop_active_cooling(self):
|
||||
with open_serial_connection(timeout=self.timeout,
|
||||
port=self.port,
|
||||
baudrate=self.buad) as target:
|
||||
target.sendline('motor_{}_0'.format(self.fan_pin))
|
||||
|
||||
|
||||
class OdroidXU3ctiveCooling(Module):
|
||||
|
||||
name = 'odroidxu3-fan'
|
||||
description = """
|
||||
Enabled active cooling by controling the fan an Odroid XU3
|
||||
|
||||
.. note:: depending on the kernel used, it may not be possible to turn the fan
|
||||
off completely; in such situations, the fan will be set to its minimum
|
||||
speed.
|
||||
|
||||
"""
|
||||
|
||||
capabilities = ['active_cooling']
|
||||
|
||||
def start_active_cooling(self):
|
||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
|
||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 255, verify=False)
|
||||
|
||||
def stop_active_cooling(self):
|
||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/fan_mode', 0, verify=False)
|
||||
self.owner.set_sysfile_value('/sys/devices/odroid_fan.15/pwm_duty', 1, verify=False)
|
@@ -1,178 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
import logging
|
||||
|
||||
import wlauto.core.signal as signal
|
||||
from wlauto import Module, Parameter
|
||||
from wlauto.utils.misc import list_to_ranges, isiterable
|
||||
|
||||
|
||||
class CgroupController(object):
|
||||
|
||||
kind = 'cpuset'
|
||||
|
||||
def __new__(cls, arg):
|
||||
if isinstance(arg, cls):
|
||||
return arg
|
||||
else:
|
||||
return object.__new__(cls, arg)
|
||||
|
||||
def __init__(self, mount_name):
|
||||
self.mount_point = None
|
||||
self.mount_name = mount_name
|
||||
self.logger = logging.getLogger(self.kind)
|
||||
|
||||
def mount(self, device, mount_root):
|
||||
self.device = device
|
||||
self.mount_point = device.path.join(mount_root, self.mount_name)
|
||||
mounted = self.device.list_file_systems()
|
||||
if self.mount_point in [e.mount_point for e in mounted]:
|
||||
self.logger.debug('controller is already mounted.')
|
||||
else:
|
||||
self.device.execute('mkdir -p {} 2>/dev/null'.format(self.mount_point),
|
||||
as_root=True)
|
||||
self.device.execute('mount -t cgroup -o {} {} {}'.format(self.kind,
|
||||
self.mount_name,
|
||||
self.mount_point),
|
||||
as_root=True)
|
||||
|
||||
|
||||
class CpusetGroup(object):
|
||||
|
||||
def __init__(self, controller, name, cpus, mems):
|
||||
self.controller = controller
|
||||
self.device = controller.device
|
||||
self.name = name
|
||||
if name == 'root':
|
||||
self.directory = controller.mount_point
|
||||
else:
|
||||
self.directory = self.device.path.join(controller.mount_point, name)
|
||||
self.device.execute('mkdir -p {}'.format(self.directory), as_root=True)
|
||||
self.cpus_file = self.device.path.join(self.directory, 'cpuset.cpus')
|
||||
self.mems_file = self.device.path.join(self.directory, 'cpuset.mems')
|
||||
self.tasks_file = self.device.path.join(self.directory, 'tasks')
|
||||
self.set(cpus, mems)
|
||||
|
||||
def set(self, cpus, mems):
|
||||
if isiterable(cpus):
|
||||
cpus = list_to_ranges(cpus)
|
||||
if isiterable(mems):
|
||||
mems = list_to_ranges(mems)
|
||||
self.device.set_sysfile_value(self.cpus_file, cpus)
|
||||
self.device.set_sysfile_value(self.mems_file, mems)
|
||||
|
||||
def get(self):
|
||||
cpus = self.device.get_sysfile_value(self.cpus_file)
|
||||
mems = self.device.get_sysfile_value(self.mems_file)
|
||||
return (cpus, mems)
|
||||
|
||||
def get_tasks(self):
|
||||
task_ids = self.device.get_sysfile_value(self.tasks_file).split()
|
||||
return map(int, task_ids)
|
||||
|
||||
def add_tasks(self, tasks):
|
||||
for tid in tasks:
|
||||
self.add_task(tid)
|
||||
|
||||
def add_task(self, tid):
|
||||
self.device.set_sysfile_value(self.tasks_file, tid, verify=False)
|
||||
|
||||
|
||||
class CpusetController(CgroupController):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CpusetController, self).__init__(*args, **kwargs)
|
||||
self.groups = {}
|
||||
|
||||
def mount(self, device, mount_root):
|
||||
super(CpusetController, self).mount(device, mount_root)
|
||||
self.create_group('root', self.device.online_cpus, 0)
|
||||
|
||||
def create_group(self, name, cpus, mems):
|
||||
if not hasattr(self, 'device'):
|
||||
raise RuntimeError('Attempting to create group for unmounted controller {}'.format(self.kind))
|
||||
if name in self.groups:
|
||||
raise ValueError('Group {} already exists'.format(name))
|
||||
self.groups[name] = CpusetGroup(self, name, cpus, mems)
|
||||
|
||||
def move_tasks(self, source, dest):
|
||||
try:
|
||||
source_group = self.groups[source]
|
||||
dest_group = self.groups[dest]
|
||||
command = 'for task in $(cat {}); do echo $task>{}; done'
|
||||
self.device.execute(command.format(source_group.tasks_file, dest_group.tasks_file),
|
||||
# this will always fail as some of the tasks
|
||||
# are kthreads that cannot be migrated, but we
|
||||
# don't care about those, so don't check exit
|
||||
# code.
|
||||
check_exit_code=False, as_root=True)
|
||||
except KeyError as e:
|
||||
raise ValueError('Unkown group: {}'.format(e))
|
||||
|
||||
def move_all_tasks_to(self, target_group):
|
||||
for group in self.groups:
|
||||
if group != target_group:
|
||||
self.move_tasks(group, target_group)
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return self.groups[name]
|
||||
except KeyError:
|
||||
raise AttributeError(name)
|
||||
|
||||
|
||||
class Cgroups(Module):
|
||||
|
||||
name = 'cgroups'
|
||||
description = """
|
||||
Adds cgroups query and manupution APIs to a Device interface.
|
||||
|
||||
Currently, only cpusets controller is supported.
|
||||
|
||||
"""
|
||||
capabilities = ['cgroups']
|
||||
|
||||
controllers = [
|
||||
CpusetController('wa_cpuset'),
|
||||
]
|
||||
|
||||
parameters = [
|
||||
Parameter('cgroup_root', default='/sys/fs/cgroup',
|
||||
description='Location where cgroups are mounted on the device.'),
|
||||
]
|
||||
|
||||
def initialize(self, context):
|
||||
self.device = self.root_owner
|
||||
signal.connect(self._on_device_init, signal.RUN_INIT, priority=1)
|
||||
|
||||
def get_cgroup_controller(self, kind):
|
||||
for controller in self.controllers:
|
||||
if controller.kind == kind:
|
||||
return controller
|
||||
raise ValueError(kind)
|
||||
|
||||
def _on_device_init(self, context): # pylint: disable=unused-argument
|
||||
mounted = self.device.list_file_systems()
|
||||
if self.cgroup_root not in [e.mount_point for e in mounted]:
|
||||
self.device.execute('mount -t tmpfs {} {}'.format('cgroup_root', self.cgroup_root),
|
||||
as_root=True)
|
||||
else:
|
||||
self.logger.debug('cgroup_root already mounted at {}'.format(self.cgroup_root))
|
||||
for controller in self.controllers:
|
||||
if controller.kind in [e.device for e in mounted]:
|
||||
self.logger.debug('controller {} is already mounted.'.format(controller.kind))
|
||||
else:
|
||||
controller.mount(self.device, self.cgroup_root)
|
@@ -1,450 +0,0 @@
|
||||
from wlauto import Module
|
||||
from wlauto.exceptions import ConfigError, DeviceError
|
||||
|
||||
|
||||
# a dict of governor name and a list of it tunables that can't be read
|
||||
WRITE_ONLY_TUNABLES = {
|
||||
'interactive': ['boostpulse']
|
||||
}
|
||||
|
||||
|
||||
class CpufreqModule(Module):
|
||||
|
||||
name = 'devcpufreq'
|
||||
description = """
|
||||
cpufreq-related functionality module for the device. Query and set frequencies, governors, etc.
|
||||
|
||||
APIs in this module break down into three categories: those that operate on cpus, those that
|
||||
operate on cores, and those that operate on clusters.
|
||||
|
||||
"cpu" APIs expect a cpufreq CPU id, which could be either an integer or or a string of the
|
||||
form "cpu0".
|
||||
|
||||
"cluster" APIs expect a cluster ID. This is an integer as defined by the
|
||||
``device.core_clusters`` list.
|
||||
|
||||
"core" APIs expect a core name, as defined by ``device.core_names`` list.
|
||||
|
||||
"""
|
||||
capabilities = ['cpufreq']
|
||||
|
||||
def probe(self, device): # pylint: disable=no-self-use
|
||||
path = '/sys/devices/system/cpu/cpu{}/cpufreq'.format(device.online_cpus[0])
|
||||
return device.file_exists(path)
|
||||
|
||||
def initialize(self, context):
|
||||
# pylint: disable=W0201
|
||||
CpufreqModule._available_governors = {}
|
||||
CpufreqModule._available_governor_tunables = {}
|
||||
CpufreqModule.device = self.root_owner
|
||||
|
||||
def list_available_cpu_governors(self, cpu):
|
||||
"""Returns a list of governors supported by the cpu."""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
if cpu not in self._available_governors:
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_available_governors'.format(cpu)
|
||||
output = self.device.get_sysfile_value(sysfile)
|
||||
self._available_governors[cpu] = output.strip().split() # pylint: disable=E1103
|
||||
return self._available_governors[cpu]
|
||||
|
||||
def get_cpu_governor(self, cpu):
|
||||
"""Returns the governor currently set for the specified CPU."""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
|
||||
return self.device.get_sysfile_value(sysfile)
|
||||
|
||||
def set_cpu_governor(self, cpu, governor, **kwargs):
|
||||
"""
|
||||
Set the governor for the specified CPU.
|
||||
See https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
|
||||
|
||||
:param cpu: The CPU for which the governor is to be set. This must be
|
||||
the full name as it appears in sysfs, e.g. "cpu0".
|
||||
:param governor: The name of the governor to be used. This must be
|
||||
supported by the specific device.
|
||||
|
||||
Additional keyword arguments can be used to specify governor tunables for
|
||||
governors that support them.
|
||||
|
||||
:note: On big.LITTLE all cores in a cluster must be using the same governor.
|
||||
Setting the governor on any core in a cluster will also set it on all
|
||||
other cores in that cluster.
|
||||
|
||||
:raises: ConfigError if governor is not supported by the CPU.
|
||||
:raises: DeviceError if, for some reason, the governor could not be set.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
supported = self.list_available_cpu_governors(cpu)
|
||||
if governor not in supported:
|
||||
raise ConfigError('Governor {} not supported for cpu {}'.format(governor, cpu))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_governor'.format(cpu)
|
||||
self.device.set_sysfile_value(sysfile, governor)
|
||||
self.set_cpu_governor_tunables(cpu, governor, **kwargs)
|
||||
|
||||
def list_available_cpu_governor_tunables(self, cpu):
|
||||
"""Returns a list of tunables available for the governor on the specified CPU."""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
governor = self.get_cpu_governor(cpu)
|
||||
if governor not in self._available_governor_tunables:
|
||||
try:
|
||||
tunables_path = '/sys/devices/system/cpu/{}/cpufreq/{}'.format(cpu, governor)
|
||||
self._available_governor_tunables[governor] = self.device.listdir(tunables_path)
|
||||
except DeviceError: # probably an older kernel
|
||||
try:
|
||||
tunables_path = '/sys/devices/system/cpu/cpufreq/{}'.format(governor)
|
||||
self._available_governor_tunables[governor] = self.device.listdir(tunables_path)
|
||||
except DeviceError: # governor does not support tunables
|
||||
self._available_governor_tunables[governor] = []
|
||||
return self._available_governor_tunables[governor]
|
||||
|
||||
def get_cpu_governor_tunables(self, cpu):
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
governor = self.get_cpu_governor(cpu)
|
||||
tunables = {}
|
||||
for tunable in self.list_available_cpu_governor_tunables(cpu):
|
||||
if tunable not in WRITE_ONLY_TUNABLES.get(governor, []):
|
||||
try:
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
||||
tunables[tunable] = self.device.get_sysfile_value(path)
|
||||
except DeviceError: # May be an older kernel
|
||||
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
|
||||
tunables[tunable] = self.device.get_sysfile_value(path)
|
||||
return tunables
|
||||
|
||||
def set_cpu_governor_tunables(self, cpu, governor, **kwargs):
|
||||
"""
|
||||
Set tunables for the specified governor. Tunables should be specified as
|
||||
keyword arguments. Which tunables and values are valid depends on the
|
||||
governor.
|
||||
|
||||
:param cpu: The cpu for which the governor will be set. This must be the
|
||||
full cpu name as it appears in sysfs, e.g. ``cpu0``.
|
||||
:param governor: The name of the governor. Must be all lower case.
|
||||
|
||||
The rest should be keyword parameters mapping tunable name onto the value to
|
||||
be set for it.
|
||||
|
||||
:raises: ConfigError if governor specified is not a valid governor name, or if
|
||||
a tunable specified is not valid for the governor.
|
||||
:raises: DeviceError if could not set tunable.
|
||||
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
valid_tunables = self.list_available_cpu_governor_tunables(cpu)
|
||||
for tunable, value in kwargs.iteritems():
|
||||
if tunable in valid_tunables:
|
||||
try:
|
||||
path = '/sys/devices/system/cpu/{}/cpufreq/{}/{}'.format(cpu, governor, tunable)
|
||||
self.device.set_sysfile_value(path, value)
|
||||
except DeviceError: # May be an older kernel
|
||||
path = '/sys/devices/system/cpu/cpufreq/{}/{}'.format(governor, tunable)
|
||||
self.device.set_sysfile_value(path, value)
|
||||
else:
|
||||
message = 'Unexpected tunable {} for governor {} on {}.\n'.format(tunable, governor, cpu)
|
||||
message += 'Available tunables are: {}'.format(valid_tunables)
|
||||
raise ConfigError(message)
|
||||
|
||||
def list_available_core_frequencies(self, core):
|
||||
cpu = self.get_core_online_cpu(core)
|
||||
return self.list_available_cpu_frequencies(cpu)
|
||||
|
||||
def list_available_cpu_frequencies(self, cpu):
|
||||
"""Returns a list of frequencies supported by the cpu or an empty list
|
||||
if not could be found."""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
try:
|
||||
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/scaling_available_frequencies'.format(cpu)
|
||||
output = self.device.execute(cmd)
|
||||
#available_frequencies = map(int, output.strip().split()) # pylint: disable=E1103
|
||||
available_frequencies = []
|
||||
for f in output.strip().split():
|
||||
try:
|
||||
available_frequencies.append(int(f))
|
||||
except ValueError:
|
||||
pass
|
||||
except DeviceError:
|
||||
# On some devices scaling_available_frequencies is not generated.
|
||||
# http://adrynalyne-teachtofish.blogspot.co.uk/2011/11/how-to-enable-scalingavailablefrequenci.html
|
||||
# Fall back to parsing stats/time_in_state
|
||||
cmd = 'cat /sys/devices/system/cpu/{}/cpufreq/stats/time_in_state'.format(cpu)
|
||||
out_iter = iter(self.device.execute(cmd).strip().split())
|
||||
available_frequencies = map(int, reversed([f for f, _ in zip(out_iter, out_iter)]))
|
||||
return available_frequencies
|
||||
|
||||
def get_cpu_min_frequency(self, cpu):
|
||||
"""
|
||||
Returns the min frequency currently set for the specified CPU.
|
||||
|
||||
Warning, this method does not check if the cpu is online or not. It will
|
||||
try to read the minimum frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: DeviceError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
|
||||
return self.device.get_sysfile_value(sysfile)
|
||||
|
||||
def set_cpu_min_frequency(self, cpu, frequency):
|
||||
"""
|
||||
Set's the minimum value for CPU frequency. Actual frequency will
|
||||
depend on the Governor used and may vary during execution. The value should be
|
||||
either an int or a string representing an integer. The Value must also be
|
||||
supported by the device. The available frequencies can be obtained by calling
|
||||
get_available_frequencies() or examining
|
||||
|
||||
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: ConfigError if the frequency is not supported by the CPU.
|
||||
:raises: DeviceError if, for some reason, frequency could not be set.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
available_frequencies = self.list_available_cpu_frequencies(cpu)
|
||||
try:
|
||||
value = int(frequency)
|
||||
if available_frequencies and value not in available_frequencies:
|
||||
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_min_freq'.format(cpu)
|
||||
self.device.set_sysfile_value(sysfile, value)
|
||||
except ValueError:
|
||||
raise ValueError('value must be an integer; got: "{}"'.format(value))
|
||||
|
||||
def get_cpu_frequency(self, cpu):
|
||||
"""
|
||||
Returns the current frequency currently set for the specified CPU.
|
||||
|
||||
Warning, this method does not check if the cpu is online or not. It will
|
||||
try to read the current frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: DeviceError if for some reason the frequency could not be read.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_cur_freq'.format(cpu)
|
||||
return self.device.get_sysfile_value(sysfile)
|
||||
|
||||
def set_cpu_frequency(self, cpu, frequency, exact=True):
|
||||
"""
|
||||
Set's the minimum value for CPU frequency. Actual frequency will
|
||||
depend on the Governor used and may vary during execution. The value should be
|
||||
either an int or a string representing an integer.
|
||||
|
||||
If ``exact`` flag is set (the default), the Value must also be supported by
|
||||
the device. The available frequencies can be obtained by calling
|
||||
get_available_frequencies() or examining
|
||||
|
||||
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
|
||||
|
||||
on the device (if it exists).
|
||||
|
||||
:raises: ConfigError if the frequency is not supported by the CPU.
|
||||
:raises: DeviceError if, for some reason, frequency could not be set.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
try:
|
||||
value = int(frequency)
|
||||
if exact:
|
||||
available_frequencies = self.list_available_cpu_frequencies(cpu)
|
||||
if available_frequencies and value not in available_frequencies:
|
||||
raise ConfigError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
value,
|
||||
available_frequencies))
|
||||
if self.get_cpu_governor(cpu) != 'userspace':
|
||||
raise ConfigError('Can\'t set {} frequency; governor must be "userspace"'.format(cpu))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_setspeed'.format(cpu)
|
||||
self.device.set_sysfile_value(sysfile, value, verify=False)
|
||||
except ValueError:
|
||||
raise ValueError('frequency must be an integer; got: "{}"'.format(value))
|
||||
|
||||
def get_cpu_max_frequency(self, cpu):
|
||||
"""
|
||||
Returns the max frequency currently set for the specified CPU.
|
||||
|
||||
Warning, this method does not check if the cpu is online or not. It will
|
||||
try to read the maximum frequency and the following exception will be
|
||||
raised ::
|
||||
|
||||
:raises: DeviceError if for some reason the frequency could not be read.
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
|
||||
return self.device.get_sysfile_value(sysfile)
|
||||
|
||||
def set_cpu_max_frequency(self, cpu, frequency):
|
||||
"""
|
||||
Set's the minimum value for CPU frequency. Actual frequency will
|
||||
depend on the Governor used and may vary during execution. The value should be
|
||||
either an int or a string representing an integer. The Value must also be
|
||||
supported by the device. The available frequencies can be obtained by calling
|
||||
get_available_frequencies() or examining
|
||||
|
||||
/sys/devices/system/cpu/cpuX/cpufreq/scaling_available_frequencies
|
||||
|
||||
on the device.
|
||||
|
||||
:raises: ConfigError if the frequency is not supported by the CPU.
|
||||
:raises: DeviceError if, for some reason, frequency could not be set.
|
||||
|
||||
"""
|
||||
if isinstance(cpu, int):
|
||||
cpu = 'cpu{}'.format(cpu)
|
||||
available_frequencies = self.list_available_cpu_frequencies(cpu)
|
||||
try:
|
||||
value = int(frequency)
|
||||
if available_frequencies and value not in available_frequencies:
|
||||
raise DeviceError('Can\'t set {} frequency to {}\nmust be in {}'.format(cpu,
|
||||
value,
|
||||
available_frequencies))
|
||||
sysfile = '/sys/devices/system/cpu/{}/cpufreq/scaling_max_freq'.format(cpu)
|
||||
self.device.set_sysfile_value(sysfile, value)
|
||||
except ValueError:
|
||||
raise ValueError('value must be an integer; got: "{}"'.format(value))
|
||||
|
||||
# Core- and cluster-level mapping for the above cpu-level APIs above. The
|
||||
# APIs make the following assumptions, which were True for all devices that
|
||||
# existed at the time of writing:
|
||||
# 1. A cluster can only contain cores of one type.
|
||||
# 2. All cores in a cluster are tied to the same DVFS domain, therefore
|
||||
# changes to cpufreq for a core will affect all other cores on the
|
||||
# same cluster.
|
||||
|
||||
def get_core_clusters(self, core, strict=True):
|
||||
"""Returns the list of clusters that contain the specified core. if ``strict``
|
||||
is ``True``, raises ValueError if no clusters has been found (returns empty list
|
||||
if ``strict`` is ``False``)."""
|
||||
core_indexes = [i for i, c in enumerate(self.device.core_names) if c == core]
|
||||
clusters = sorted(list(set(self.device.core_clusters[i] for i in core_indexes)))
|
||||
if strict and not clusters:
|
||||
raise ValueError('No active clusters for core {}'.format(core))
|
||||
return clusters
|
||||
|
||||
def get_cluster_active_cpu(self, cluster):
|
||||
"""Returns the first *active* cpu for the cluster. If the entire cluster
|
||||
has been hotplugged, this will raise a ``ValueError``."""
|
||||
cpu_indexes = set([i for i, c in enumerate(self.device.core_clusters) if c == cluster])
|
||||
active_cpus = sorted(list(cpu_indexes.intersection(self.device.online_cpus)))
|
||||
if not active_cpus:
|
||||
raise ValueError('All cpus for cluster {} are offline'.format(cluster))
|
||||
return active_cpus[0]
|
||||
|
||||
def list_available_core_governors(self, core):
|
||||
cpu = self.get_core_online_cpu(core)
|
||||
return self.list_available_cpu_governors(cpu)
|
||||
|
||||
def list_available_cluster_governors(self, cluster):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.list_available_cpu_governors(cpu)
|
||||
|
||||
def get_core_governor(self, core):
|
||||
cpu = self.get_core_online_cpu(core)
|
||||
return self.get_cpu_governor(cpu)
|
||||
|
||||
def set_core_governor(self, core, governor, **tunables):
|
||||
for cluster in self.get_core_clusters(core):
|
||||
self.set_cluster_governor(cluster, governor, **tunables)
|
||||
|
||||
def get_cluster_governor(self, cluster):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.get_cpu_governor(cpu)
|
||||
|
||||
def set_cluster_governor(self, cluster, governor, **tunables):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.set_cpu_governor(cpu, governor, **tunables)
|
||||
|
||||
def list_available_cluster_governor_tunables(self, cluster):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.list_available_cpu_governor_tunables(cpu)
|
||||
|
||||
def get_cluster_governor_tunables(self, cluster):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.get_cpu_governor_tunables(cpu)
|
||||
|
||||
def set_cluster_governor_tunables(self, cluster, governor, **tunables):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.set_cpu_governor_tunables(cpu, governor, **tunables)
|
||||
|
||||
def get_cluster_min_frequency(self, cluster):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.get_cpu_min_frequency(cpu)
|
||||
|
||||
def set_cluster_min_frequency(self, cluster, freq):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.set_cpu_min_frequency(cpu, freq)
|
||||
|
||||
def get_cluster_cur_frequency(self, cluster):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.get_cpu_frequency(cpu)
|
||||
|
||||
def set_cluster_cur_frequency(self, cluster, freq):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.set_cpu_frequency(cpu, freq)
|
||||
|
||||
def get_cluster_max_frequency(self, cluster):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.get_cpu_max_frequency(cpu)
|
||||
|
||||
def set_cluster_max_frequency(self, cluster, freq):
|
||||
cpu = self.get_cluster_active_cpu(cluster)
|
||||
return self.set_cpu_max_frequency(cpu, freq)
|
||||
|
||||
def get_core_online_cpu(self, core):
|
||||
for cluster in self.get_core_clusters(core):
|
||||
try:
|
||||
return self.get_cluster_active_cpu(cluster)
|
||||
except ValueError:
|
||||
pass
|
||||
raise ValueError('No active CPUs found for core {}'.format(core))
|
||||
|
||||
def list_available_core_governor_tunables(self, core):
|
||||
return self.list_available_cpu_governor_tunables(self.get_core_online_cpu(core))
|
||||
|
||||
def get_core_governor_tunables(self, core):
|
||||
return self.get_cpu_governor_tunables(self.get_core_online_cpu(core))
|
||||
|
||||
def set_core_governor_tunables(self, core, tunables):
|
||||
for cluster in self.get_core_clusters(core):
|
||||
governor = self.get_cluster_governor(cluster)
|
||||
self.set_cluster_governor_tunables(cluster, governor, **tunables)
|
||||
|
||||
def get_core_min_frequency(self, core):
|
||||
return self.get_cpu_min_frequency(self.get_core_online_cpu(core))
|
||||
|
||||
def set_core_min_frequency(self, core, freq):
|
||||
for cluster in self.get_core_clusters(core):
|
||||
self.set_cluster_min_frequency(cluster, freq)
|
||||
|
||||
def get_core_cur_frequency(self, core):
|
||||
return self.get_cpu_frequency(self.get_core_online_cpu(core))
|
||||
|
||||
def set_core_cur_frequency(self, core, freq):
|
||||
for cluster in self.get_core_clusters(core):
|
||||
self.set_cluster_cur_frequency(cluster, freq)
|
||||
|
||||
def get_core_max_frequency(self, core):
|
||||
return self.get_cpu_max_frequency(self.get_core_online_cpu(core))
|
||||
|
||||
def set_core_max_frequency(self, core, freq):
|
||||
for cluster in self.get_core_clusters(core):
|
||||
self.set_cluster_max_frequency(cluster, freq)
|
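For reference, a usage sketch of the devcpufreq API removed above (the governor, tunable name, and frequency value are illustrative and depend on the kernel); in this branch the equivalent functionality is expected to come from devlib instead:

    # Set the interactive governor on cpu0 and tune it in one call; the
    # tunable is validated against list_available_cpu_governor_tunables().
    device.set_cpu_governor('cpu0', 'interactive', go_hispeed_load=90)
    # Pin cpu0 to a supported frequency (requires the userspace governor).
    device.set_cpu_governor('cpu0', 'userspace')
    device.set_cpu_frequency('cpu0', 1200000)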
@@ -1,117 +0,0 @@
|
||||
# Copyright 2014-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import wlauto.core.signal as signal
from wlauto import Module
from wlauto.exceptions import DeviceError


class CpuidleState(object):

    @property
    def usage(self):
        return self.get('usage')

    @property
    def time(self):
        return self.get('time')

    @property
    def disable(self):
        return self.get('disable')

    @disable.setter
    def disable(self, value):
        self.set('disable', value)

    @property
    def ordinal(self):
        i = len(self.id)
        while self.id[i - 1].isdigit():
            i -= 1
        if not i:
            raise ValueError('invalid idle state name: "{}"'.format(self.id))
        return int(self.id[i:])

    def __init__(self, device, index, path):
        self.device = device
        self.index = index
        self.path = path
        self.id = self.device.path.basename(self.path)
        self.cpu = self.device.path.basename(self.device.path.dirname(path))
        self.desc = self.get('desc')
        self.name = self.get('name')
        self.latency = self.get('latency')
        self.power = self.get('power')

    def get(self, prop):
        property_path = self.device.path.join(self.path, prop)
        return self.device.get_sysfile_value(property_path)

    def set(self, prop, value):
        property_path = self.device.path.join(self.path, prop)
        self.device.set_sysfile_value(property_path, value)

    def __eq__(self, other):
        if isinstance(other, CpuidleState):
            return (self.name == other.name) and (self.desc == other.desc)
        elif isinstance(other, basestring):
            return (self.name == other) or (self.desc == other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)


class Cpuidle(Module):

    name = 'cpuidle'
    description = """
    Adds cpuidle state query and manipulation APIs to a Device interface.

    """
    capabilities = ['cpuidle']

    root_path = '/sys/devices/system/cpu/cpuidle'

    def probe(self, device):
        return device.file_exists(self.root_path)

    def initialize(self, context):
        self.device = self.root_owner
        signal.connect(self._on_device_init, signal.RUN_INIT, priority=1)

    def get_cpuidle_driver(self):
        return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_driver')).strip()

    def get_cpuidle_governor(self):
        return self.device.get_sysfile_value(self.device.path.join(self.root_path, 'current_governor_ro')).strip()

    def get_cpuidle_states(self, cpu=0):
        if isinstance(cpu, int):
            cpu = 'cpu{}'.format(cpu)
        states_dir = self.device.path.join(self.device.path.dirname(self.root_path), cpu, 'cpuidle')
        idle_states = []
        for state in self.device.listdir(states_dir):
            if state.startswith('state'):
                index = int(state[5:])
                idle_states.append(CpuidleState(self.device, index, self.device.path.join(states_dir, state)))
        return idle_states

    def _on_device_init(self, context):  # pylint: disable=unused-argument
        if not self.device.file_exists(self.root_path):
            raise DeviceError('Device kernel does not appear to have cpuidle enabled.')
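A minimal usage sketch (not part of the diff), assuming, as with other WA device modules, that the cpuidle module's methods are reachable through the device object on a rooted target; the state name 'C1' below is hypothetical.

# Sketch only: query idle states via the cpuidle module above and disable one.
def report_idle_states(device, logger):
    driver = device.get_cpuidle_driver()        # reads .../cpuidle/current_driver
    governor = device.get_cpuidle_governor()    # reads .../cpuidle/current_governor_ro
    logger.info('cpuidle driver={} governor={}'.format(driver, governor))
    for state in device.get_cpuidle_states(cpu=0):
        logger.info('  {}: {} (latency={})'.format(state.id, state.desc, state.latency))
        if state.name == 'C1':                   # hypothetical state name
            state.disable = 1                    # writes the state's 'disable' sysfs file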
@@ -1,248 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pylint: disable=attribute-defined-outside-init
import os
import time
import tarfile
import tempfile
import shutil

from wlauto import Module
from wlauto.exceptions import ConfigError, DeviceError
from wlauto.utils.android import fastboot_flash_partition, fastboot_command
from wlauto.utils.serial_port import open_serial_connection
from wlauto.utils.uefi import UefiMenu
from wlauto.utils.misc import merge_dicts


class Flasher(Module):
    """
    Implements a mechanism for flashing a device. The images to be flashed can be
    specified either as a tarball "image bundle" (in which case instructions for
    flashing are provided as flasher-specific metadata also in the bundle), or as
    individual image files, in which case instructions for flashing are specified
    as part of the flashing config.

    .. note:: It is important that when resolving configuration, concrete flasher
              implementations prioritise settings specified in the config over those
              in the bundle (if they happen to clash).

    """

    capabilities = ['flash']

    def flash(self, image_bundle=None, images=None):
        """
        Flashes the specified device using the specified config. As a post condition,
        the device must be ready to run workloads upon returning from this method (e.g.
        it must be fully-booted into the OS).

        """
        raise NotImplementedError()


class FastbootFlasher(Flasher):

    name = 'fastboot'
    description = """
    Enables automated flashing of images using the fastboot utility.

    To use this flasher, a set of image files to be flashed is required.
    In addition, a mapping between partitions and image files is required. There are
    two ways to specify those requirements:

    - Image mapping: In this mode, a mapping between partitions and images is given in the agenda.
    - Image bundle: In this mode, a tarball is specified which must contain all image files as well
      as a partition file, named ``partitions.txt``, which contains the mapping between
      partitions and images.

    The format of ``partitions.txt`` defines one mapping per line as such: ::

        kernel zImage-dtb
        ramdisk ramdisk_image

    """

    delay = 0.5
    serial_timeout = 30
    partitions_file_name = 'partitions.txt'

    def flash(self, image_bundle=None, images=None):
        self.prelude_done = False
        to_flash = {}
        if image_bundle:  # pylint: disable=access-member-before-definition
            image_bundle = expand_path(image_bundle)
            to_flash = self._bundle_to_images(image_bundle)
        to_flash = merge_dicts(to_flash, images or {}, should_normalize=False)
        for partition, image_path in to_flash.iteritems():
            self.logger.debug('flashing {}'.format(partition))
            self._flash_image(self.owner, partition, expand_path(image_path))
        fastboot_command('reboot')

    def _validate_image_bundle(self, image_bundle):
        if not tarfile.is_tarfile(image_bundle):
            raise ConfigError('File {} is not a tarfile'.format(image_bundle))
        with tarfile.open(image_bundle) as tar:
            files = [tf.name for tf in tar.getmembers()]
            if not any(pf in files for pf in (self.partitions_file_name, '{}/{}'.format(files[0], self.partitions_file_name))):
                raise ConfigError('Image bundle does not contain the required partition file (see documentation)')

    def _bundle_to_images(self, image_bundle):
        """
        Extracts the bundle to a temporary location and creates a mapping between the contents of the bundle
        and the images to be flashed.
        """
        self._validate_image_bundle(image_bundle)
        extract_dir = tempfile.mkdtemp()
        with tarfile.open(image_bundle) as tar:
            tar.extractall(path=extract_dir)
            files = [tf.name for tf in tar.getmembers()]
            if self.partitions_file_name not in files:
                extract_dir = os.path.join(extract_dir, files[0])
        partition_file = os.path.join(extract_dir, self.partitions_file_name)
        return get_mapping(extract_dir, partition_file)

    def _flash_image(self, device, partition, image_path):
        if not self.prelude_done:
            self._fastboot_prelude(device)
        fastboot_flash_partition(partition, image_path)
        time.sleep(self.delay)

    def _fastboot_prelude(self, device):
        with open_serial_connection(port=device.port,
                                    baudrate=device.baudrate,
                                    timeout=self.serial_timeout,
                                    init_dtr=0,
                                    get_conn=False) as target:
            device.reset()
            time.sleep(self.delay)
            target.sendline(' ')
            time.sleep(self.delay)
            target.sendline('fast')
            time.sleep(self.delay)
        self.prelude_done = True


class VersatileExpressFlasher(Flasher):

    name = 'vexpress'
    description = """
    Enables flashing of kernels and firmware to ARM Versatile Express devices.

    This module enables flashing of image bundles or individual images to ARM
    Versatile Express-based devices (e.g. JUNO) via host-mounted MicroSD on the
    board.

    The bundle, if specified, must reflect the directory structure of the MicroSD
    and will be extracted directly into the location it is mounted on the host. The
    images, if specified, must be a dict mapping the absolute path of the image on
    the host to the destination path within the board's MicroSD; the destination path
    may be either absolute, or relative to the MicroSD mount location.

    """

    def flash(self, image_bundle=None, images=None, recreate_uefi_entry=True):  # pylint: disable=arguments-differ
        device = self.owner
        if not hasattr(device, 'port') or not hasattr(device, 'microsd_mount_point'):
            msg = 'Device {} does not appear to support VExpress flashing.'
            raise ConfigError(msg.format(device.name))
        with open_serial_connection(port=device.port,
                                    baudrate=device.baudrate,
                                    timeout=device.timeout,
                                    init_dtr=0) as target:
            target.sendline('usb_on')  # this will cause the MicroSD to be mounted on the host
            device.wait_for_microsd_mount_point(target)
            self.deploy_images(device, image_bundle, images)

        self.logger.debug('Resetting the device.')
        device.hard_reset()

        with open_serial_connection(port=device.port,
                                    baudrate=device.baudrate,
                                    timeout=device.timeout,
                                    init_dtr=0) as target:
            menu = UefiMenu(target)
            menu.open(timeout=300)
            if recreate_uefi_entry and menu.has_option(device.uefi_entry):
                self.logger.debug('Deleting existing device entry.')
                menu.delete_entry(device.uefi_entry)
                menu.create_entry(device.uefi_entry, device.uefi_config)
            elif not menu.has_option(device.uefi_entry):
                menu.create_entry(device.uefi_entry, device.uefi_config)
            menu.select(device.uefi_entry)
            target.expect(device.android_prompt, timeout=device.timeout)

    def deploy_images(self, device, image_bundle=None, images=None):
        try:
            if image_bundle:
                self.deploy_image_bundle(device, image_bundle)
            if images:
                self.overlay_images(device, images)
            os.system('sync')
        except (IOError, OSError), e:
            msg = 'Could not deploy images to {}; got: {}'
            raise DeviceError(msg.format(device.microsd_mount_point, e))

    def deploy_image_bundle(self, device, bundle):
        self.logger.debug('Validating {}'.format(bundle))
        validate_image_bundle(bundle)
        self.logger.debug('Extracting {} into {}...'.format(bundle, device.microsd_mount_point))
        with tarfile.open(bundle) as tar:
            tar.extractall(device.microsd_mount_point)

    def overlay_images(self, device, images):
        for dest, src in images.iteritems():
            dest = os.path.join(device.microsd_mount_point, dest)
            self.logger.debug('Copying {} to {}'.format(src, dest))
            shutil.copy(src, dest)


# utility functions

def get_mapping(base_dir, partition_file):
    mapping = {}
    with open(partition_file) as pf:
        for line in pf:
            pair = line.split()
            if len(pair) != 2:
                raise ConfigError('partitions.txt is not properly formatted')
            image_path = os.path.join(base_dir, pair[1])
            if not os.path.isfile(expand_path(image_path)):
                raise ConfigError('file {} was not found in the bundle or was misplaced'.format(pair[1]))
            mapping[pair[0]] = image_path
    return mapping


def expand_path(original_path):
    path = os.path.abspath(os.path.expanduser(original_path))
    if not os.path.exists(path):
        raise ConfigError('{} does not exist.'.format(path))
    return path


def validate_image_bundle(bundle):
    if not tarfile.is_tarfile(bundle):
        raise ConfigError('Image bundle {} does not appear to be a valid TAR file.'.format(bundle))
    with tarfile.open(bundle) as tar:
        try:
            tar.getmember('config.txt')
        except KeyError:
            try:
                tar.getmember('./config.txt')
            except KeyError:
                msg = 'Tarball {} does not appear to be a valid image bundle (did not see config.txt).'
                raise ConfigError(msg.format(bundle))
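A minimal usage sketch (not part of the diff) for the "Image mapping" mode described above. The `device.flasher` attribute name is an assumption used only for illustration; the partition names and image paths are hypothetical.

# Sketch only: drive the fastboot flasher with an explicit partition-to-image mapping.
images = {
    'kernel': '~/images/zImage-dtb',      # partition -> host image path
    'ramdisk': '~/images/ramdisk_image',
}
# Paths go through expand_path(), so '~' is expanded and missing files raise ConfigError.
device.flasher.flash(images=images)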
@@ -1,55 +0,0 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import time

from wlauto import Module, Parameter
from wlauto.exceptions import DeviceError
from wlauto.utils.netio import KshellConnection


class NetioSwitchReset(Module):

    # pylint: disable=E1101
    name = 'netio_switch'
    description = """
    Enables hard reset of devices connected to a Netio ethernet power switch
    """
    capabilities = ['reset_power']

    parameters = [
        Parameter('host', default='ippowerbar',
                  description='IP address or DNS name of the Netio power switch.'),
        Parameter('port', kind=int, default=1234,
                  description='Port on which KSHELL is listening.'),
        Parameter('username', default='admin',
                  description='User name for the administrator on the Netio.'),
        Parameter('password', default='admin',
                  description='Password for the administrator on the Netio.'),
        Parameter('psu', kind=int, default=1,
                  description='The device port number on the Netio, i.e. which '
                              'PSU port the device is connected to.'),
    ]

    def hard_reset(self):
        try:
            conn = KshellConnection(host=self.host, port=self.port)
            conn.login(self.username, self.password)
            conn.disable_port(self.psu)
            time.sleep(2)
            conn.enable_port(self.psu)
            conn.close()
        except Exception as e:
            raise DeviceError('Could not reset power: {}'.format(e))

wlauto/result_processors/json_rp.py
@@ -0,0 +1,122 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


# pylint: disable=E1101,W0201
import os
from base64 import b64encode

from wlauto import ResultProcessor, Parameter
from wlauto.utils.serializer import json
from wlauto.utils.misc import istextfile
from wlauto.utils.types import list_of_strings
from wlauto.exceptions import ResultProcessorError


class JsonReportProcessor(ResultProcessor):

    name = 'json'
    description = """
    Produces a JSON file with WA config, results, etc.

    This includes embedded artifacts, either as text or base64.

    """

    parameters = [
        Parameter("ignore_artifact_types", kind=list_of_strings,
                  default=['export', 'raw'],
                  description="""A list of artifact types to be ignored,
                                 and thus not embedded in the JSON.""")
    ]
    final = {}

    def initialize(self, context):
        self.final = context.run_info.to_dict()
        del self.final['workload_specs']

        wa_adapter = self.final['device']
        self.final['device'] = {}
        self.final['device']['props'] = self.final['device_properties']
        self.final['device']['wa_adapter'] = wa_adapter
        del self.final['device_properties']

        self.final['output_directory'] = os.path.abspath(context.output_directory)
        self.final['artifacts'] = []
        self.final['workloads'] = context.config.to_dict()['workload_specs']
        for workload in self.final['workloads']:
            workload['name'] = workload['workload_name']
            del workload['workload_name']
            workload['results'] = []

    def export_iteration_result(self, result, context):
        r = {}
        r['iteration'] = context.current_iteration
        r['status'] = result.status
        r['events'] = [e.to_dict() for e in result.events]
        r['metrics'] = []
        for m in result.metrics:
            md = m.to_dict()
            md['is_summary'] = m.name in context.workload.summary_metrics
            r['metrics'].append(md)
        iteration_artefacts = [self.embed_artifact(context, a) for a in context.iteration_artifacts]
        r['artifacts'] = [e for e in iteration_artefacts if e is not None]
        for workload in self.final['workloads']:
            if workload['id'] == context.spec.id:
                workload.update(r)
                break
        else:
            raise ResultProcessorError("No workload spec with matching id found")

    def export_run_result(self, result, context):
        run_artifacts = [self.embed_artifact(context, a) for a in context.run_artifacts]
        self.logger.debug('Generating results bundle...')
        run_stats = {
            'status': result.status,
            'events': [e.to_dict() for e in result.events],
            'end_time': context.run_info.end_time,
            'duration': context.run_info.duration.total_seconds(),
            'artifacts': [e for e in run_artifacts if e is not None],
        }
        self.final.update(run_stats)
        json_path = os.path.join(os.path.abspath(context.output_directory), "run.json")
        with open(json_path, 'w') as json_file:
            json.dump(self.final, json_file)

    def embed_artifact(self, context, artifact):
        artifact_path = os.path.join(context.output_directory, artifact.path)

        if not os.path.exists(artifact_path):
            self.logger.debug('Artifact {} has not been generated'.format(artifact_path))
            return
        elif artifact.kind in self.ignore_artifact_types:
            self.logger.debug('Ignoring {} artifact {}'.format(artifact.kind, artifact_path))
            return
        else:
            self.logger.debug('Uploading artifact {}'.format(artifact_path))
            entry = artifact.to_dict()
            path = os.path.join(os.path.abspath(context.output_directory), entry['path'])
            if istextfile(open(path)):
                entry['encoding'] = "text"
                entry['content'] = open(path).read()
            else:
                entry['encoding'] = "base64"
                entry['content'] = b64encode(open(path).read())

            del entry['path']
            del entry['level']
            del entry['mandatory']
            return entry
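A minimal sketch (not part of the diff) of enabling the new 'json' result processor from a WA config.py. The processor then writes run.json into the run's output directory, containing run info, per-iteration metrics, and embedded artifacts (encoded as 'text' or 'base64'), skipping any artifact whose kind appears in its ignore_artifact_types parameter.

# Sketch only: config.py fragment enabling the processor alongside the default one.
result_processors = [
    'standard',
    'json',
]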
@@ -22,7 +22,6 @@ text files in various formats.
"""
import os
import csv
import json

from wlauto import ResultProcessor, Parameter
from wlauto.exceptions import ConfigError
@@ -124,32 +123,6 @@ class CsvReportProcessor(ResultProcessor):
                writer.writerow(row)


class JsonReportProcessor(ResultProcessor):
    """
    Creates a ``results.json`` in the output directory containing results for
    all iterations in JSON format.

    """

    name = 'json'

    def process_run_result(self, result, context):
        outfile = os.path.join(context.run_output_directory, 'results.json')
        with open(outfile, 'wb') as wfh:
            output = []
            for result in result.iteration_results:
                output.append({
                    'id': result.id,
                    'workload': result.workload.name,
                    'iteration': result.iteration,
                    'metrics': [dict([(k, v) for k, v in m.__dict__.iteritems()
                                      if not k.startswith('_')])
                                for m in result.metrics],
                })
            json.dump(output, wfh, indent=4)
        context.add_artifact('run_result_json', 'results.json', 'export')


class SummaryCsvProcessor(ResultProcessor):
    """
    Similar to csv result processor, but only contains workloads' summary metrics.

@@ -815,3 +815,27 @@ def sha256(path, chunk=2048):

def urljoin(*parts):
    return '/'.join(p.rstrip('/') for p in parts)


# From: http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/
def istextfile(fileobj, blocksize=512):
    """ Uses heuristics to guess whether the given file is text or binary,
        by reading a single block of bytes from the file.
        If more than 30% of the chars in the block are non-text, or there
        are NUL ('\x00') bytes in the block, assume this is a binary file.
    """
    _text_characters = (b''.join(chr(i) for i in range(32, 127)) +
                        b'\n\r\t\f\b')

    block = fileobj.read(blocksize)
    if b'\x00' in block:
        # Files with null bytes are binary
        return False
    elif not block:
        # An empty file is considered a valid text file
        return True

    # Use translate's 'deletechars' argument to efficiently remove all
    # occurrences of _text_characters from the block
    nontext = block.translate(None, _text_characters)
    return float(len(nontext)) / len(block) <= 0.30
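A minimal sketch (not part of the diff): istextfile() is what drives the 'text' versus 'base64' encoding decision in the json result processor added above. The file name below is illustrative.

# Sketch only: classify a file the same way embed_artifact() does.
with open('results.csv') as fh:
    if istextfile(fh):
        print('embed as text')
    else:
        print('embed as base64')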
wlauto/utils/serializer.py
@@ -0,0 +1,243 @@
"""
This module contains wrappers for Python serialization modules for
common formats that make it easier to serialize/deserialize WA
Plain Old Data structures (serializable WA classes implement
``to_pod()``/``from_pod()`` methods for converting between POD
structures and Python class instances).

The modifications to standard serialization procedures are:

- mappings are deserialized as ``OrderedDict``\ 's rather than standard
  Python ``dict``\ 's. This allows for cleaner syntax in certain parts
  of WA configuration (e.g. values to be written to files can be specified
  as a dict, and they will be written in the order specified in the config).
- regular expressions are automatically encoded/decoded. This allows for
  configuration values to be transparently specified as strings or regexes
  in the POD config.

This module exports the "wrapped" versions of serialization libraries,
and this should be imported and used instead of importing the libraries
directly. i.e. ::

    from wlauto.utils.serializer import yaml
    pod = yaml.load(fh)

instead of ::

    import yaml
    pod = yaml.load(fh)

It's also possible to use the serializer directly::

    from wlauto.utils import serializer
    pod = serializer.load(fh)

This can also be used to ``dump()`` POD structures. By default,
``dump()`` will produce JSON, but the ``fmt`` parameter may be used to
specify an alternative format (``yaml`` or ``python``). ``load()`` will
use the file extension to guess the format, but ``fmt`` may also be used
to specify it explicitly.

"""
# pylint: disable=unused-argument

import os
import re
import json as _json
from collections import OrderedDict
from datetime import datetime

import yaml as _yaml
import dateutil.parser

from wlauto.exceptions import SerializerSyntaxError
from wlauto.utils.types import regex_type
from wlauto.utils.misc import isiterable


__all__ = [
    'json',
    'yaml',
    'read_pod',
    'dump',
    'load',
]


class WAJSONEncoder(_json.JSONEncoder):

    def default(self, obj):  # pylint: disable=method-hidden
        if hasattr(obj, 'to_pod'):
            return obj.to_pod()
        elif isinstance(obj, regex_type):
            return 'REGEX:{}:{}'.format(obj.flags, obj.pattern)
        elif isinstance(obj, datetime):
            return 'DATET:{}'.format(obj.isoformat())
        else:
            return _json.JSONEncoder.default(self, obj)


class WAJSONDecoder(_json.JSONDecoder):

    def decode(self, s, **kwargs):
        d = _json.JSONDecoder.decode(self, s, **kwargs)

        def try_parse_object(v):
            if isinstance(v, basestring) and v.startswith('REGEX:'):
                _, flags, pattern = v.split(':', 2)
                return re.compile(pattern, int(flags or 0))
            elif isinstance(v, basestring) and v.startswith('DATET:'):
                _, pattern = v.split(':', 1)
                return dateutil.parser.parse(pattern)
            else:
                return v

        def load_objects(d):
            pairs = []
            for k, v in d.iteritems():
                if hasattr(v, 'iteritems'):
                    pairs.append((k, load_objects(v)))
                elif isiterable(v):
                    pairs.append((k, [try_parse_object(i) for i in v]))
                else:
                    pairs.append((k, try_parse_object(v)))
            return OrderedDict(pairs)

        return load_objects(d)


class json(object):

    @staticmethod
    def dump(o, wfh, indent=4, *args, **kwargs):
        return _json.dump(o, wfh, cls=WAJSONEncoder, indent=indent, *args, **kwargs)

    @staticmethod
    def load(fh, *args, **kwargs):
        try:
            return _json.load(fh, cls=WAJSONDecoder, object_pairs_hook=OrderedDict, *args, **kwargs)
        except ValueError as e:
            raise SerializerSyntaxError(e.message)

    @staticmethod
    def loads(s, *args, **kwargs):
        try:
            return _json.loads(s, cls=WAJSONDecoder, object_pairs_hook=OrderedDict, *args, **kwargs)
        except ValueError as e:
            raise SerializerSyntaxError(e.message)


_mapping_tag = _yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
_regex_tag = u'tag:wa:regex'


def _wa_dict_representer(dumper, data):
    return dumper.represent_mapping(_mapping_tag, data.iteritems())


def _wa_regex_representer(dumper, data):
    text = '{}:{}'.format(data.flags, data.pattern)
    return dumper.represent_scalar(_regex_tag, text)


def _wa_dict_constructor(loader, node):
    pairs = loader.construct_pairs(node)
    seen_keys = set()
    for k, _ in pairs:
        if k in seen_keys:
            raise ValueError('Duplicate entry: {}'.format(k))
        seen_keys.add(k)
    return OrderedDict(pairs)


def _wa_regex_constructor(loader, node):
    value = loader.construct_scalar(node)
    flags, pattern = value.split(':', 1)
    return re.compile(pattern, int(flags or 0))


_yaml.add_representer(OrderedDict, _wa_dict_representer)
_yaml.add_representer(regex_type, _wa_regex_representer)
_yaml.add_constructor(_mapping_tag, _wa_dict_constructor)
_yaml.add_constructor(_regex_tag, _wa_regex_constructor)


class yaml(object):

    @staticmethod
    def dump(o, wfh, *args, **kwargs):
        return _yaml.dump(o, wfh, *args, **kwargs)

    @staticmethod
    def load(fh, *args, **kwargs):
        try:
            return _yaml.load(fh, *args, **kwargs)
        except _yaml.YAMLError as e:
            lineno = None
            if hasattr(e, 'problem_mark'):
                lineno = e.problem_mark.line  # pylint: disable=no-member
            raise SerializerSyntaxError(e.message, lineno)

    loads = load


class python(object):

    @staticmethod
    def dump(o, wfh, *args, **kwargs):
        raise NotImplementedError()

    @classmethod
    def load(cls, fh, *args, **kwargs):
        return cls.loads(fh.read())

    @staticmethod
    def loads(s, *args, **kwargs):
        pod = {}
        try:
            exec s in pod  # pylint: disable=exec-used
        except SyntaxError as e:
            raise SerializerSyntaxError(e.message, e.lineno)
        for k in pod.keys():
            if k.startswith('__'):
                del pod[k]
        return pod


def read_pod(source, fmt=None):
    if isinstance(source, basestring):
        with open(source) as fh:
            return _read_pod(fh, fmt)
    elif hasattr(source, 'read') and (hasattr(source, 'name') or fmt):
        return _read_pod(source, fmt)
    else:
        message = 'source must be a path or an open file handle; got {}'
        raise ValueError(message.format(type(source)))


def dump(o, wfh, fmt='json', *args, **kwargs):
    serializer = {'yaml': yaml,
                  'json': json,
                  'python': python,
                  'py': python,
                  }.get(fmt)
    if serializer is None:
        raise ValueError('Unknown serialization format: "{}"'.format(fmt))
    serializer.dump(o, wfh, *args, **kwargs)


def load(s, fmt='json', *args, **kwargs):
    return read_pod(s, fmt=fmt)


def _read_pod(fh, fmt=None):
    if fmt is None:
        fmt = os.path.splitext(fh.name)[1].lower().strip('.')
    if fmt == 'yaml':
        return yaml.load(fh)
    elif fmt == 'json':
        return json.load(fh)
    elif fmt == 'py':
        return python.load(fh)
    else:
        raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(fh, 'name', '<none>')))
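A minimal sketch (not part of the diff): round-tripping a POD structure through the wrapped json serializer above. Regexes and datetimes survive the trip, and mappings come back as OrderedDicts, as described in the module docstring. The file name is illustrative.

# Sketch only: dump and reload a POD containing a regex and a datetime.
import re
from datetime import datetime
from wlauto.utils.serializer import json

pod = {'pattern': re.compile(r'cpu\d+'), 'started': datetime.now(), 'counts': [1, 2, 3]}
with open('example.json', 'w') as wfh:
    json.dump(pod, wfh)          # regex -> 'REGEX:...', datetime -> 'DATET:...'
with open('example.json') as fh:
    restored = json.load(fh)     # OrderedDict with the regex and datetime rebuilt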
@@ -40,7 +40,7 @@ class Androbench(AndroidUiAutoBenchmark):
        dbn = 'databases/history.db'
        db = self.device.path.join(self.device.package_data_directory, self.package, dbn)
        host_results = os.path.join(context.output_directory, 'history.db')
        self.device.pull_file(db, host_results, as_root=True)
        self.device.pull(db, host_results, as_root=True)
        qs = 'select * from history'
        conn = sqlite3.connect(host_results)
        c = conn.cursor()
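For reference, the repeated substitutions in the remaining workload hunks reduce to the following mapping, as observed in this comparison (old WA Device API on the left, new API on the right):

# device.push_file(...)            -> device.push(...)
# device.pull_file(...)            -> device.pull(...)
# device.delete_file(...)          -> device.remove(...)
# device.uninstall_executable(...) -> device.uninstall(...)
# device.get_binary_path(...)      -> device.get_installed(...)
# device.set_sysfile_value(p, v)   -> device.write_value(p, v)
# device.platform                  -> device.os
# device.dump_logcat(...)          -> context.device_manager.dump_logcat(...)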
@@ -132,7 +132,7 @@ class ApplaunchWorkload(Workload):
        if self.io_stress:
            host_scheduler_file = os.path.join(context.output_directory, 'scheduler')
            device_scheduler_file = '/sys/block/mmcblk0/queue/scheduler'
            self.device.pull_file(device_scheduler_file, host_scheduler_file)
            self.device.pull(device_scheduler_file, host_scheduler_file)
            with open(host_scheduler_file) as fh:
                scheduler = fh.read()
                scheduler_used = scheduler[scheduler.index("[") + 1:scheduler.index("]")]
@@ -144,7 +144,7 @@ class ApplaunchWorkload(Workload):
        if self.set_launcher_affinity:
            self._reset_launcher_affinity()
        if self.cleanup:
            self.device.delete_file(self.device_script_file)
            self.device.remove(self.device_script_file)

    def _set_launcher_affinity(self):
        try:
@@ -169,7 +169,7 @@ class ApplaunchWorkload(Workload):
    def _extract_results_from_file(self, context, filename, metric_suffix):
        host_result_file = os.path.join(context.output_directory, filename)
        device_result_file = self.device.path.join(self.device.working_directory, filename)
        self.device.pull_file(device_result_file, host_result_file)
        self.device.pull(device_result_file, host_result_file)

        with open(host_result_file) as fh:
            if filename == 'time.result':
@@ -57,7 +57,7 @@ class Audio(Workload):
        self.on_device_file = os.path.join(self.device.working_directory,
                                           os.path.basename(self.audio_file))

        self.device.push_file(self.audio_file, self.on_device_file, timeout=120)
        self.device.push(self.audio_file, self.on_device_file, timeout=120)

        # Open the browser with default page
        self.device.execute('am start -n com.android.browser/.BrowserActivity about:blank')
@@ -75,7 +75,7 @@ class Audio(Workload):

        if self.clear_file_cache:
            self.device.execute('sync')
            self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)
            self.device.write_value('/proc/sys/vm/drop_caches', 3)

        # Start the background music
        self.device.execute('am start -W -S -n com.android.music/.MediaPlaybackActivity -d {}'.format(self.on_device_file))
@@ -95,7 +95,7 @@ class Audio(Workload):

    def teardown(self, context):
        if self.perform_cleanup:
            self.device.delete_file(self.on_device_file)
            self.device.remove(self.on_device_file)

    def _download_audio_file(self):
        self.logger.debug('Downloading audio file from {}'.format(DEFAULT_AUDIO_FILE_URL))
@@ -59,7 +59,7 @@ class ChromeAutotest(Workload):
    ]

    def setup(self, context):
        if self.device.platform != 'chromeos':
        if self.device.os != 'chromeos':
            raise WorkloadError('{} only supports ChromeOS devices'.format(self.name))
        self.test_that = which('test_that')
        if not self.test_that:
@@ -93,12 +93,12 @@ class BBench(Workload):

        if self.with_audio:
            if self.force_dependency_push or not self.device.file_exists(self.audio_on_device):
                self.device.push_file(self.audio_file, self.audio_on_device, timeout=120)
                self.device.push(self.audio_file, self.audio_on_device, timeout=120)

        # Push the bbench site pages and http server to target device
        if self.force_dependency_push or not self.device.file_exists(self.bbench_on_device):
            self.logger.debug('Copying bbench sites to device.')
            self.device.push_file(self.dependencies_directory, self.bbench_on_device, timeout=300)
            self.device.push(self.dependencies_directory, self.bbench_on_device, timeout=300)

        # Push the bbench server
        host_binary = context.resolver.get(Executable(self, self.device.abi, 'bbench_server'))
@@ -120,10 +120,10 @@ class BBench(Workload):
            self.device.execute('pm clear {}'.format(self.browser_package))
        if self.clear_file_cache:
            self.device.execute('sync')
            self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)
            self.device.write_value('/proc/sys/vm/drop_caches', 3)

        # On Android 6+ the web browser requires permissions to access the SD card
        if self.device.get_sdk_version() >= 23:
        if self.device.os_version["sdk"]() >= 23:
            self.device.execute("pm grant com.android.browser android.permission.READ_EXTERNAL_STORAGE")
            self.device.execute("pm grant com.android.browser android.permission.WRITE_EXTERNAL_STORAGE")

@@ -148,12 +148,12 @@ class BBench(Workload):

        # Get index_no_input.html
        indexfile = os.path.join(self.device.working_directory, 'bbench/index_noinput.html')
        self.device.pull_file(indexfile, context.output_directory)
        self.device.pull(indexfile, context.output_directory)

        # Get the logs
        output_file = os.path.join(self.device.working_directory, 'browser_bbench_logcat.txt')
        self.device.execute('logcat -v time -d > {}'.format(output_file))
        self.device.pull_file(output_file, context.output_directory)
        self.device.pull(output_file, context.output_directory)

        metrics = _parse_metrics(os.path.join(context.output_directory, 'browser_bbench_logcat.txt'),
                                 os.path.join(context.output_directory, 'index_noinput.html'),
@@ -57,7 +57,7 @@ class Cfbench(AndroidUiAutoBenchmark):
                                                    self.package,
                                                    'shared_prefs', 'eu.chainfire.cfbench_preferences.xml ')
        self.device.execute('cp {} {}'.format(device_results_file, self.device.working_directory), as_root=True)
        self.device.pull_file(os.path.join(self.device.working_directory, 'eu.chainfire.cfbench_preferences.xml'), context.output_directory)
        self.device.pull(os.path.join(self.device.working_directory, 'eu.chainfire.cfbench_preferences.xml'), context.output_directory)
        result_file = os.path.join(context.output_directory, 'eu.chainfire.cfbench_preferences.xml')
        tree = ET.parse(result_file)
        root = tree.getroot()
@@ -93,9 +93,9 @@ class Cyclictest(Workload):

        if self.clear_file_cache:
            self.device.execute('sync')
            self.device.set_sysfile_value('/proc/sys/vm/drop_caches', 3)
            self.device.write_value('/proc/sys/vm/drop_caches', 3)

        if self.device.platform == 'android':
        if self.device.os == 'android':
            if self.screen_off and self.device.is_screen_on:
                self.device.execute('input keyevent 26')

@@ -103,7 +103,7 @@ class Cyclictest(Workload):
        self.device.execute(self.cyclictest_command, self.duration * 2, as_root=True)

    def update_result(self, context):
        self.device.pull_file(self.cyclictest_result, context.output_directory)
        self.device.pull(self.cyclictest_result, context.output_directory)

        # Parsing the output
        # Standard Cyclictest Output:
@@ -132,7 +132,7 @@ class Cyclictest(Workload):
            context.result.add_metric(full_key, value, 'microseconds')

    def teardown(self, context):
        if self.device.platform == 'android':
        if self.device.os == 'android':
            if self.screen_off:
                self.device.ensure_screen_is_on()
        self.device.execute('rm -f {}'.format(self.cyclictest_result))
@@ -71,7 +71,7 @@ class Dex2oatBenchmark(Workload):
        self.command = self.command_template.format(on_device_apk, self.on_device_oat, self.instruction_set)

        if not self.device.file_exists(on_device_apk):
            self.device.push_file(self.apk_file, on_device_apk)
            self.device.push(self.apk_file, on_device_apk)

    def run(self, context):
        self.device.execute(self.command, self.run_timeout)
@@ -88,7 +88,7 @@ class Dex2oatBenchmark(Workload):

        """
        logcat_log = os.path.join(context.output_directory, 'logcat.log')
        self.device.dump_logcat(logcat_log)
        context.device_manager.dump_logcat(logcat_log)

        regex_time = re.compile("^I\/dex2oat \( *[0-9]+\): dex2oat took (?P<time>[0-9]+\.?[0-9]*)(?P<unit>m?s)")
        regex_comp_time = re.compile("^I\/dex2oat \( *[0-9]+\): +(?P<time>[0-9]*\.?[0-9]*)(?P<unit>m?s) Compile Dex File")
@@ -118,5 +118,4 @@ class Dex2oatBenchmark(Workload):
        context.result.add_metric('dex2oat_comp_time', time, "ms", lower_is_better=True)

    def teardown(self, context):
        self.device.delete_file(self.on_device_oat)

        self.device.remove(self.on_device_oat)
@@ -120,7 +120,7 @@ class Dhrystone(Workload):
        context.result.add_metric('total score', total_score)

    def teardown(self, context):
        self.device.uninstall_executable('dhrystone')
        self.device.uninstall('dhrystone')

    def validate(self):
        if self.mloops and self.duration:  # pylint: disable=E0203
@@ -67,7 +67,7 @@ class Ebizzy(Workload):
        self.device.execute(self.command, timeout=self.run_timeout)

    def update_result(self, context):
        self.device.pull_file(self.ebizzy_results, context.output_directory)
        self.device.pull(self.ebizzy_results, context.output_directory)

        with open(os.path.join(context.output_directory, results_txt)) as ebizzy_file:
            for line in ebizzy_file:
@@ -83,7 +83,7 @@ class Ebizzy(Workload):
                                          results_match.group('unit'))

    def teardown(self, context):
        self.device.uninstall_executable(self.device_binary)
        self.device.uninstall(self.device_binary)

    def validate(self):
        pass
@@ -121,12 +121,13 @@ class Geekbench(AndroidUiAutoBenchmark):
            score_calculator.update_results(context)

    def update_result_3(self, context):
        outfile_glob = self.device.path.join(self.device.package_data_directory, self.package, 'files', '*gb3')
        outfile_glob = self.device.path.join(context.device_manager.package_data_directory, self.package, 'files', '*gb3')
        on_device_output_files = [f.strip() for f in
                                  self.device.execute('ls {}'.format(outfile_glob), as_root=True).split('\n')]
                                  self.device.execute('ls {}'.format(outfile_glob), as_root=True).split('\n')
                                  if f.strip()]
        for i, on_device_output_file in enumerate(on_device_output_files):
            host_temp_file = tempfile.mktemp()
            self.device.pull_file(on_device_output_file, host_temp_file)
            self.device.pull(on_device_output_file, host_temp_file)
            host_output_file = os.path.join(context.output_directory, os.path.basename(on_device_output_file))
            with open(host_temp_file) as fh:
                data = json.load(fh)
@@ -71,7 +71,7 @@ class Hackbench(Workload):
        self.device.execute(self.command, timeout=self.run_timeout)

    def update_result(self, context):
        self.device.pull_file(self.hackbench_result, context.output_directory)
        self.device.pull(self.hackbench_result, context.output_directory)

        with open(os.path.join(context.output_directory, hackbench_results_txt)) as hackbench_file:
            for line in hackbench_file:
@@ -81,7 +81,7 @@ class Hackbench(Workload):
            context.result.add_metric(label, float(match.group(1)), units)

    def teardown(self, context):
        self.device.uninstall_executable(self.binary_name)
        self.device.uninstall(self.binary_name)
        self.device.execute('rm -f {}'.format(self.hackbench_result))

    def validate(self):
@@ -105,4 +105,4 @@ class HWUITest(Workload):
                                   "frames": self.frames})

    def teardown(self, context):
        self.device.uninstall_executable(BINARY)
        self.device.uninstall(BINARY)
@@ -44,7 +44,7 @@ class IdleWorkload(Workload):

    def setup(self, context):
        if self.stop_android:
            if self.device.platform != 'android':
            if self.device.os != 'android':
                raise ConfigError('stop_android can only be set for Android devices')
            if not self.device.is_rooted:
                raise WorkloadError('Idle workload requires the device to be rooted in order to stop Android.')
@@ -177,12 +177,12 @@ class Iozone(Workload):
        self.device.execute(self.command, timeout=self.timeout)

    def update_result(self, context):
        self.device.pull_file(self.results, context.output_directory)
        self.device.pull(self.results, context.output_directory)
        self.outfile = os.path.join(context.output_directory,
                                    iozone_results_txt)

        if '-b' in self.other_params:
            self.device.pull_file(self.device_output_file,
            self.device.pull(self.device_output_file,
                             context.output_directory)

        # if running in thread mode
@@ -313,4 +313,4 @@ class Iozone(Workload):
            return results

    def finalize(self, context):
        self.device.uninstall_executable(self.device_binary)
        self.device.uninstall(self.device_binary)
@@ -122,7 +122,7 @@ class lmbench(Workload):
        context.add_artifact('lmbench', 'lmbench.output', 'data')

    def teardown(self, context):
        self.device.uninstall_executable(self.test)
        self.device.uninstall(self.test)

    #
    # Test setup routines
@@ -86,7 +86,7 @@ class ManualWorkload(Workload):
    def update_result(self, context):
        if self.enable_logcat:
            logcat_dir = os.path.join(context.output_directory, 'logcat')
            self.device.dump_logcat(logcat_dir)
            context.device_manager.dump_logcat(logcat_dir)

    def teardown(self, context):
        pass
@@ -102,8 +102,3 @@ class ManualWorkload(Workload):
            raise ConfigError(message)
        if not self.user_triggered and not self.duration:
            raise ConfigError('Either user_triggered must be ``True`` or duration must be > 0.')

        if self.enable_logcat is None:
            self.enable_logcat = self.device.platform == "android"
        elif self.enable_logcat and self.device.platform != "android":
            raise ConfigError("The `enable_logcat` parameter can only be used on Android devices")
@@ -71,7 +71,7 @@ class Peacekeeper(AndroidUiAutoBenchmark):

        # Pull the result page url, which contains the results, from the
        # peacekeeper.txt file and process it
        self.device.pull_file(self.output_file, context.output_directory)
        self.device.pull(self.output_file, context.output_directory)
        result_file = os.path.join(context.output_directory, 'peacekeeper.txt')
        with open(result_file) as fh:
            for line in fh:
@@ -68,7 +68,7 @@ class PowerLoadtest(Workload):
    ]

    def setup(self, context):
        if self.device.platform != 'chromeos':
        if self.device.os != 'chromeos':
            raise WorkloadError('{} only supports ChromeOS devices'.format(self.name))
        self.test_that = which('test_that')
        if not self.test_that:
@@ -57,8 +57,8 @@ class Recentfling(Workload):
    def setup(self, context):
        self.defs_host = context.resolver.get(File(self, "defs.sh"))
        self.recentfling_host = context.resolver.get(File(self, "recentfling.sh"))
        self.device.push_file(self.recentfling_host, self.device.working_directory)
        self.device.push_file(self.defs_host, self.device.working_directory)
        self.device.push(self.recentfling_host, self.device.working_directory)
        self.device.push(self.defs_host, self.device.working_directory)
        self._kill_recentfling()
        self.device.ensure_screen_is_on()

@@ -89,9 +89,9 @@ class Recentfling(Workload):
                                      classifiers={"loop": count or "Average"})

    def teardown(self, context):
        self.device.delete_file(self.device.path.join(self.device.working_directory,
        self.device.remove(self.device.path.join(self.device.working_directory,
                                                  "recentfling.sh"))
        self.device.delete_file(self.device.path.join(self.device.working_directory,
        self.device.remove(self.device.path.join(self.device.working_directory,
                                                  "defs.sh"))

    def _kill_recentfling(self):
@@ -164,7 +164,7 @@ class RtApp(Workload):
        self.host_json_config = self._load_json_config(context)
        self.config_file_on_device = self.device.path.join(self.device_working_directory,
                                                           os.path.basename(self.host_json_config))
        self.device.push_file(self.host_json_config, self.config_file_on_device, timeout=60)
        self.device.push(self.host_json_config, self.config_file_on_device, timeout=60)
        self.command = '{} {}'.format(self.device_binary, self.config_file_on_device)

        time_buffer = 30
@@ -216,7 +216,7 @@ class RtApp(Workload):

    def _deploy_rt_app_binary_if_necessary(self):
        # called from initialize() so gets invoked once per run
        RtApp.device_binary = self.device.get_binary_path("rt-app")
        RtApp.device_binary = self.device.get_installed("rt-app")
        if self.force_install or not RtApp.device_binary:
            if not self.host_binary:
                message = '''rt-app is not installed on the device and could not be
@@ -274,7 +274,7 @@ class RtApp(Workload):
        self.device.execute(tar_command, timeout=300)
        device_path = self.device.path.join(self.device_working_directory, TARBALL_FILENAME)
        host_path = os.path.join(context.output_directory, TARBALL_FILENAME)
        self.device.pull_file(device_path, host_path, timeout=120)
        self.device.pull(device_path, host_path, timeout=120)
        with tarfile.open(host_path, 'r:gz') as tf:
            tf.extractall(context.output_directory)
        os.remove(host_path)
@@ -51,7 +51,7 @@ class ShellScript(Workload):
    def setup(self, context):
        self.on_device_script_file = self.device.path.join(self.device.working_directory,
                                                           os.path.basename(self.script_file))
        self.device.push_file(self.script_file, self.on_device_script_file)
        self.device.push(self.script_file, self.on_device_script_file)
        self.command = 'sh {} {}'.format(self.on_device_script_file, self.argstring)

    def run(self, context):
@@ -62,4 +62,4 @@ class ShellScript(Workload):
            wfh.write(self.output)

    def teardown(self, context):
        self.device.delete_file(self.on_device_script_file)
        self.device.remove(self.on_device_script_file)
@@ -253,7 +253,7 @@ class Spec2000(Workload):
        if self.force_push_assets or not self.device.file_exists(datadir):
            self.device.execute('mkdir -p {}'.format(datadir))
            for datafile in bench.datafiles:
                self.device.push_file(datafile, self.device.path.join(datadir, os.path.basename(datafile)))
                self.device.push(datafile, self.device.path.join(datadir, os.path.basename(datafile)))

        if self.mode == 'speed':
            cpus = [self._get_fastest_cpu().lower()]
@@ -69,7 +69,7 @@ class Stream(Workload):
        self.output = self.device.execute(self.command, timeout=self.timeout)

    def update_result(self, context):
        self.device.pull_file(self.results, context.output_directory)
        self.device.pull(self.results, context.output_directory)
        outfile = os.path.join(context.output_directory, stream_results_txt)

        with open(outfile, 'r') as stream_file:
@@ -89,5 +89,5 @@ class Stream(Workload):
            context.result.add_metric(label, float(match.group(1)), match.group(2))

    def finalize(self, context):
        self.device.uninstall_executable(self.stream_default)
        self.device.uninstall_executable(self.stream_optional)
        self.device.uninstall(self.stream_default)
        self.device.uninstall(self.stream_optional)
@@ -112,7 +112,7 @@ class Sysbench(Workload):

    def update_result(self, context):
        host_results_file = os.path.join(context.output_directory, 'sysbench_result.txt')
        self.device.pull_file(self.results_file, host_results_file)
        self.device.pull(self.results_file, host_results_file)
        context.add_iteration_artifact('sysbench_output', kind='raw', path=host_results_file)

        with open(host_results_file) as fh:
@@ -129,10 +129,10 @@ class Sysbench(Workload):
            extract_threads_fairness_metric('execution time', fh.next(), context.result)

    def teardown(self, context):
        self.device.delete_file(self.results_file)
        self.device.remove(self.results_file)

    def _check_executable(self):
        self.on_device_binary = self.device.get_binary_path("sysbench")
        self.on_device_binary = self.device.get_installed("sysbench")
        if not self.on_device_binary and not self.on_host_binary:
            raise WorkloadError('sysbench binary is not installed on the device, and it is not found on the host.')
        if self.force_install:
@@ -189,12 +189,12 @@ class Telemetry(Workload):
        if self.target_config:
            device_opts = self.target_config
        else:
            if self.device.platform == 'chromeos':
            if self.device.os == 'chromeos':
                if '--remote' not in self.run_benchmark_params:
                    device_opts += '--remote={} '.format(self.device.host)
                if '--browser' not in self.run_benchmark_params:
                    device_opts += '--browser=cros-chrome '
            elif self.device.platform == 'android':
            elif self.device.os == 'android':
                if '--device' not in self.run_benchmark_params and self.device.adb_name:
                    device_opts += '--device={} '.format(self.device.adb_name)
                if '--browser' not in self.run_benchmark_params:
@@ -101,7 +101,7 @@ class Vellamo(AndroidUiAutoBenchmark):
        for test in self.benchmarks:  # Get all scores from HTML files
            filename = None
            if test == "Browser":
                result_folder = self.device.path.join(self.device.package_data_directory, self.package, 'files')
                result_folder = self.device.path.join(context.device_manager.package_data_directory, self.package, 'files')
                for result_file in self.device.listdir(result_folder, as_root=True):
                    if result_file.startswith("Browser"):
                        filename = result_file
@@ -110,7 +110,7 @@ class Vellamo(AndroidUiAutoBenchmark):

            device_file = self.device.path.join(self.device.package_data_directory, self.package, 'files', filename)
            host_file = os.path.join(context.output_directory, filename)
            self.device.pull_file(device_file, host_file, as_root=True)
            self.device.pull(device_file, host_file, as_root=True)
            with open(host_file) as fh:
                parser = VellamoResultParser()
                parser.feed(fh.read())
@@ -212,4 +212,3 @@ class VellamoResultParser(HTMLParser):
                    self.benchmarks[-1].add_metric(data)
                else:
                    self.failed = True

@@ -108,7 +108,7 @@ class VideoWorkload(Workload):
        on_device_video_file = os.path.join(self.device.working_directory, os.path.basename(self.host_video_file))
        if self.force_dependency_push or not self.device.file_exists(on_device_video_file):
            self.logger.debug('Copying {} to device.'.format(self.host_video_file))
            self.device.push_file(self.host_video_file, on_device_video_file, timeout=120)
            self.device.push(self.host_video_file, on_device_video_file, timeout=120)
        self.device.execute('am start -n com.android.browser/.BrowserActivity about:blank')
        time.sleep(5)
        self.device.execute('am force-stop com.android.browser')