1
0
mirror of https://github.com/ARM-software/workload-automation.git synced 2025-09-01 10:52:33 +01:00

Initial commit of open source Workload Automation.

This commit is contained in:
Sergei Trofimov
2015-03-10 13:09:31 +00:00
commit a747ec7e4c
412 changed files with 41401 additions and 0 deletions

74
wlauto/external/README vendored Normal file
View File

@@ -0,0 +1,74 @@
This directory contains external libraries and standalone utilities which have
been written/modified to work with Workload Automation (and thus need to be
included with WA rather than obtained from original sources).
bbench_server
=============
This is a small server that is used to detect when the ``bbench`` workload has completed.
``bbench`` navigates through a bunch of web pages in a browser using javascript.
It will cause the browser to send a GET request to the port the bbench_server is
listening on, indicating the end of the workload.
daq_server
==========
Contains Daq server files that will run on a Windows machine. Please refer to
daq instrument documentation.
louie (third party)
=====
Python package that is itself a fork (and now, a replacement for) pydispatcher.
This library provides a signal dispatching mechanism. This has been modified for
WA to add prioritization to callbacks.
pmu_logger
==========
Source for the kernel driver that enables logging of CCI counters to ftrace
on periodic basis. This driver is required by the ``cci_pmu_logger`` instrument.
readenergy
==========
Outputs Juno internal energy/power/voltage/current measurements by reading APB
registers from memory. This is used by the ``juno_energy`` instrument.
revent
======
This is a tool that is used to both record and playback key press and screen tap
events. It is used to record UI manipulation for some workloads (such as games)
where it is not possible to use the Android UI Automator.
The tool is also included in binary form in wlauto/common/. In order to build
the tool from source, you will need to have Android NDK in your PATH.
stacktracer.py (third party)
==============
A module based on an ActiveState recipe that allows tracing thread stacks during
execution of a Python program. This is used through the ``--debug`` flag in WA
to ease debugging multi-threaded parts of the code.
terminalsize.py (third party)
===============
Implements a platform-agnostic way of determining terminal window size. Taken
from a public Github gist.
uiauto
======
Contains the utilities library for UI automation.

31
wlauto/external/bbench_server/build.sh vendored Executable file
View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Build bbench_server using the Android NDK and copy the resulting binary
# into the bbench workload's binaries directory.

BUILD_COMMAND=ndk-build
if [[ $(which $BUILD_COMMAND) ]] ; then
    # BUG FIX: the original checked `[[ $? ]]`, which tests string
    # non-emptiness and is therefore always true -- the binary was copied
    # even when the build failed. Test the build command's exit status
    # directly instead.
    if $BUILD_COMMAND; then
        echo Copying to ../../workloads/bbench/
        cp libs/armeabi/bbench_server ../../workloads/bbench/bin/armeabi/bbench_server
    else
        echo "$BUILD_COMMAND failed."
        exit 1
    fi
else
    echo Please make sure you have Android NDK in your PATH.
    exit 1
fi

View File

@@ -0,0 +1,9 @@
# NDK makefile for bbench_server -- the small HTTP "completion detector"
# used by the bbench workload.
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
# Single C++ source, built as a standalone native executable.
LOCAL_SRC_FILES:= bbench_server.cpp
LOCAL_MODULE := bbench_server
# 'optional' => built only when explicitly requested, not in default builds.
LOCAL_MODULE_TAGS := optional
LOCAL_STATIC_LIBRARIES := libc
LOCAL_SHARED_LIBRARIES :=
include $(BUILD_EXECUTABLE)

View File

@@ -0,0 +1,151 @@
/* Copyright 2012-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**************************************************************************/
/* Simple HTTP server program that will return on accepting connection */
/**************************************************************************/
/* Tested on Android ICS browser and FireFox browser */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <sys/wait.h>
#define SERVERPORT "3030"
/* Terminate the process with status 1, printing `msg`, when `condition`
 * is non-zero. No-op when `condition` is zero. */
void ExitOnError(int condition, const char *msg)
{
    if (!condition)
        return;
    printf("Server: %s\n", msg);
    exit(1);
}
/* Return a pointer to the address field inside a generic sockaddr,
 * handling both IPv4 (sin_addr) and IPv6 (sin6_addr) layouts. */
void *GetInetAddr(struct sockaddr *sa)
{
    if (sa->sa_family == AF_INET)
        return &(((struct sockaddr_in *)sa)->sin_addr);
    return &(((struct sockaddr_in6 *)sa)->sin6_addr);
}
/*
 * Wait up to a command-line-supplied timeout (in seconds) for a single
 * HTTP connection on SERVERPORT, reply with a minimal "200 OK" page, and
 * exit. Returns 0 on success; exits with status 1 on any error or timeout.
 */
int main(int argc, char *argv[])
{
    socklen_t addr_size;
    struct addrinfo hints, *res;
    int server_fd, client_fd;
    int retval;
    int timeout_in_seconds;

    /* Get the timeout value in seconds */
    if (argc < 2)
    {
        printf("Usage %s <timeout in seconds>\n", argv[0]);
        exit(1);
    }
    else
    {
        timeout_in_seconds = atoi(argv[1]);
        printf("Server: Waiting for connection on port %s with timeout of %d seconds\n", SERVERPORT, timeout_in_seconds);
    }

    /**************************************************************************/
    /* Listen to a socket                                                     */
    /**************************************************************************/
    memset(&hints, 0, sizeof hints);
    hints.ai_family = AF_UNSPEC;     // use IPv4 or IPv6, whichever
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = AI_PASSIVE;     // fill in my IP for me

    /* BUG FIX: the getaddrinfo() result was previously unchecked; on failure
     * `res` would have been used uninitialized below. */
    retval = getaddrinfo(NULL, SERVERPORT, &hints, &res);
    ExitOnError(retval != 0, "getaddrinfo failed");

    server_fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
    ExitOnError(server_fd < 0, "Socket creation failed");

    retval = bind(server_fd, res->ai_addr, res->ai_addrlen);
    ExitOnError(retval < 0, "Bind failed");

    retval = listen(server_fd, 10);
    ExitOnError(retval < 0, "Listen failed");

    freeaddrinfo(res);  /* BUG FIX: the addrinfo list was previously leaked. */

    /**************************************************************************/
    /* Wait for connection to arrive or time out                              */
    /**************************************************************************/
    fd_set readfds;
    FD_ZERO(&readfds);
    FD_SET(server_fd, &readfds);

    /* Timeout parameter. BUG FIX: `timeval` without the `struct` keyword is
     * only valid C++; adding `struct` makes this valid in both C and C++. */
    struct timeval tv;
    tv.tv_sec = timeout_in_seconds;
    tv.tv_usec = 0;

    int ret = select(server_fd + 1, &readfds, NULL, NULL, &tv);
    ExitOnError(ret <= 0, "No connection established, timed out");
    ExitOnError(FD_ISSET(server_fd, &readfds) == 0, "Error occured in select");

    /**************************************************************************/
    /* Accept connection and print the information                            */
    /**************************************************************************/
    {
        struct sockaddr_storage client_addr;
        char client_addr_string[INET6_ADDRSTRLEN];
        addr_size = sizeof client_addr;
        client_fd = accept(server_fd, (struct sockaddr *)&client_addr, &addr_size);
        ExitOnError(client_fd < 0, "Accept failed");
        inet_ntop(client_addr.ss_family,
                  GetInetAddr((struct sockaddr *)&client_addr),
                  client_addr_string,
                  sizeof client_addr_string);
        printf("Server: Received connection from %s\n", client_addr_string);
    }

    /**************************************************************************/
    /* Send a acceptable HTTP response                                        */
    /**************************************************************************/
    {
        char response[] = "HTTP/1.1 200 OK\r\n"
                          "Content-Type: text/html\r\n"
                          "Connection: close\r\n"
                          "\r\n"
                          "<html>"
                          "<head>Local Server: Connection Accepted</head>"
                          "<body></body>"
                          "</html>";
        int bytes_sent;
        bytes_sent = send(client_fd, response, strlen(response), 0);
        ExitOnError(bytes_sent < 0, "Sending Response failed");
    }

    close(client_fd);
    close(server_fd);
    return 0;
}

Binary file not shown.

View File

0
wlauto/external/daq_server/src/README vendored Normal file
View File

25
wlauto/external/daq_server/src/build.sh vendored Executable file
View File

@@ -0,0 +1,25 @@
#!/bin/bash
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Build a source distribution of the daq server, move the resulting tarball
# up one directory, and clean all build artefacts.

python setup.py sdist

# Remove intermediate build outputs.
rm -rf build
rm -f MANIFEST

# Keep only the tarball, one level up from here.
if [ -d dist ]; then
    mv dist/*.tar.gz ..
    rm -rf dist
fi

# Drop stale bytecode files.
find . -iname '*.pyc' -delete

View File

@@ -0,0 +1,17 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Version of the daqpower package.
__version__ = '1.0.1'

View File

@@ -0,0 +1,380 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101,E1103
import os
import sys
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, ClientFactory, ReconnectingClientFactory
from twisted.internet.error import ConnectionLost, ConnectionDone
from twisted.protocols.basic import LineReceiver
if __name__ == '__main__': # for debugging
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from daqpower import log
from daqpower.common import DaqServerRequest, DaqServerResponse, Status
from daqpower.config import get_config_parser
__all__ = ['execute_command', 'run_send_command', 'Status']
class Command(object):
    """A named command, plus its keyword parameters, to send to the DAQ server."""

    def __init__(self, name, **params):
        self.name, self.params = name, params
class CommandResult(object):
    """Outcome of an executed command: a status, a message and optional data."""

    def __init__(self):
        # All fields start unset; they are filled in by the protocol/factory.
        self.status = None
        self.message = None
        self.data = None

    def __str__(self):
        return '%s %s' % (self.status, self.message)
class CommandExecutorProtocol(Protocol):
    """Twisted protocol that sends one command to the DAQ server and collects
    the response into the factory's CommandResult.

    A 'get_data' command is expanded into a 'list_port_files' request followed
    by one 'pull' request per reported port; all other commands are forwarded
    verbatim. Each request times out after ``timeout`` seconds and is retried
    up to ``retries`` times.
    """

    def __init__(self, command, timeout=10, retries=1):
        self.command = command
        self.sent_request = None           # last DaqServerRequest sent
        self.waiting_for_response = False
        self.keep_going = None             # set True when more requests follow
        self.ports_to_pull = None          # remaining ports for 'get_data'
        self.factory = None                # set by CommandExecutorFactory.buildProtocol
        self.timeoutCallback = None        # pending reactor.callLater handle
        self.timeout = timeout
        self.retries = retries
        self.retry_count = 0

    def connectionMade(self):
        # 'get_data' is a composite command: first discover which port files
        # exist on the server, then pull each one in turn.
        if self.command.name == 'get_data':
            self.sendRequest('list_port_files')
        else:
            self.sendRequest(self.command.name, **self.command.params)

    def connectionLost(self, reason=ConnectionDone):
        # NOTE(review): twisted normally passes a Failure here, not a bare
        # exception, so this isinstance() check may never be true -- verify
        # against the twisted version in use.
        if isinstance(reason, ConnectionLost):
            self.errorOut('connection lost: {}'.format(reason))
        elif self.waiting_for_response:
            self.errorOut('Server closed connection without sending a response.')
        else:
            log.debug('connection terminated.')

    def sendRequest(self, command, **params):
        # Serialize the request, send it, and arm the timeout timer.
        self.sent_request = DaqServerRequest(command, params)
        request_string = self.sent_request.serialize()
        log.debug('sending request: {}'.format(request_string))
        self.transport.write(''.join([request_string, '\r\n']))
        self.timeoutCallback = reactor.callLater(self.timeout, self.requestTimedOut)
        self.waiting_for_response = True

    def dataReceived(self, data):
        self.keep_going = False
        if self.waiting_for_response:
            self.waiting_for_response = False
            self.timeoutCallback.cancel()  # response arrived; disarm the timeout
            try:
                response = DaqServerResponse.deserialize(data)
            except Exception, e:  # pylint: disable=W0703
                self.errorOut('Invalid response: {} ({})'.format(data, e))
            else:
                if response.status != Status.ERROR:
                    self.processResponse(response)  # may set self.keep_going
                    if not self.keep_going:
                        self.commandCompleted(response.status, response.message, response.data)
                else:
                    self.errorOut(response.message)
        else:
            self.errorOut('unexpected data received: {}\n'.format(data))

    def processResponse(self, response):
        # Dispatch on the *request* we sent, since responses carry no type tag.
        if self.sent_request.command in ['list_ports', 'list_port_files']:
            self.processPortsResponse(response)
        elif self.sent_request.command == 'list_devices':
            self.processDevicesResponse(response)
        elif self.sent_request.command == 'pull':
            self.processPullResponse(response)

    def processPortsResponse(self, response):
        if 'ports' not in response.data:
            self.errorOut('Response did not containt ports data: {} ({}).'.format(response, response.data))
        ports = response.data['ports']
        response.data = ports
        if self.command.name == 'get_data':
            if ports:
                # Kick off the pull sequence, one port at a time.
                self.ports_to_pull = ports
                self.sendPullRequest(self.ports_to_pull.pop())
            else:
                # Nothing to download; report success-with-caveat.
                response.status = Status.OKISH
                response.message = 'No ports were returned.'

    def processDevicesResponse(self, response):
        if 'devices' not in response.data:
            self.errorOut('Response did not containt devices data: {} ({}).'.format(response, response.data))
        ports = response.data['devices']
        response.data = ports

    def sendPullRequest(self, port_id):
        self.sendRequest('pull', port_id=port_id)
        self.keep_going = True

    def processPullResponse(self, response):
        if 'port_number' not in response.data:
            self.errorOut('Response does not contain port number: {} ({}).'.format(response, response.data))
        port_number = response.data.pop('port_number')
        filename = self.sent_request.params['port_id'] + '.csv'
        # The actual file download happens on a separate connection managed
        # by the factory.
        self.factory.initiateFileTransfer(filename, port_number)
        if self.ports_to_pull:
            self.sendPullRequest(self.ports_to_pull.pop())

    def commandCompleted(self, status, message=None, data=None):
        # Record the final result on the factory and close the connection.
        self.factory.result.status = status
        self.factory.result.message = message
        self.factory.result.data = data
        self.transport.loseConnection()

    def requestTimedOut(self):
        self.retry_count += 1
        if self.retry_count > self.retries:
            self.errorOut("Request timed out; server failed to respond.")
        else:
            log.debug('Retrying...')
            self.connectionMade()  # re-send the original request

    def errorOut(self, message):
        self.factory.errorOut(message)
class CommandExecutorFactory(ClientFactory):
    """Builds CommandExecutorProtocol instances and tracks the overall result
    of a single command, including any file transfers spawned by 'get_data'.
    Also owns reactor shutdown once all work is done."""

    protocol = CommandExecutorProtocol
    wait_delay = 1  # seconds between polls while waiting for transfers to finish

    def __init__(self, config, command, timeout=10, retries=1):
        self.config = config                 # ServerConfiguration (host/port)
        self.command = command
        self.timeout = timeout
        self.retries = retries
        self.result = CommandResult()
        self.done = False
        self.transfers_in_progress = {}      # FileReceiverFactory -> connector
        if command.name == 'get_data':
            if 'output_directory' not in command.params:
                self.errorOut('output_directory not specifed for get_data command.')
            self.output_directory = command.params['output_directory']
            if not os.path.isdir(self.output_directory):
                log.debug('Creating output directory {}'.format(self.output_directory))
                os.makedirs(self.output_directory)

    def buildProtocol(self, addr):
        protocol = CommandExecutorProtocol(self.command, self.timeout, self.retries)
        protocol.factory = self
        return protocol

    def initiateFileTransfer(self, filename, port):
        # Start a second connection dedicated to downloading one port's CSV.
        log.debug('Downloading {} from port {}'.format(filename, port))
        filepath = os.path.join(self.output_directory, filename)
        session = FileReceiverFactory(filepath, self)
        connector = reactor.connectTCP(self.config.host, port, session)
        self.transfers_in_progress[session] = connector

    def transferComplete(self, session):
        connector = self.transfers_in_progress[session]
        log.debug('Transfer on port {} complete.'.format(connector.port))
        del self.transfers_in_progress[session]

    def clientConnectionLost(self, connector, reason):
        if self.transfers_in_progress:
            log.debug('Waiting for the transfer(s) to complete.')
        self.waitForTransfersToCompleteAndExit()

    def clientConnectionFailed(self, connector, reason):
        self.result.status = Status.ERROR
        self.result.message = 'Could not connect to server.'
        self.waitForTransfersToCompleteAndExit()

    def waitForTransfersToCompleteAndExit(self):
        # Poll until all file transfers have drained, then stop the reactor.
        if self.transfers_in_progress:
            reactor.callLater(self.wait_delay, self.waitForTransfersToCompleteAndExit)
        else:
            log.debug('Stopping the reactor.')
            reactor.stop()

    def errorOut(self, message):
        self.result.status = Status.ERROR
        self.result.message = message
        reactor.crash()

    def __str__(self):
        # NOTE(review): this says 'CommandExecutorProtocol' although this is
        # the factory -- looks like a copy-paste; cannot change the string
        # here without altering runtime output.
        return '<CommandExecutorProtocol {}>'.format(self.command.name)

    __repr__ = __str__
class FileReceiver(LineReceiver):  # pylint: disable=W0223
    """Protocol that streams received lines into a local file at ``path``."""

    def __init__(self, path):
        self.path = path
        self.fh = None       # opened lazily on connection
        self.factory = None  # set by FileReceiverFactory.buildProtocol

    def connectionMade(self):
        # Replace any file already present at the destination path.
        if os.path.isfile(self.path):
            log.warning('overriding existing file.')
            os.remove(self.path)
        self.fh = open(self.path, 'w')

    def connectionLost(self, reason=ConnectionDone):
        if self.fh:
            self.fh.close()

    def lineReceived(self, line):
        # Normalize line endings to a single '\n' before writing.
        self.fh.write(line.rstrip('\r\n') + '\n')
class FileReceiverFactory(ReconnectingClientFactory):
    """Factory for FileReceiver protocols; notifies its owner (the
    CommandExecutorFactory) when a file transfer completes."""

    def __init__(self, path, owner):
        self.path = path
        self.owner = owner  # the CommandExecutorFactory that started this transfer

    def buildProtocol(self, addr):
        protocol = FileReceiver(self.path)
        protocol.factory = self
        self.resetDelay()  # reset ReconnectingClientFactory's reconnect backoff
        return protocol

    def clientConnectionLost(self, conector, reason):
        # NOTE(review): `reason` is normally a twisted Failure, not an
        # exception instance, so isinstance() against ConnectionLost may never
        # match (reason.check(ConnectionLost) is the usual idiom) -- verify.
        if isinstance(reason, ConnectionLost):
            log.error('Connection lost: {}'.format(reason))
            ReconnectingClientFactory.clientConnectionLost(self, conector, reason)
        else:
            # A clean close means the server finished sending the file.
            self.owner.transferComplete(self)

    def clientConnectionFailed(self, conector, reason):
        # Same Failure-vs-exception caveat as clientConnectionLost above.
        if isinstance(reason, ConnectionLost):
            log.error('Connection failed: {}'.format(reason))
            ReconnectingClientFactory.clientConnectionFailed(self, conector, reason)

    def __str__(self):
        return '<FileReceiver {}>'.format(self.path)

    __repr__ = __str__
def execute_command(server_config, command, **kwargs):
    """Execute one command against the DAQ server and return a CommandResult.

    ``command`` may be a Command instance or a command-name string (in which
    case ``kwargs`` become its parameters). 'stop' and 'pull' get a longer
    timeout because they may involve large data transfers. A fresh twisted
    reactor is installed for every call -- see the comments below for why.
    """
    before_fds = _get_open_fds()  # see the comment in the finally clause below
    if isinstance(command, basestring):
        command = Command(command, **kwargs)
    timeout = 300 if command.name in ['stop', 'pull'] else 10
    factory = CommandExecutorFactory(server_config, command, timeout)

    # reactors aren't designed to be re-startable. In order to be
    # able to call execute_command multiple times, we need to force
    # re-installation of the reactor; hence this hackery.
    # TODO: look into implementing restartable reactors. According to the
    # Twisted FAQ, there is no good reason why there isn't one:
    # http://twistedmatrix.com/trac/wiki/FrequentlyAskedQuestions#WhycanttheTwistedsreactorberestarted
    from twisted.internet import default
    del sys.modules['twisted.internet.reactor']
    default.install()
    global reactor  # pylint: disable=W0603
    reactor = sys.modules['twisted.internet.reactor']

    try:
        reactor.connectTCP(server_config.host, server_config.port, factory)
        reactor.run()
        return factory.result
    finally:
        # re-startable reactor hack part 2.
        # twisted hijacks SIGINT and doesn't bother to un-hijack it when the reactor
        # stops. So we have to do it for it *rolls eye*.
        import signal
        signal.signal(signal.SIGINT, signal.default_int_handler)
        # OK, the reactor is also leaking file descriptors. Tracking down all
        # of them is non trivial, so instead we're just comparing the before
        # and after lists of open FDs for the current process, and closing all
        # new ones, as execute_command should never leave anything open after
        # it exits (even when downloading data files from the server).
        # TODO: This is way too hacky even compared to the rest of this function.
        #       Additionally, the current implementation ties this to UNIX,
        #       so in the long run, we need to do this properly and get the FDs
        #       from the reactor.
        after_fds = _get_open_fds()
        for fd in (after_fds - before_fds):
            try:
                os.close(int(fd[1:]))  # lsof 'f' records look like 'f<number>'
            except OSError:
                pass
        # Below is the alternative code that gets FDs from the reactor, however
        # at the moment it doesn't seem to get everything, which is why code
        # above is used instead.
        #for fd in readtor._selectables:
        #    os.close(fd)
        #reactor._poller.close()
def _get_open_fds():
    """Return the set of file descriptors currently open by this process.

    Used by execute_command() to detect (and close) descriptors leaked by the
    twisted reactor. Implemented via ``lsof`` on POSIX; returns an empty set
    elsewhere.
    """
    if os.name == 'posix':
        import subprocess
        pid = os.getpid()
        procs = subprocess.check_output(['lsof', '-w', '-Ff', '-p', str(pid)])
        return set(procs.split())
    else:
        # TODO: Implement the Windows equivalent.
        # BUG FIX: previously returned a list, which broke the caller's
        # set difference (`after_fds - before_fds`) on non-POSIX platforms.
        return set()
def run_send_command():
    """Main entry point when running as a script -- should not be invoked from another module."""
    parser = get_config_parser()
    parser.add_argument('command')
    parser.add_argument('-o', '--output-directory', metavar='DIR', default='.',
                        help='Directory used to output data files (defaults to the current directory).')
    parser.add_argument('--verbose', help='Produce verobose output.', action='store_true', default=False)
    args = parser.parse_args()
    if not args.device_config.labels:
        # Default the labels to PORT_0 .. PORT_<n-1>, one per resistor.
        args.device_config.labels = ['PORT_{}'.format(i) for i in xrange(len(args.device_config.resistor_values))]

    if args.verbose:
        log.start_logging('DEBUG')
    else:
        log.start_logging('INFO', fmt='%(levelname)-8s %(message)s')

    # 'configure' and 'get_data' need extra parameters attached; everything
    # else is sent as a bare command.
    if args.command == 'configure':
        args.device_config.validate()
        command = Command(args.command, config=args.device_config)
    elif args.command == 'get_data':
        command = Command(args.command, output_directory=args.output_directory)
    else:
        command = Command(args.command)

    result = execute_command(args.server_config, command)
    print result
    if result.data:
        print result.data


if __name__ == '__main__':
    run_send_command()

View File

@@ -0,0 +1,99 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import json
class Serializer(json.JSONEncoder):
    """JSON encoder that understands Serializable objects and Enum entries."""

    def default(self, o):  # pylint: disable=E0202
        # Serializable objects know how to render themselves; enum entries
        # serialize as their name string.
        if isinstance(o, Serializable):
            return o.serialize()
        if isinstance(o, Enum.EnumEntry):
            return o.name
        return super(Serializer, self).default(o)
class Serializable(object):
    """Mixin providing JSON (de)serialization of an object's attributes."""

    @classmethod
    def deserialize(cls, text):
        # Reconstruct an instance from a JSON object's key/value pairs.
        return cls(**json.loads(text))

    def serialize(self, d=None):
        # Serialize the supplied dict, defaulting to this object's attributes.
        target = self.__dict__ if d is None else d
        return json.dumps(target, cls=Serializer)
class DaqServerRequest(Serializable):
    """A command (with its parameter dict) sent from client to DAQ server."""

    def __init__(self, command, params=None):  # pylint: disable=W0231
        self.command = command
        self.params = params or {}  # never leave params as None
class DaqServerResponse(Serializable):
    """Status/message/data triple returned by the DAQ server."""

    def __init__(self, status, message=None, data=None):  # pylint: disable=W0231
        self.status = status
        # Collapse CRLFs so the message renders on a single line.
        if message:
            self.message = message.strip().replace('\r\n', ' ')
        else:
            self.message = ''
        self.data = data or {}

    def __str__(self):
        return '{} {}'.format(self.status, self.message or '')
class Enum(object):
    """
    Assuming MyEnum = Enum('A', 'B'),
    MyEnum.A and MyEnum.B are valid values.

    a = MyEnum.A
    (a == MyEnum.A) == True
    (a in MyEnum) == True
    MyEnum('A') == MyEnum.A
    str(MyEnum.A) == 'A'

    """

    class EnumEntry(object):
        # A single named member; compares equal to its name string.

        def __init__(self, name):
            self.name = name

        def __str__(self):
            return self.name

        def __cmp__(self, other):
            # Python 2 comparison protocol: order/equality by name string.
            return cmp(self.name, str(other))

    def __init__(self, *args):
        # Create one EnumEntry attribute per supplied name.
        for a in args:
            setattr(self, a, self.EnumEntry(a))

    def __call__(self, value):
        # Look up an entry by name, e.g. MyEnum('A') -> MyEnum.A.
        if value not in self.__dict__:
            raise ValueError('Not enum value: {}'.format(value))
        return self.__dict__[value]

    def __iter__(self):
        # Yield all entries (in no particular order).
        for e in self.__dict__:
            yield self.__dict__[e]


# Status values carried in DaqServerResponse messages.
Status = Enum('OK', 'OKISH', 'ERROR')

View File

@@ -0,0 +1,154 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from daqpower.common import Serializable
class ConfigurationError(Exception):
    """Raised when configuration passed into DaqServer is invalid."""
class DeviceConfiguration(Serializable):
    """Encapsulates configuration for the DAQ, typically passed from
    the client."""

    # Settings the client is allowed to set via command-line options.
    valid_settings = ['device_id', 'v_range', 'dv_range', 'sampling_rate', 'resistor_values', 'labels']

    default_device_id = 'Dev1'
    default_v_range = 2.5
    default_dv_range = 0.2
    default_sampling_rate = 10000
    # Channel map used in DAQ 6363 and similar.
    default_channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)

    @property
    def number_of_ports(self):
        # One measurement port per specified resistor.
        return len(self.resistor_values)

    def __init__(self, **kwargs):  # pylint: disable=W0231
        # Every key must be present in kwargs (pop() without a default raises
        # KeyError); falsy values fall back to the class-level defaults.
        try:
            self.device_id = kwargs.pop('device_id') or self.default_device_id
            self.v_range = float(kwargs.pop('v_range') or self.default_v_range)
            self.dv_range = float(kwargs.pop('dv_range') or self.default_dv_range)
            self.sampling_rate = int(kwargs.pop('sampling_rate') or self.default_sampling_rate)
            self.resistor_values = kwargs.pop('resistor_values') or []
            self.channel_map = kwargs.pop('channel_map') or self.default_channel_map
            self.labels = (kwargs.pop('labels') or
                           ['PORT_{}.csv'.format(i) for i in xrange(len(self.resistor_values))])
        except KeyError, e:
            raise ConfigurationError('Missing config: {}'.format(e.message))
        if kwargs:
            # Anything left over was not a recognised setting.
            raise ConfigurationError('Unexpected config: {}'.format(kwargs))

    def validate(self):
        # Sanity-check that resistors were supplied and labels match them 1:1.
        if not self.number_of_ports:
            raise ConfigurationError('No resistor values were specified.')
        if not len(self.resistor_values) == len(self.labels):
            message = 'The number of resistors ({}) does not match the number of labels ({})'
            raise ConfigurationError(message.format(len(self.resistor_values), len(self.labels)))

    def __str__(self):
        return self.serialize()

    __repr__ = __str__
class ServerConfiguration(object):
    """Client-side server configuration."""

    valid_settings = ['host', 'port']

    default_host = '127.0.0.1'
    default_port = 45677

    def __init__(self, **kwargs):
        # Falsy/missing values fall back to the defaults above.
        host = kwargs.pop('host', None)
        port = kwargs.pop('port', None)
        self.host = host if host else self.default_host
        self.port = port if port else self.default_port
        if kwargs:
            raise ConfigurationError('Unexpected config: {}'.format(kwargs))

    def validate(self):
        """Check that both host and port are present and well-formed."""
        if not self.host:
            raise ConfigurationError('Server host not specified.')
        if not self.port:
            raise ConfigurationError('Server port not specified.')
        if not isinstance(self.port, int):
            raise ConfigurationError('Server port must be an integer.')
class UpdateDeviceConfig(argparse.Action):
    """argparse action storing DAQ device settings on the hidden _device_config."""

    def __call__(self, parser, namespace, values, option_string=None):
        # '--v-range' -> 'v_range'
        setting = option_string.strip('-').replace('-', '_')
        if setting in DeviceConfiguration.valid_settings:
            setattr(namespace._device_config, setting, values)
        else:
            raise ConfigurationError('Unkown option: {}'.format(option_string))
class UpdateServerConfig(argparse.Action):
    """argparse action storing server settings on namespace.server_config."""

    def __call__(self, parser, namespace, values, option_string=None):
        # '--host' -> 'host'
        setting = option_string.strip('-').replace('-', '_')
        if setting in namespace.server_config.valid_settings:
            setattr(namespace.server_config, setting, values)
        else:
            raise ConfigurationError('Unkown option: {}'.format(option_string))
class ConfigNamespace(object):
    """argparse namespace keeping device settings separate from server settings."""

    class _N(object):
        # Plain holder for raw (unvalidated) device settings.

        _fields = ('device_id', 'v_range', 'dv_range', 'sampling_rate',
                   'resistor_values', 'labels', 'channel_map')

        def __init__(self):
            for field in self._fields:
                setattr(self, field, None)

    @property
    def device_config(self):
        # Materialize a DeviceConfiguration from the collected settings.
        return DeviceConfiguration(**self._device_config.__dict__)

    def __init__(self):
        self._device_config = self._N()
        self.server_config = ServerConfiguration()
class ConfigArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that always parses into a fresh ConfigNamespace."""

    def parse_args(self, *args, **kwargs):
        kwargs['namespace'] = ConfigNamespace()
        return super(ConfigArgumentParser, self).parse_args(*args, **kwargs)
def get_config_parser(server=True, device=True):
    """Build a ConfigArgumentParser pre-populated with DAQ device and/or
    server command-line options."""
    parser = ConfigArgumentParser()
    if device:
        # (flag, extra add_argument kwargs) pairs, registered in order.
        device_options = [
            ('--device-id', {}),
            ('--v-range', {'type': float}),
            ('--dv-range', {'type': float}),
            ('--sampling-rate', {'type': int}),
            ('--resistor-values', {'type': float, 'nargs': '*'}),
            ('--labels', {'nargs': '*'}),
        ]
        for flag, extra in device_options:
            parser.add_argument(flag, action=UpdateDeviceConfig, **extra)
    if server:
        parser.add_argument('--host', action=UpdateServerConfig)
        parser.add_argument('--port', action=UpdateServerConfig, type=int)
    return parser

View File

@@ -0,0 +1,265 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Creates a new DAQ device class. This class assumes that there is a
DAQ connected and mapped as Dev1. It assumes a specific wiring topology on the DAQ (it is not
meant to be a generic DAQ interface). The following diagram shows the wiring for one DaqDevice
port::
Port 0
========
| A0+ <--- Vr -------------------------|
| |
| A0- <--- GND -------------------// |
| |
| A1+ <--- V+ ------------|-------V+ |
| r | |
| A1- <--- Vr --/\/\/\----| |
| | |
| | |
| |--------------------------|
========
:number_of_ports: The number of ports connected on the DAQ. Each port requires 2 DAQ Channels
one for the source voltage and one for the Voltage drop over the
resistor r (V+ - Vr) allows us to detect the current.
:resistor_value: The resistance of r. Typically a few milliOhm
:downsample: The number of samples combined to create one Power point. If set to one
each sample corresponds to one reported power point.
:sampling_rate: The rate at which DAQ takes a sample from each channel.
"""
# pylint: disable=F0401,E1101,W0621
import os
import sys
import csv
import time
import threading
from Queue import Queue, Empty
import numpy
from PyDAQmx import Task
from PyDAQmx.DAQmxFunctions import DAQmxGetSysDevNames
from PyDAQmx.DAQmxTypes import int32, byref, create_string_buffer
from PyDAQmx.DAQmxConstants import (DAQmx_Val_Diff, DAQmx_Val_Volts, DAQmx_Val_GroupByScanNumber, DAQmx_Val_Auto,
DAQmx_Val_Acquired_Into_Buffer, DAQmx_Val_Rising, DAQmx_Val_ContSamps)
from daqpower import log
def list_available_devices():
    """Returns the list of DAQ devices visible to the driver."""
    bufsize = 2048  # Should be plenty for all but the most pathological of situations.
    buf = create_string_buffer('\000' * bufsize)
    DAQmxGetSysDevNames(buf, bufsize)  # fills buf with a comma-separated device list
    return buf.value.split(',')
class ReadSamplesTask(Task):
    """PyDAQmx Task that continuously samples a voltage pair per port and
    forwards raw sample buffers to ``consumer`` (expected to expose write())."""

    def __init__(self, config, consumer):
        Task.__init__(self)
        self.config = config      # DeviceConfiguration
        self.consumer = consumer
        # Two channels per port (V and dV), with one extra sample of slack
        # per channel per second.
        self.sample_buffer_size = (self.config.sampling_rate + 1) * self.config.number_of_ports * 2
        self.samples_read = int32()
        self.remainder = []
        # create voltage channels -- a (V, dV) differential pair per port,
        # using consecutive entries of the channel map
        for i in xrange(0, 2 * self.config.number_of_ports, 2):
            self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i]),
                                     '', DAQmx_Val_Diff,
                                     -config.v_range, config.v_range,
                                     DAQmx_Val_Volts, None)
            self.CreateAIVoltageChan('{}/ai{}'.format(config.device_id, config.channel_map[i + 1]),
                                     '', DAQmx_Val_Diff,
                                     -config.dv_range, config.dv_range,
                                     DAQmx_Val_Volts, None)
        # configure sampling rate (continuous acquisition)
        self.CfgSampClkTiming('',
                              self.config.sampling_rate,
                              DAQmx_Val_Rising,
                              DAQmx_Val_ContSamps,
                              self.config.sampling_rate)
        # register callbacks: EveryNCallback fires every half-second's worth
        # of samples; DoneCallback on task completion
        self.AutoRegisterEveryNSamplesEvent(DAQmx_Val_Acquired_Into_Buffer, self.config.sampling_rate // 2, 0)
        self.AutoRegisterDoneEvent(0)

    def EveryNCallback(self):
        # Drain everything currently buffered by the driver and hand it off
        # to the consumer together with the actual sample count.
        samples_buffer = numpy.zeros((self.sample_buffer_size,), dtype=numpy.float64)
        self.ReadAnalogF64(DAQmx_Val_Auto, 0.0, DAQmx_Val_GroupByScanNumber, samples_buffer,
                           self.sample_buffer_size, byref(self.samples_read), None)
        self.consumer.write((samples_buffer, self.samples_read.value))

    def DoneCallback(self, status):  # pylint: disable=W0613,R0201
        return 0  # The function should return an integer
class AsyncWriter(threading.Thread):
    """Daemon thread that consumes items from an internal queue and passes
    them to do_write(), which subclasses must implement."""

    def __init__(self, wait_period=1):
        super(AsyncWriter, self).__init__()
        self.daemon = True
        self.wait_period = wait_period  # seconds; queue poll / stop-check interval
        self.running = threading.Event()
        self._stop_signal = threading.Event()
        self._queue = Queue()

    def write(self, stuff):
        """Enqueue an item for writing; raises IOError if already stopped."""
        if self._stop_signal.is_set():
            # Fixed message typo: "Attempting to writer to" -> "write to".
            raise IOError('Attempting to write to {} after it has been closed.'.format(self.__class__.__name__))
        self._queue.put(stuff)

    def do_write(self, stuff):
        """Actually write one item; must be provided by subclasses."""
        raise NotImplementedError()

    def run(self):
        self.running.set()
        while True:
            # Only exit once the stop signal is set AND the queue has been
            # fully drained, so no enqueued items are lost.
            if self._stop_signal.is_set() and self._queue.empty():
                break
            try:
                self.do_write(self._queue.get(block=True, timeout=self.wait_period))
            except Empty:
                pass  # carry on
        self.running.clear()

    def stop(self):
        """Signal the thread to exit once its queue is drained."""
        self._stop_signal.set()

    def wait(self):
        """Block until the worker thread has finished processing."""
        while self.running.is_set():
            time.sleep(self.wait_period)
class PortWriter(object):
    """Writes (power, voltage) rows for a single port to a CSV file."""

    def __init__(self, path):
        self.path = path
        # buffering=0: unbuffered, so rows hit disk as soon as they are written
        self.fh = open(path, 'w', 0)
        self.writer = csv.writer(self.fh)
        # header row first
        self.writer.writerow(['power', 'voltage'])

    def write(self, row):
        # row is expected to be a [power, voltage] pair
        self.writer.writerow(row)

    def close(self):
        self.fh.close()

    def __del__(self):
        # Best-effort close if the owner forgot to.
        self.close()
class SamplePorcessorError(Exception):
    """Error raised by SampleProcessor.

    Note: the (misspelled) class name is preserved as-is, since callers
    may catch this exception by this exact name.
    """
    pass
class SampleProcessor(AsyncWriter):
    """AsyncWriter that converts interleaved (V, DV) sample buffers into
    per-port power/voltage CSV rows via one PortWriter per port."""

    def __init__(self, resistor_values, output_directory, labels):
        super(SampleProcessor, self).__init__()
        self.resistor_values = resistor_values
        self.output_directory = output_directory
        self.labels = labels
        self.number_of_ports = len(resistor_values)
        if len(self.labels) != self.number_of_ports:
            message = 'Number of labels ({}) does not match number of ports ({}).'
            raise SamplePorcessorError(message.format(len(self.labels), self.number_of_ports))
        self.port_writers = []

    def do_write(self, sample_tuple):
        samples, number_of_samples = sample_tuple
        # Samples are interleaved by scan: [V0, DV0, V1, DV1, ...] per scan.
        stride = self.number_of_ports * 2
        for base in xrange(0, number_of_samples * stride, stride):
            for port in xrange(self.number_of_ports):
                voltage = float(samples[base + 2 * port])
                delta_v = float(samples[base + 2 * port + 1])
                # P = V * I, where I = DV / R across the shunt resistor.
                power = voltage * (delta_v / self.resistor_values[port])
                self.port_writers[port].write([power, voltage])

    def start(self):
        # Open one CSV writer per port before the worker thread begins.
        for label in self.labels:
            self.port_writers.append(PortWriter(self.get_port_file_path(label)))
        super(SampleProcessor, self).start()

    def stop(self):
        # Stop the worker, wait for the queue to drain, then close all files.
        super(SampleProcessor, self).stop()
        self.wait()
        for writer in self.port_writers:
            writer.close()

    def get_port_file_path(self, port_id):
        if port_id not in self.labels:
            raise SamplePorcessorError('Invalid port ID: {}'.format(port_id))
        return os.path.join(self.output_directory, port_id + '.csv')

    def __del__(self):
        self.stop()
class DaqRunner(object):
    """Couples a ReadSamplesTask (producer) to a SampleProcessor (consumer)
    and manages the lifecycle of both."""

    @property
    def number_of_ports(self):
        return self.config.number_of_ports

    def __init__(self, config, output_directory):
        self.config = config
        self.processor = SampleProcessor(config.resistor_values, output_directory, config.labels)
        self.task = ReadSamplesTask(config, self.processor)
        self.is_running = False

    def start(self):
        # The processor must be accepting samples before the DAQ task
        # starts producing them.
        log.debug('Starting sample processor.')
        self.processor.start()
        log.debug('Starting DAQ Task.')
        self.task.StartTask()
        self.is_running = True
        log.debug('Runner started.')

    def stop(self):
        # Stop the producer first so no samples arrive after the processor
        # has shut down.
        self.is_running = False
        log.debug('Stopping DAQ Task.')
        self.task.StopTask()
        log.debug('Stopping sample processor.')
        self.processor.stop()
        log.debug('Runner stopped.')

    def get_port_file_path(self, port_id):
        return self.processor.get_port_file_path(port_id)
if __name__ == '__main__':
from collections import namedtuple
DeviceConfig = namedtuple('DeviceConfig', ['device_id', 'channel_map', 'resistor_values',
'v_range', 'dv_range', 'sampling_rate',
'number_of_ports', 'labels'])
channel_map = (0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23)
resistor_values = [0.005]
labels = ['PORT_0']
dev_config = DeviceConfig('Dev1', channel_map, resistor_values, 2.5, 0.2, 10000, len(resistor_values), labels)
if not len(sys.argv) == 3:
print 'Usage: {} OUTDIR DURATION'.format(os.path.basename(__file__))
sys.exit(1)
output_directory = sys.argv[1]
duration = float(sys.argv[2])
print "Avialable devices:", list_availabe_devices()
runner = DaqRunner(dev_config, output_directory)
runner.start()
time.sleep(duration)
runner.stop()

View File

@@ -0,0 +1,53 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from twisted.python import log
__all__ = ['debug', 'info', 'warning', 'error', 'critical', 'start_logging']


# Level-specific wrappers around twisted.python.log.msg. Defined with
# ``def`` rather than lambda assignment (PEP 8 E731); same names and
# single-positional-argument interface as before.
def debug(msg):
    log.msg(msg, logLevel=logging.DEBUG)


def info(msg):
    log.msg(msg, logLevel=logging.INFO)


def warning(msg):
    log.msg(msg, logLevel=logging.WARNING)


def error(msg):
    log.msg(msg, logLevel=logging.ERROR)


def critical(msg):
    log.msg(msg, logLevel=logging.CRITICAL)
class CustomLoggingObserver(log.PythonLoggingObserver):
    """Twisted log observer that forwards events to the standard logging
    module, mapping un-levelled, non-error events to DEBUG instead of the
    parent class's default of INFO."""

    def emit(self, eventDict):
        if 'logLevel' in eventDict:
            # An explicit level was attached to the event; honour it.
            level = eventDict['logLevel']
        elif eventDict['isError']:
            level = logging.ERROR
        else:
            # This DEBUG default (instead of INFO) is the sole reason this
            # subclass exists.
            level = logging.DEBUG
        text = log.textFromEventDict(eventDict)
        if text is None:
            # Nothing renderable in this event; drop it.
            return
        self.logger.log(level, text)
# Install the observer at import time so all Twisted log traffic is routed
# through the standard logging module.
logObserver = CustomLoggingObserver()
logObserver.start()
def start_logging(level, fmt='%(asctime)s %(levelname)-8s: %(message)s'):
    """Configure the root logger at the named level ('DEBUG', 'INFO', ...)
    with the given message format."""
    logging.basicConfig(format=fmt, level=getattr(logging, level))

View File

@@ -0,0 +1,480 @@
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101,W0613
from __future__ import division
import os
import sys
import socket
import argparse
import shutil
import time
from datetime import datetime
from zope.interface import implements
from twisted.protocols.basic import LineReceiver
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import reactor, interfaces
from twisted.internet.error import ConnectionLost, ConnectionDone
if __name__ == "__main__": # for debugging
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from daqpower import log
from daqpower.config import DeviceConfiguration
from daqpower.common import DaqServerRequest, DaqServerResponse, Status
try:
from daqpower.daq import DaqRunner, list_available_devices
except ImportError:
# May be using debug mode.
DaqRunner = None
list_available_devices = lambda : ['Dev1']
class ProtocolError(Exception):
    """Raised when a client request arrives in an invalid server state
    (e.g. start before configure)."""
    pass
class DummyDaqRunner(object):
    """Dummy stub used when running in debug mode: instead of sampling a
    DAQ, writes random gaussian power/voltage rows for each port."""

    num_rows = 200  # rows of fake data generated per port

    @property
    def number_of_ports(self):
        return self.config.number_of_ports

    def __init__(self, config, output_directory):
        log.info('Creating runner with {} {}'.format(config, output_directory))
        self.config = config
        self.output_directory = output_directory
        self.is_running = False

    def start(self):
        import csv
        import random
        log.info('runner started')
        for port_index in xrange(self.config.number_of_ports):
            label = self.config.labels[port_index]
            data = [[random.gauss(1.0, 1.0), random.gauss(1.0, 0.1)]
                    for _ in xrange(self.num_rows)]
            with open(self.get_port_file_path(label), 'wb') as wfh:
                csv.writer(wfh).writerows([['power', 'voltage']] + data)
        self.is_running = True

    def stop(self):
        self.is_running = False
        log.info('runner stopped')

    def get_port_file_path(self, port_id):
        if port_id in self.config.labels:
            return os.path.join(self.output_directory, '{}.csv'.format(port_id))
        else:
            raise Exception('Invalid port id: {}'.format(port_id))
class DaqServer(object):
    """Manages DAQ measurement sessions: configuration, start/stop, and
    per-session result directories under a base output directory."""

    def __init__(self, base_output_directory):
        self.base_output_directory = os.path.abspath(base_output_directory)
        if os.path.isdir(self.base_output_directory):
            log.info('Using output directory: {}'.format(self.base_output_directory))
        else:
            log.info('Creating new output directory: {}'.format(self.base_output_directory))
            os.makedirs(self.base_output_directory)
        self.runner = None
        self.output_directory = None
        self.labels = None

    def configure(self, config_string):
        """Deserialize and validate a device configuration and create a new
        runner for it. Returns a warning message if a previous session was
        still active, otherwise None."""
        message = None
        if self.runner:
            message = 'Configuring a new session before previous session has been terminated.'
            log.warning(message)
            if self.runner.is_running:
                self.runner.stop()
        config = DeviceConfiguration.deserialize(config_string)
        config.validate()
        self.output_directory = self._create_output_directory()
        self.labels = config.labels
        log.info('Writing port files to {}'.format(self.output_directory))
        self.runner = DaqRunner(config, self.output_directory)
        return message

    def start(self):
        """Start sampling. Returns a warning message if the runner had to
        be restarted, otherwise None. Raises ProtocolError if no session
        has been configured."""
        if not self.runner:
            raise ProtocolError('Start called before a session has been configured.')
        # Initialise message so the no-warning path does not return an
        # unbound local.
        message = None
        if not self.runner.is_running:
            self.runner.start()
        else:
            message = 'Calling start() before stop() has been called. Data up to this point will be lost.'
            log.warning(message)
            self.runner.stop()
            self.runner.start()
        return message

    def stop(self):
        """Stop sampling. Returns a warning message if stop() was called
        without a preceding start(), otherwise None. Raises ProtocolError
        if no session has been configured."""
        if not self.runner:
            raise ProtocolError('Stop called before a session has been configured.')
        message = None
        if self.runner.is_running:
            self.runner.stop()
        else:
            message = 'Attempting to stop() before start() was invoked.'
            log.warning(message)
            self.runner.stop()
        return message

    def list_devices(self):
        return list_available_devices()

    def list_ports(self):
        return self.labels

    def list_port_files(self):
        """Return the labels of ports for which a result file exists."""
        if not self.runner:
            raise ProtocolError('Attempting to list port files before session has been configured.')
        ports_with_files = []
        for port_id in self.labels:
            path = self.get_port_file_path(port_id)
            if os.path.isfile(path):
                ports_with_files.append(port_id)
        return ports_with_files

    def get_port_file_path(self, port_id):
        if not self.runner:
            # Fixed typo in message: "Attepting" -> "Attempting".
            raise ProtocolError('Attempting to get port file path before session has been configured.')
        return self.runner.get_port_file_path(port_id)

    def terminate(self):
        """Tear down the current session and delete its output directory.
        Returns a warning message when termination was out of order."""
        message = None
        if self.runner:
            if self.runner.is_running:
                message = 'Terminating session before runner has been stopped.'
                log.warning(message)
                self.runner.stop()
            self.runner = None
            if self.output_directory and os.path.isdir(self.output_directory):
                shutil.rmtree(self.output_directory)
            self.output_directory = None
            log.info('Session terminated.')
        else:  # Runner has not been created.
            message = 'Attempting to close session before it has been configured.'
            log.warning(message)
        return message

    def _create_output_directory(self):
        # Timestamped (to microseconds) subdirectory so successive sessions
        # never collide.
        basename = datetime.now().strftime('%Y-%m-%d_%H%M%S%f')
        dirname = os.path.join(self.base_output_directory, basename)
        os.makedirs(dirname)
        return dirname

    def __del__(self):
        if self.runner:
            self.runner.stop()

    def __str__(self):
        return '({})'.format(self.base_output_directory)

    __repr__ = __str__
class DaqControlProtocol(LineReceiver): # pylint: disable=W0223
def __init__(self, daq_server):
self.daq_server = daq_server
self.factory = None
def lineReceived(self, line):
line = line.strip()
log.info('Received: {}'.format(line))
try:
request = DaqServerRequest.deserialize(line)
except Exception, e: # pylint: disable=W0703
self.sendError('Received bad request ({}: {})'.format(e.__class__.__name__, e.message))
else:
self.processRequest(request)
def processRequest(self, request):
try:
if request.command == 'configure':
self.configure(request)
elif request.command == 'start':
self.start(request)
elif request.command == 'stop':
self.stop(request)
elif request.command == 'list_devices':
self.list_devices(request)
elif request.command == 'list_ports':
self.list_ports(request)
elif request.command == 'list_port_files':
self.list_port_files(request)
elif request.command == 'pull':
self.pull_port_data(request)
elif request.command == 'close':
self.terminate(request)
else:
self.sendError('Received unknown command: {}'.format(request.command))
except Exception, e: # pylint: disable=W0703
self.sendError('{}: {}'.format(e.__class__.__name__, e.message))
def configure(self, request):
if 'config' in request.params:
result = self.daq_server.configure(request.params['config'])
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
else:
self.sendError('Invalid config; config string not provided.')
def start(self, request):
result = self.daq_server.start()
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
def stop(self, request):
result = self.daq_server.stop()
if not result:
self.sendResponse(Status.OK)
else:
self.sendResponse(Status.OKISH, message=result)
def pull_port_data(self, request):
if 'port_id' in request.params:
port_id = request.params['port_id']
port_file = self.daq_server.get_port_file_path(port_id)
if os.path.isfile(port_file):
port = self._initiate_file_transfer(port_file)
self.sendResponse(Status.OK, data={'port_number': port})
else:
self.sendError('File for port {} does not exist.'.format(port_id))
else:
self.sendError('Invalid pull request; port id not provided.')
def list_devices(self, request):
devices = self.daq_server.list_devices()
self.sendResponse(Status.OK, data={'devices': devices})
def list_ports(self, request):
port_labels = self.daq_server.list_ports()
self.sendResponse(Status.OK, data={'ports': port_labels})
def list_port_files(self, request):
port_labels = self.daq_server.list_port_files()
self.sendResponse(Status.OK, data={'ports': port_labels})
def terminate(self, request):
status = Status.OK
message = ''
if self.factory.transfer_sessions:
message = 'Terminating with file tranfer sessions in progress. '
log.warning(message)
for session in self.factory.transfer_sessions:
self.factory.transferComplete(session)
message += self.daq_server.terminate() or ''
if message:
status = Status.OKISH
self.sendResponse(status, message)
def sendError(self, message):
log.error(message)
self.sendResponse(Status.ERROR, message)
def sendResponse(self, status, message=None, data=None):
response = DaqServerResponse(status, message=message, data=data)
self.sendLine(response.serialize())
def sendLine(self, line):
log.info('Responding: {}'.format(line))
LineReceiver.sendLine(self, line.replace('\r\n',''))
def _initiate_file_transfer(self, filepath):
sender_factory = FileSenderFactory(filepath, self.factory)
connector = reactor.listenTCP(0, sender_factory)
self.factory.transferInitiated(sender_factory, connector)
return connector.getHost().port
class DaqFactory(Factory):
    """Builds DaqControlProtocol instances and tracks open file-transfer
    sessions so they can be reaped on disconnect or timeout."""

    protocol = DaqControlProtocol
    check_alive_period = 5 * 60       # seconds between pulse() checks
    max_transfer_lifetime = 30 * 60   # seconds before a transfer is timed out

    def __init__(self, server):
        self.server = server
        self.transfer_sessions = {}   # session -> (start_time, connector)

    def buildProtocol(self, addr):
        proto = DaqControlProtocol(self.server)
        proto.factory = self
        reactor.callLater(self.check_alive_period, self.pulse)
        return proto

    def clientConnectionLost(self, connector, reason):
        log.msg('client connection lost: {}.'.format(reason))
        if not isinstance(reason, ConnectionLost):
            log.msg('ERROR: Client terminated connection mid-transfer.')
            # Iterate over a copy: transferComplete() deletes entries from
            # transfer_sessions, which would otherwise raise "dictionary
            # changed size during iteration".
            for session in list(self.transfer_sessions):
                self.transferComplete(session)

    def transferInitiated(self, session, connector):
        self.transfer_sessions[session] = (time.time(), connector)

    def transferComplete(self, session, reason='OK'):
        if reason != 'OK':
            log.error(reason)
        # Stop listening on the transfer's dedicated port, then forget it.
        self.transfer_sessions[session][1].stopListening()
        del self.transfer_sessions[session]

    def pulse(self):
        """Close down any file transfer sessions that have been open for too long."""
        current_time = time.time()
        # Copy the keys: transferComplete() mutates transfer_sessions.
        for session in list(self.transfer_sessions):
            start_time, conn = self.transfer_sessions[session]
            if (current_time - start_time) > self.max_transfer_lifetime:
                message = '{} session on port {} timed out'
                self.transferComplete(session, message.format(session, conn.getHost().port))
        if self.transfer_sessions:
            reactor.callLater(self.check_alive_period, self.pulse)

    def __str__(self):
        return '<DAQ {}>'.format(self.server)

    __repr__ = __str__
class FileReader(object):
    """IPushProducer that streams a text file line-by-line to a protocol's
    transport, normalising line endings to CRLF. Production is driven by
    the transport's flow control (pause/resume)."""

    implements(interfaces.IPushProducer)

    def __init__(self, filepath):
        self.fh = open(filepath)
        self.proto = None     # protocol attached later via setProtocol()
        self.done = False     # True once the whole file has been sent
        self._paused = True   # start paused; transport resumes us

    def setProtocol(self, proto):
        self.proto = proto

    def resumeProducing(self):
        if not self.proto:
            raise ProtocolError('resumeProducing called with no protocol set.')
        self._paused = False
        try:
            # Push lines until the transport pauses us again or the file
            # is exhausted.
            while not self._paused:
                line = self.fh.next().rstrip('\n') + '\r\n'
                self.proto.transport.write(line)
        except StopIteration:
            # End of file: all data has been written.
            log.debug('Sent everything.')
            self.stopProducing()

    def pauseProducing(self):
        self._paused = True

    def stopProducing(self):
        self.done = True
        self.fh.close()
        self.proto.transport.unregisterProducer()
        self.proto.transport.loseConnection()
class FileSenderProtocol(Protocol):
    """Connection-side half of a file transfer; drives a FileReader."""

    def __init__(self, reader):
        self.reader = reader
        self.factory = None

    def connectionMade(self):
        # Hand flow control of this connection over to the reader, then
        # kick off production.
        self.transport.registerProducer(self.reader, True)
        self.reader.resumeProducing()

    def connectionLost(self, reason=ConnectionDone):
        if self.reader.done:
            # Whole file was sent -- report success to the owning factory.
            self.factory.transferComplete()
        else:
            # Client went away mid-transfer; stop producing cleanly.
            self.reader.pauseProducing()
            self.transport.unregisterProducer()
class FileSenderFactory(Factory):
    """Factory serving a single file on a dedicated listening port; the
    FileReader is created lazily on the first connection."""

    @property
    def done(self):
        # None until a connection has been made; thereafter mirrors the
        # reader's completion flag.
        return self.reader.done if self.reader else None

    def __init__(self, path, owner):
        self.path = os.path.abspath(path)
        self.reader = None
        self.owner = owner

    def buildProtocol(self, addr):
        if not self.reader:
            self.reader = FileReader(self.path)
        proto = FileSenderProtocol(self.reader)
        proto.factory = self
        self.reader.setProtocol(proto)
        return proto

    def transferComplete(self):
        # Forward completion to the owner (the DaqFactory) keyed on self.
        self.owner.transferComplete(self)

    def __hash__(self):
        return hash(self.path)

    def __str__(self):
        return '<FileSender {}>'.format(self.path)

    __repr__ = __str__
def run_server():
    """Parse command-line arguments and run the DAQ power server."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--directory', help='Working directory', metavar='DIR', default='.')
    parser.add_argument('-p', '--port', help='port the server will listen on.',
                        metavar='PORT', default=45677, type=int)
    parser.add_argument('--debug', help='Run in debug mode (no DAQ connected).',
                        action='store_true', default=False)
    # Fixed typo in user-visible help text: "verobose" -> "verbose".
    parser.add_argument('--verbose', help='Produce verbose output.', action='store_true', default=False)
    args = parser.parse_args()
    if args.debug:
        global DaqRunner  # pylint: disable=W0603
        DaqRunner = DummyDaqRunner
    else:
        if not DaqRunner:
            raise ImportError('DaqRunner')
    if args.verbose or args.debug:
        log.start_logging('DEBUG')
    else:
        log.start_logging('INFO')
    server = DaqServer(args.directory)
    # Removed a discarded trailing .getHost() call; listenTCP alone is
    # sufficient here.
    reactor.listenTCP(args.port, DaqFactory(server))
    hostname = socket.gethostbyname(socket.gethostname())
    log.info('Listening on {}:{}'.format(hostname, args.port))
    reactor.run()

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env python
from daqpower.server import run_server
run_server()

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env python
from daqpower.client import run_send_command
run_send_command()

52
wlauto/external/daq_server/src/setup.py vendored Normal file
View File

@@ -0,0 +1,52 @@
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from distutils.core import setup
import daqpower
# Suppress the distutils warning about install_requires: plain distutils
# does not understand this option, but it is harmless here.
warnings.filterwarnings('ignore', "Unknown distribution option: 'install_requires'")

# Package metadata; the version is read from the daqpower package itself
# so it is defined in exactly one place.
params = dict(
    name='daqpower',
    version=daqpower.__version__,
    packages=[
        'daqpower',
    ],
    scripts=[
        'scripts/run-daq-server',
        'scripts/send-daq-command',
    ],
    url='N/A',
    maintainer='workload-automation',
    maintainer_email='workload-automation@arm.com',
    install_requires=[
        'twisted',
        'PyDAQmx',
    ],
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'License :: Other/Proprietary License',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.7',
    ],
)

setup(**params)

12
wlauto/external/louie/LICENSE vendored Normal file
View File

@@ -0,0 +1,12 @@
This directory contains the Louie package, which has been modified by ARM Ltd.
The original Louie package is licensed under the BSD license; ARM Ltd.'s
changes are licensed under the Apache version 2 license.
Original Louie package may be found here:
https://pypi.python.org/pypi/Louie/1.1
The text of the BSD License may be viewed here:
http://opensource.org/licenses/bsd-license.php

46
wlauto/external/louie/__init__.py vendored Normal file
View File

@@ -0,0 +1,46 @@
__all__ = [
'dispatcher',
'error',
'plugin',
'robustapply',
'saferef',
'sender',
'signal',
'version',
'connect',
'disconnect',
'get_all_receivers',
'reset',
'send',
'send_exact',
'send_minimal',
'send_robust',
'install_plugin',
'remove_plugin',
'Plugin',
'QtWidgetPlugin',
'TwistedDispatchPlugin',
'Anonymous',
'Any',
'All',
'Signal',
]
import louie.dispatcher, louie.error, louie.plugin, louie.robustapply, \
louie.saferef, louie.sender, louie.signal, louie.version
from louie.dispatcher import \
connect, disconnect, get_all_receivers, reset, \
send, send_exact, send_minimal, send_robust
from louie.plugin import \
install_plugin, remove_plugin, Plugin, \
QtWidgetPlugin, TwistedDispatchPlugin
from louie.sender import Anonymous, Any
from louie.signal import All, Signal

591
wlauto/external/louie/dispatcher.py vendored Normal file
View File

@@ -0,0 +1,591 @@
"""Multiple-producer-multiple-consumer signal-dispatching.
``dispatcher`` is the core of Louie, providing the primary API and the
core logic for the system.
Internal attributes:
- ``WEAKREF_TYPES``: Tuple of types/classes which represent weak
references to receivers, and thus must be dereferenced on retrieval
to retrieve the callable object
- ``connections``::
{ senderkey (id) : { signal : [receivers...] } }
- ``senders``: Used for cleaning up sender references on sender
deletion::
{ senderkey (id) : weakref(sender) }
- ``senders_back``: Used for cleaning up receiver references on receiver
deletion::
{ receiverkey (id) : [senderkey (id)...] }
"""
import os
import weakref
try:
set
except NameError:
from sets import Set as set, ImmutableSet as frozenset
from louie import error
from louie import robustapply
from louie import saferef
from louie.sender import Any, Anonymous
from louie.signal import All
from prioritylist import PriorityList
# Support for statistics.
if __debug__:
    # Counters incremented by connect()/disconnect()/send(); dumped at
    # interpreter exit when PYDISPATCH_STATS is set in the environment.
    connects = 0
    disconnects = 0
    sends = 0

    def print_stats():
        # Python 2 print statement; prints the accumulated counters.
        print ('\n'
               'Louie connects: %i\n'
               'Louie disconnects: %i\n'
               'Louie sends: %i\n'
               '\n') % (connects, disconnects, sends)

    if 'PYDISPATCH_STATS' in os.environ:
        import atexit
        atexit.register(print_stats)

# Types that represent weak references to receivers and must be
# dereferenced on retrieval (see module docstring).
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)

# { senderkey (id) : { signal : [receivers...] } }
connections = {}
# { senderkey (id) : weakref(sender) } -- for cleanup on sender deletion.
senders = {}
# { receiverkey (id) : [senderkey (id)...] } -- for cleanup on receiver deletion.
senders_back = {}
# Currently installed plugin objects, consulted by live_receivers()/send().
plugins = []
def reset():
    """Reset Louie's global routing state.

    Useful during unit testing. Should be avoided otherwise.
    """
    global connections, senders, senders_back, plugins
    # Rebind (rather than mutate) each table so any stale external
    # references are simply abandoned.
    connections = {}
    senders = {}
    senders_back = {}
    plugins = []
def connect(receiver, signal=All, sender=Any, weak=True, priority=0):
    """Connect ``receiver`` to ``sender`` for ``signal``.

    - ``receiver``: hashable callable that is to receive signals. If
      ``weak`` is True it must be weak-referencable (via
      ``saferef.safe_ref()``). Argument filtering is handled by the
      ``robustapply`` machinery at send time.
    - ``signal``: the signal to respond to; ``All`` means every signal
      from the indicated sender. Must be hashable and not ``None``
      (``DispatcherTypeError`` raised on ``None``).
    - ``sender``: the sender to respond to. ``Any`` matches any sender;
      ``Anonymous`` matches sends that do not specify a sender.
    - ``weak``: whether to hold the receiver by weak reference (default
      True); pass False to keep a strong reference.
    - ``priority``: specifies the priority by which a receiver should
      get notified.

    Returns ``None``, may raise ``DispatcherTypeError``.
    """
    if signal is None:
        raise error.DispatcherTypeError(
            'Signal cannot be None (receiver=%r sender=%r)'
            % (receiver, sender))
    if weak:
        receiver = saferef.safe_ref(receiver, on_delete=_remove_receiver)
    senderkey = id(sender)
    # dict.has_key() is deprecated (and removed in Python 3); use `in`.
    if senderkey in connections:
        signals = connections[senderkey]
    else:
        connections[senderkey] = signals = {}
    # Keep track of senders for cleanup.
    # Is Anonymous something we want to clean up?
    if sender not in (None, Anonymous, Any):
        def remove(object, senderkey=senderkey):
            _remove_sender(senderkey=senderkey)
        # Skip objects that can not be weakly referenced, which means
        # they won't be automatically cleaned up, but that's too bad.
        try:
            weak_sender = weakref.ref(sender, remove)
            senders[senderkey] = weak_sender
        except:
            pass
    receiver_id = id(receiver)
    # Get the current receiver list and remove any existing references to
    # this receiver in it, including back-references.
    if signal in signals:
        receivers = signals[signal]
        _remove_old_back_refs(senderkey, signal, receiver, receivers)
    else:
        receivers = signals[signal] = PriorityList()
    try:
        current = senders_back.get(receiver_id)
        if current is None:
            senders_back[receiver_id] = current = []
        if senderkey not in current:
            current.append(senderkey)
    except:
        pass
    receivers.add(receiver, priority)
    # Update stats.
    if __debug__:
        global connects
        connects += 1
def disconnect(receiver, signal=All, sender=Any, weak=True):
    """Disconnect ``receiver`` from ``sender`` for ``signal``.

    Reverses ``connect``: the ``(receiver, signal, sender, weak)`` tuple
    acts (logically) as the key deleted from the internal routing tables.
    Disconnecting is only needed to stop routing to a *live* object --
    routes for deleted objects are removed automatically.

    Returns ``None``, may raise ``DispatcherTypeError`` or
    ``DispatcherKeyError``.
    """
    if signal is None:
        raise error.DispatcherTypeError(
            'Signal cannot be None (receiver=%r sender=%r)'
            % (receiver, sender))
    if weak:
        receiver = saferef.safe_ref(receiver)
    senderkey = id(sender)
    try:
        receivers = connections[senderkey][signal]
    except KeyError:
        raise error.DispatcherKeyError(
            'No receivers found for signal %r from sender %r'
            % (signal, sender)
        )
    try:
        # _remove_old_back_refs also removes the entry from `receivers`.
        _remove_old_back_refs(senderkey, signal, receiver, receivers)
    except ValueError:
        raise error.DispatcherKeyError(
            'No connection to receiver %s for signal %s from sender %s'
            % (receiver, signal, sender)
        )
    _cleanup_connections(senderkey, signal)
    # Update stats.
    if __debug__:
        global disconnects
        disconnects += 1
def get_receivers(sender=Any, signal=All):
    """Get the raw receiver list for a (sender, signal) pair.

    The returned value may be the actual list stored in the connections
    table, so treat it as a read-only iterable/truth value. Normally you
    would use ``live_receivers(get_receivers(...))`` to obtain the actual
    receiver objects.
    """
    signal_map = connections.get(id(sender))
    if signal_map is None:
        return []
    return signal_map.get(signal, [])
def live_receivers(receivers):
    """Yield resolved, live receiver callables from a receiver sequence.

    Weak references are dereferenced; dead references and receivers that
    any installed plugin reports as not live are skipped.
    """
    for receiver in receivers:
        if isinstance(receiver, WEAKREF_TYPES):
            receiver = receiver()  # dereference the weak reference
        if receiver is None:
            continue
        # Every installed plugin must agree the receiver is still live.
        if all(plugin.is_live(receiver) for plugin in plugins):
            yield receiver
def get_all_receivers(sender=Any, signal=All):
    """Get list of all receivers from global tables.

    This gets all receivers which should receive the given signal from
    sender; each receiver is produced only once by the resulting
    generator.
    """
    yielded = set()
    for receivers in (
        # Get receivers that receive *this* signal from *this* sender.
        get_receivers(sender, signal),
        # Add receivers that receive *all* signals from *this* sender.
        get_receivers(sender, All),
        # Add receivers that receive *this* signal from *any* sender.
        get_receivers(Any, signal),
        # Add receivers that receive *all* signals from *any* sender.
        get_receivers(Any, All),
    ):
        for receiver in receivers:
            if receiver:  # filter out dead instance-method weakrefs
                try:
                    # PEP 8 idiom: `x not in y` rather than `not x in y`.
                    if receiver not in yielded:
                        yielded.add(receiver)
                        yield receiver
                except TypeError:
                    # dead weakrefs raise TypeError on hash...
                    pass
def send(signal=All, sender=Anonymous, *arguments, **named):
    """Send ``signal`` from ``sender`` to all connected receivers.

    - ``signal``: (hashable) signal value; see ``connect`` for details.
    - ``sender``: the sender of the signal. ``Any`` reaches only receivers
      registered for ``Any``; ``Anonymous`` reaches receivers registered
      for ``Anonymous`` or ``Any``; otherwise any Python object.
    - ``arguments``: positional arguments passed to *all* receivers
      unchanged (may raise TypeError for receivers that do not accept
      them); applied before named arguments.
    - ``named``: keyword arguments, filtered per-receiver by the
      ``robustapply`` machinery to those the receiver accepts.

    Returns a list of ``(receiver, response)`` pairs. An exception raised
    by a receiver propagates out of ``send``, terminating the dispatch
    loop, so later receivers may not be called.
    """
    responses = []
    for original in live_receivers(get_all_receivers(sender, signal)):
        # Let installed plugins wrap the receiver before invocation.
        wrapped = original
        for plugin in plugins:
            wrapped = plugin.wrap_receiver(wrapped)
        response = robustapply.robust_apply(
            wrapped, original,
            signal=signal,
            sender=sender,
            *arguments,
            **named
        )
        responses.append((wrapped, response))
    # Update stats.
    if __debug__:
        global sends
        sends += 1
    return responses
def send_minimal(signal=All, sender=Anonymous, *arguments, **named):
    """Like ``send``, but does not attach ``signal`` and ``sender``
    arguments to the call to the receiver."""
    # Call each receiver with whatever arguments it can accept.
    # Return a list of tuple pairs [(receiver, response), ... ].
    responses = []
    for receiver in live_receivers(get_all_receivers(sender, signal)):
        # Wrap receiver using installed plugins.
        original = receiver
        for plugin in plugins:
            receiver = plugin.wrap_receiver(receiver)
        # Unlike ``send``, no signal=/sender= keywords are injected.
        response = robustapply.robust_apply(
            receiver, original,
            *arguments,
            **named
        )
        responses.append((receiver, response))
    # Update stats (module-level dispatch counter, debug builds only).
    if __debug__:
        global sends
        sends += 1
    return responses
def send_exact(signal=All, sender=Anonymous, *arguments, **named):
    """Send ``signal`` only to receivers registered for exact message.
    ``send_exact`` allows for avoiding ``Any``/``Anonymous`` registered
    handlers, sending only to those receivers explicitly registered
    for a particular signal on a particular sender.
    Return a list of tuple pairs ``[(receiver, response), ...]``.
    """
    responses = []
    # Note: uses get_receivers (exact match) rather than
    # get_all_receivers (which also includes Any/All registrations).
    for receiver in live_receivers(get_receivers(sender, signal)):
        # Wrap receiver using installed plugins.
        original = receiver
        for plugin in plugins:
            receiver = plugin.wrap_receiver(receiver)
        response = robustapply.robust_apply(
            receiver, original,
            signal=signal,
            sender=sender,
            *arguments,
            **named
        )
        responses.append((receiver, response))
    return responses
def send_robust(signal=All, sender=Anonymous, *arguments, **named):
    """Send ``signal`` from ``sender`` to all connected receivers catching
    errors
    - ``signal``: (Hashable) signal value, see connect for details
    - ``sender``: The sender of the signal.
      If ``Any``, only receivers registered for ``Any`` will receive the
      message.
      If ``Anonymous``, only receivers registered to receive messages
      from ``Anonymous`` or ``Any`` will receive the message.
      Otherwise can be any Python object (normally one registered with
      a connect if you actually want something to occur).
    - ``arguments``: Positional arguments which will be passed to *all*
      receivers. Note that this may raise ``TypeError`` if the receivers
      do not allow the particular arguments. Note also that arguments
      are applied before named arguments, so they should be used with
      care.
    - ``named``: Named arguments which will be filtered according to the
      parameters of the receivers to only provide those acceptable to
      the receiver.
    Return a list of tuple pairs ``[(receiver, response), ... ]``
    If any receiver raises an error (specifically, any subclass of
    ``Exception``), the error instance is returned as the result for
    that receiver.
    """
    # Call each receiver with whatever arguments it can accept.
    # Return a list of tuple pairs [(receiver, response), ... ].
    responses = []
    for receiver in live_receivers(get_all_receivers(sender, signal)):
        original = receiver
        for plugin in plugins:
            receiver = plugin.wrap_receiver(receiver)
        try:
            response = robustapply.robust_apply(
                receiver, original,
                signal=signal,
                sender=sender,
                *arguments,
                **named
            )
        except Exception, err:
            # Unlike ``send``, failures do not abort the dispatch loop;
            # the exception instance becomes the receiver's response.
            responses.append((receiver, err))
        else:
            responses.append((receiver, response))
    return responses
def _remove_receiver(receiver):
    """Remove ``receiver`` from connections.
    Called (typically as a weakref callback) when a receiver dies.
    Returns False during interpreter shutdown when the module-level
    tables have already been torn down; otherwise returns None.
    """
    if not senders_back:
        # During module cleanup the mapping will be replaced with None.
        return False
    backKey = id(receiver)
    # Walk every sender this receiver was connected to and purge it.
    for senderkey in senders_back.get(backKey, ()):
        try:
            signals = connections[senderkey].keys()
        except KeyError:
            pass
        else:
            for signal in signals:
                try:
                    receivers = connections[senderkey][signal]
                except KeyError:
                    pass
                else:
                    try:
                        # Best-effort removal: receiver may already be gone.
                        receivers.remove(receiver)
                    except Exception:
                        pass
                    _cleanup_connections(senderkey, signal)
    # Drop the reverse mapping entry for this receiver, if present.
    try:
        del senders_back[backKey]
    except KeyError:
        pass
def _cleanup_connections(senderkey, signal):
    """Delete empty signals for ``senderkey``. Delete ``senderkey`` if
    empty."""
    try:
        receivers = connections[senderkey][signal]
    except KeyError:
        # Entry already removed; nothing to clean up.  (Narrowed from a
        # bare ``except:`` which would also have swallowed SystemExit
        # and KeyboardInterrupt; only KeyError is expected here.)
        pass
    else:
        if not receivers:
            # No more connected receivers. Therefore, remove the signal.
            try:
                signals = connections[senderkey]
            except KeyError:
                pass
            else:
                del signals[signal]
                if not signals:
                    # No more signal connections. Therefore, remove the sender.
                    _remove_sender(senderkey)
def _remove_sender(senderkey):
    """Remove ``senderkey`` from connections."""
    _remove_back_refs(senderkey)
    try:
        del connections[senderkey]
    except KeyError:
        pass
    # Senderkey will only be in senders dictionary if sender
    # could be weakly referenced.
    try:
        del senders[senderkey]
    except KeyError:
        # Narrowed from a bare ``except:`` — only a missing key is
        # expected from the ``del`` on a dict.
        pass
def _remove_back_refs(senderkey):
    """Remove all back-references to this ``senderkey``.
    For each receiver connected to ``senderkey``, drop the reverse
    mapping entry in ``senders_back``.
    """
    try:
        signals = connections[senderkey]
    except KeyError:
        # Sender has no connections; the assignment below is dead code
        # kept for symmetry (``signals`` is never read afterwards).
        signals = None
    else:
        # Py2 iteration; ``signal`` itself is unused, only receivers matter.
        for signal, receivers in signals.iteritems():
            for receiver in receivers:
                _kill_back_ref(receiver, senderkey)
def _remove_old_back_refs(senderkey, signal, receiver, receivers):
    """Kill old ``senders_back`` references from ``receiver``.
    This guards against multiple registration of the same receiver for
    a given signal and sender leaking memory as old back reference
    records build up.
    Also removes old receiver instance from receivers.
    Returns True when an old registration was found and removed,
    False otherwise.
    """
    try:
        index = receivers.index(receiver)
        # need to scan back references here and remove senderkey
    except ValueError:
        # Receiver not previously registered; nothing to remove.
        return False
    else:
        old_receiver = receivers[index]
        del receivers[index]
        found = 0
        # NOTE(review): this looks up ``connections`` keyed by *signal*,
        # whereas the table is normally keyed by sender-id; presumably
        # this was meant to be ``connections.get(senderkey)`` — same
        # quirk exists in upstream PyDispatcher.  Verify before changing.
        signals = connections.get(signal)
        if signals is not None:
            for sig, recs in connections.get(signal, {}).iteritems():
                if sig != signal:
                    for rec in recs:
                        if rec is old_receiver:
                            found = 1
                            break
            if not found:
                # Old receiver no longer referenced anywhere else; drop
                # its back-reference to this sender.
                _kill_back_ref(old_receiver, senderkey)
                return True
        return False
def _kill_back_ref(receiver, senderkey):
    """Do actual removal of back reference from ``receiver`` to
    ``senderkey``.  Always returns True."""
    receiverkey = id(receiver)
    senders = senders_back.get(receiverkey, ())
    # Remove every occurrence of senderkey (it may have been recorded
    # more than once for multiply-registered receivers).
    while senderkey in senders:
        try:
            senders.remove(senderkey)
        except ValueError:
            # Narrowed from a bare ``except:``; list.remove raises
            # ValueError when the item has vanished (e.g. concurrent
            # cleanup), which is the only failure we defend against.
            break
    if not senders:
        # No back references left for this receiver; drop its entry.
        try:
            del senders_back[receiverkey]
        except KeyError:
            pass
    return True

22
wlauto/external/louie/error.py vendored Normal file
View File

@@ -0,0 +1,22 @@
"""Error types for Louie."""
class LouieError(Exception):
    """Base class for all Louie errors."""
class DispatcherError(LouieError):
    """Base class for all dispatcher errors."""
class DispatcherKeyError(KeyError, DispatcherError):
    """Error raised when an unknown (sender, signal) pair is specified."""
class DispatcherTypeError(TypeError, DispatcherError):
    """Error raised when an inappropriate signal type (e.g. None) is
    specified."""
class PluginTypeError(TypeError, LouieError):
    """Error raised when trying to install more than one plugin of a
    certain type."""

108
wlauto/external/louie/plugin.py vendored Normal file
View File

@@ -0,0 +1,108 @@
"""Common plugins for Louie."""
from louie import dispatcher
from louie import error
def install_plugin(plugin):
    """Install a plugin instance; at most one plugin per concrete class
    may be installed at a time (raises PluginTypeError otherwise)."""
    cls = plugin.__class__
    if any(existing.__class__ is cls for existing in dispatcher.plugins):
        raise error.PluginTypeError(
            'Plugin of type %r already installed.' % cls)
    dispatcher.plugins.append(plugin)
def remove_plugin(plugin):
    """Uninstall a previously installed plugin instance.
    Raises ValueError if the plugin was never installed."""
    dispatcher.plugins.remove(plugin)
class Plugin(object):
    """Base class for Louie plugins.
    Plugins are used to extend or alter the behavior of Louie
    in a uniform way without having to modify the Louie code
    itself.
    """
    def is_live(self, receiver):
        """Return True if the receiver is still live.
        Only called for receivers who have already been determined to
        be live by default Louie semantics.
        """
        # Default: treat everything as live.
        return True
    def wrap_receiver(self, receiver):
        """Return a callable that passes arguments to the receiver.
        Useful when you want to change the behavior of all receivers.
        """
        # Default: identity wrapper (no behavior change).
        return receiver
class QtWidgetPlugin(Plugin):
    """A Plugin for Louie that knows how to handle Qt widgets
    when using PyQt built with SIP 4 or higher.
    Weak references are not useful when dealing with QWidget
    instances, because even after a QWidget is closed and destroyed,
    only the C++ object is destroyed. The Python 'shell' object
    remains, but raises a RuntimeError when an attempt is made to call
    an underlying QWidget method.
    This plugin alleviates this behavior, and if a QWidget instance is
    found that is just an empty shell, it prevents Louie from
    dispatching to any methods on those objects.
    """
    def __init__(self):
        try:
            import qt
        except ImportError:
            # PyQt not available: degrade to an always-live check.
            self.is_live = self._is_live_no_qt
        else:
            self.qt = qt
    def is_live(self, receiver):
        """If receiver is a method on a QWidget, only return True if
        it hasn't been destroyed."""
        if (hasattr(receiver, 'im_self') and
            isinstance(receiver.im_self, self.qt.QWidget)
            ):
            try:
                # Probe a cheap QWidget method; destroyed shells raise.
                receiver.im_self.x()
            except RuntimeError:
                return False
        return True
    def _is_live_no_qt(self, receiver):
        # Fallback used when qt cannot be imported.
        return True
class TwistedDispatchPlugin(Plugin):
    """Plugin for Louie that wraps all receivers in callables
    that return Twisted Deferred objects.
    When the wrapped receiver is called, it adds a call to the actual
    receiver to the reactor event loop, and returns a Deferred that is
    called back with the result.
    """
    def __init__(self):
        # Don't import reactor ourselves, but make access to it
        # easier.
        from twisted import internet
        from twisted.internet.defer import Deferred
        self._internet = internet
        self._Deferred = Deferred
    def wrap_receiver(self, receiver):
        def wrapper(*args, **kw):
            d = self._Deferred()
            def called(dummy):
                # Deferred callback; ``dummy`` is the ignored chain value.
                return receiver(*args, **kw)
            d.addCallback(called)
            # Schedule the receiver call on the next reactor iteration.
            self._internet.reactor.callLater(0, d.callback, None)
            return d
        return wrapper

128
wlauto/external/louie/prioritylist.py vendored Normal file
View File

@@ -0,0 +1,128 @@
"""OrderedList class
This class keeps its elements ordered according to their priority.
"""
from collections import defaultdict
import numbers
from bisect import insort
class PriorityList(object):
    """Container that keeps its elements ordered according to a numeric
    priority.

    Iteration (``for x in pl``) yields elements with the *highest*
    priority first, whereas indexing and slicing (``pl[i]``, ``pl[:]``)
    traverse the elements from the *lowest* priority upwards.
    """

    def __init__(self):
        """
        Returns an OrderedReceivers object that externally behaves
        like a list but it maintains the order of its elements
        according to their priority.
        """
        # priority -> list of elements at that priority (insertion order).
        self.elements = defaultdict(list)
        # True when self.priorities is known to be sorted ascending.
        self.is_ordered = True
        self.priorities = []
        self.size = 0
        # Flattened lowest-priority-first view; invalidated on mutation.
        self._cached_elements = None

    def __del__(self):
        pass

    def __iter__(self):
        """Iterate over all elements, highest priority first."""
        self._order_elements()
        for priority in reversed(self.priorities):
            for element in self.elements[priority]:
                yield element

    def __getitem__(self, index):
        """Index or slice into the lowest-priority-first view."""
        self._order_elements()
        return self._to_list()[index]

    def __delitem__(self, index):
        """Delete by integer index or slice (lowest-priority-first view)."""
        self._order_elements()
        if isinstance(index, numbers.Integral):
            index = int(index)
            if index < 0:
                index_range = [len(self) + index]
            else:
                index_range = [index]
        elif isinstance(index, slice):
            # Bug fix: an omitted stop (e.g. ``del pl[:]``) used to raise
            # TypeError via range(0, None); default it to len(self).
            # list(...) also keeps this working on Python 3, where range
            # objects have no pop().
            stop = len(self) if index.stop is None else index.stop
            index_range = list(range(index.start or 0, stop, index.step or 1))
        else:
            raise ValueError('Invalid index {}'.format(index))
        current_global_offset = 0
        priority_counts = {priority: count for (priority, count) in
                           zip(self.priorities,
                               [len(self.elements[p]) for p in self.priorities])}
        for priority in self.priorities:
            if not index_range:
                break
            priority_offset = 0
            while index_range:
                del_index = index_range[0]
                if priority_counts[priority] + current_global_offset <= del_index:
                    # Target index lies in a later priority bucket.
                    current_global_offset += priority_counts[priority]
                    break
                within_priority_index = del_index - (current_global_offset + priority_offset)
                self._delete(priority, within_priority_index)
                priority_offset += 1
                index_range.pop(0)

    def __len__(self):
        return self.size

    def add(self, new_element, priority=0, force_ordering=True):
        """
        adds a new item in the list.
        - ``new_element`` the element to be inserted in the PriorityList
        - ``priority`` is the priority of the element which specifies its
          order withing the List
        - ``force_ordering`` indicates whether elements should be ordered
          right now. If set to False, ordering happens on demand (lazy)
        """
        self._add_element(new_element, priority)
        if priority not in self.priorities:
            self._add_priority(priority, force_ordering)

    def index(self, element):
        """Return the element's index in the lowest-priority-first view."""
        return self._to_list().index(element)

    def remove(self, element):
        """Remove the first occurrence of ``element``."""
        index = self.index(element)
        self.__delitem__(index)

    def _order_elements(self):
        # Sort the priority keys lazily, only when unordered.
        if not self.is_ordered:
            self.priorities = sorted(self.priorities)
            self.is_ordered = True

    def _to_list(self):
        # Bug fix: identity comparison (``is None``) instead of ``== None``.
        if self._cached_elements is None:
            self._order_elements()
            self._cached_elements = []
            for priority in self.priorities:
                self._cached_elements += self.elements[priority]
        return self._cached_elements

    def _add_element(self, element, priority):
        self.elements[priority].append(element)
        self.size += 1
        self._cached_elements = None

    def _delete(self, priority, priority_index):
        del self.elements[priority][priority_index]
        self.size -= 1
        if len(self.elements[priority]) == 0:
            # Bucket emptied: forget the priority key entirely.
            self.priorities.remove(priority)
        self._cached_elements = None

    def _add_priority(self, priority, force_ordering):
        if force_ordering and self.is_ordered:
            # Keep the sorted invariant with an O(log n) insertion point.
            insort(self.priorities, priority)
        elif not force_ordering:
            # Lazy mode: append now, sort on first ordered access.
            self.priorities.append(priority)
            self.is_ordered = False
        elif not self.is_ordered:
            self.priorities.append(priority)
            self._order_elements()
        else:
            raise AssertionError('Should never get here.')

58
wlauto/external/louie/robustapply.py vendored Normal file
View File

@@ -0,0 +1,58 @@
"""Robust apply mechanism.
Provides a function 'call', which can sort out what arguments a given
callable object can take, and subset the given arguments to match only
those which are acceptable.
"""
def function(receiver):
    """Get function-like callable object for given receiver.
    returns (function_or_method, codeObject, fromMethod)
    If fromMethod is true, then the callable already has its first
    argument bound.
    NOTE: relies on Python 2 method attributes (``im_func``,
    ``func_code``); will not resolve Python 3 callables.
    """
    if hasattr(receiver, '__call__'):
        # receiver is a class instance; assume it is callable.
        # Reassign receiver to the actual method that will be called.
        c = receiver.__call__
        if hasattr(c, 'im_func') or hasattr(c, 'im_code'):
            receiver = c
    if hasattr(receiver, 'im_func'):
        # receiver is an instance-method; its code object skips ``self``.
        return receiver, receiver.im_func.func_code, 1
    elif not hasattr(receiver, 'func_code'):
        raise ValueError(
            'unknown reciever type %s %s' % (receiver, type(receiver)))
    # Plain function: first argument is not bound.
    return receiver, receiver.func_code, 0
def robust_apply(receiver, signature, *arguments, **named):
    """Call receiver with arguments and appropriate subset of named.
    ``signature`` is the callable used to determine the call signature
    of the receiver, in case ``receiver`` is a callable wrapper of the
    actual receiver."""
    signature, code_object, startIndex = function(signature)
    # Named parameters remaining after positional arguments are consumed;
    # only these may be supplied as keywords.
    acceptable = code_object.co_varnames[
        startIndex + len(arguments):
        code_object.co_argcount
        ]
    for name in code_object.co_varnames[
        startIndex:startIndex + len(arguments)
        ]:
        # ``in`` instead of the removed dict.has_key (same semantics,
        # also Python 3 compatible).
        if name in named:
            raise TypeError(
                'Argument %r specified both positionally '
                'and as a keyword for calling %r'
                % (name, signature)
                )
    if not (code_object.co_flags & 8):
        # fc does not have a **kwds type parameter, therefore
        # remove unacceptable arguments.  Iterate over a snapshot of the
        # keys since ``named`` is mutated inside the loop.
        for arg in list(named.keys()):
            if arg not in acceptable:
                del named[arg]
    return receiver(*arguments, **named)

179
wlauto/external/louie/saferef.py vendored Normal file
View File

@@ -0,0 +1,179 @@
"""Refactored 'safe reference from dispatcher.py"""
import weakref
import traceback
def safe_ref(target, on_delete=None):
    """Return a *safe* weak reference to a callable target.
    - ``target``: The object to be weakly referenced, if it's a bound
      method reference, will create a BoundMethodWeakref, otherwise
      creates a simple weakref.
    - ``on_delete``: If provided, will have a hard reference stored to
      the callable to be called after the safe reference goes out of
      scope with the reference object, (either a weakref or a
      BoundMethodWeakref) as argument.
    """
    # Py2 bound methods expose im_self/im_func; plain weakrefs to them
    # die immediately, hence the special-cased BoundMethodWeakref.
    if hasattr(target, 'im_self'):
        if target.im_self is not None:
            # Turn a bound method into a BoundMethodWeakref instance.
            # Keep track of these instances for lookup by disconnect().
            assert hasattr(target, 'im_func'), (
                "safe_ref target %r has im_self, but no im_func, "
                "don't know how to create reference"
                % target
                )
            reference = BoundMethodWeakref(target=target, on_delete=on_delete)
            return reference
    if callable(on_delete):
        # Plain weakref with a deletion callback.
        return weakref.ref(target, on_delete)
    else:
        return weakref.ref(target)
class BoundMethodWeakref(object):
    """'Safe' and reusable weak references to instance methods.
    BoundMethodWeakref objects provide a mechanism for referencing a
    bound method without requiring that the method object itself
    (which is normally a transient object) is kept alive. Instead,
    the BoundMethodWeakref object keeps weak references to both the
    object and the function which together define the instance method.
    Attributes:
    - ``key``: The identity key for the reference, calculated by the
      class's calculate_key method applied to the target instance method.
    - ``deletion_methods``: Sequence of callable objects taking single
      argument, a reference to this object which will be called when
      *either* the target object or target function is garbage
      collected (i.e. when this object becomes invalid). These are
      specified as the on_delete parameters of safe_ref calls.
    - ``weak_self``: Weak reference to the target object.
    - ``weak_func``: Weak reference to the target function.
    Class Attributes:
    - ``_all_instances``: Class attribute pointing to all live
      BoundMethodWeakref objects indexed by the class's
      calculate_key(target) method applied to the target objects.
      This weak value dictionary is used to short-circuit creation so
      that multiple references to the same (object, function) pair
      produce the same BoundMethodWeakref instance.
    """
    _all_instances = weakref.WeakValueDictionary()
    def __new__(cls, target, on_delete=None, *arguments, **named):
        """Create new instance or return current instance.
        Basically this method of construction allows us to
        short-circuit creation of references to already- referenced
        instance methods. The key corresponding to the target is
        calculated, and if there is already an existing reference,
        that is returned, with its deletion_methods attribute updated.
        Otherwise the new instance is created and registered in the
        table of already-referenced methods.
        """
        key = cls.calculate_key(target)
        current = cls._all_instances.get(key)
        if current is not None:
            # Reuse the existing reference; just record the extra callback.
            current.deletion_methods.append(on_delete)
            return current
        else:
            base = super(BoundMethodWeakref, cls).__new__(cls)
            cls._all_instances[key] = base
            # __init__ is invoked explicitly since we may return an
            # existing instance above (bypassing normal construction).
            base.__init__(target, on_delete, *arguments, **named)
            return base
    def __init__(self, target, on_delete=None):
        """Return a weak-reference-like instance for a bound method.
        - ``target``: The instance-method target for the weak reference,
          must have im_self and im_func attributes and be
          reconstructable via the following, which is true of built-in
          instance methods::
            target.im_func.__get__( target.im_self )
        - ``on_delete``: Optional callback which will be called when
          this weak reference ceases to be valid (i.e. either the
          object or the function is garbage collected). Should take a
          single argument, which will be passed a pointer to this
          object.
        """
        def remove(weak, self=self):
            """Set self.isDead to True when method or instance is destroyed."""
            # Fire (and then forget) all registered deletion callbacks.
            methods = self.deletion_methods[:]
            del self.deletion_methods[:]
            try:
                del self.__class__._all_instances[self.key]
            except KeyError:
                pass
            for function in methods:
                try:
                    if callable(function):
                        function(self)
                except Exception:
                    # Callbacks must never propagate during GC; report only.
                    try:
                        traceback.print_exc()
                    except AttributeError, e:
                        print ('Exception during saferef %s '
                               'cleanup function %s: %s' % (self, function, e))
        self.deletion_methods = [on_delete]
        self.key = self.calculate_key(target)
        # Either referent dying triggers ``remove`` above.
        self.weak_self = weakref.ref(target.im_self, remove)
        self.weak_func = weakref.ref(target.im_func, remove)
        self.self_name = str(target.im_self)
        self.func_name = str(target.im_func.__name__)
    def calculate_key(cls, target):
        """Calculate the reference key for this reference.
        Currently this is a two-tuple of the id()'s of the target
        object and the target function respectively.
        """
        return (id(target.im_self), id(target.im_func))
    calculate_key = classmethod(calculate_key)
    def __str__(self):
        """Give a friendly representation of the object."""
        return "%s(%s.%s)" % (
            self.__class__.__name__,
            self.self_name,
            self.func_name,
            )
    __repr__ = __str__
    def __nonzero__(self):
        """Whether we are still a valid reference."""
        return self() is not None
    def __cmp__(self, other):
        """Compare with another reference."""
        if not isinstance(other, self.__class__):
            return cmp(self.__class__, type(other))
        return cmp(self.key, other.key)
    def __call__(self):
        """Return a strong reference to the bound method.
        If the target cannot be retrieved, then will return None,
        otherwise returns a bound instance method for our object and
        function.
        Note: You may call this method any number of times, as it does
        not invalidate the reference.
        """
        target = self.weak_self()
        if target is not None:
            function = self.weak_func()
            if function is not None:
                # Re-bind the function to the still-live instance.
                return function.__get__(target)
        return None

39
wlauto/external/louie/sender.py vendored Normal file
View File

@@ -0,0 +1,39 @@
"""Sender classes."""
class _SENDER(type):
    """Base metaclass for sender classes; gives them a friendly repr."""
    def __str__(cls):
        return '<Sender: %s>' % (cls.__name__, )
class Any(object):
    """Used to represent 'any sender'.
    The Any class can be used with connect, disconnect, send, or
    sendExact to denote that the sender parameter should react to any
    sender, not just a particular sender.
    """
    __metaclass__ = _SENDER
class Anonymous(object):
    """Singleton used to signal 'anonymous sender'.
    The Anonymous class is used to signal that the sender of a message
    is not specified (as distinct from being 'any sender').
    Registering callbacks for Anonymous will only receive messages
    sent without senders. Sending with anonymous will only send
    messages to those receivers registered for Any or Anonymous.
    Note: The default sender for connect is Any, while the default
    sender for send is Anonymous. This has the effect that if you do
    not specify any senders in either function then all messages are
    routed as though there was a single sender (Anonymous) being used
    everywhere.
    """
    __metaclass__ = _SENDER

30
wlauto/external/louie/signal.py vendored Normal file
View File

@@ -0,0 +1,30 @@
"""Signal class.
This class is provided as a way to consistently define and document
signal types. Signal classes also have a useful string
representation.
Louie does not require you to use a subclass of Signal for signals.
"""
class _SIGNAL(type):
    """Base metaclass for signal classes; gives them a friendly repr."""
    def __str__(cls):
        return '<Signal: %s>' % (cls.__name__, )
class Signal(object):
    """Base class for documented, named signal types."""
    __metaclass__ = _SIGNAL
class All(Signal):
    """Used to represent 'all signals'.
    The All class can be used with connect, disconnect, send, or
    sendExact to denote that the signal should react to all signals,
    not just a particular signal.
    """

View File

View File

@@ -0,0 +1,5 @@
import sys
import os
# Make the louie package under test importable when running the tests
# directly from the source tree (prepend the repository root to sys.path).
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

0
wlauto/external/louie/test/fixture.py vendored Normal file
View File

View File

@@ -0,0 +1,154 @@
import unittest
import louie
from louie import dispatcher
def x(a):
    # Identity function used as a simple signal receiver in these tests.
    return a
class Dummy(object):
    # Plain object used as a weakly-referenceable sender in these tests.
    pass
class Callable(object):
    # Receiver exposing both __call__ and a bound method, used to test
    # garbage collection of instance-method receivers.
    def __call__(self, a):
        return a
    def a(self, a):
        return a
class TestDispatcher(unittest.TestCase):
    """End-to-end tests of connect/send/disconnect and the automatic
    cleanup of the dispatcher's internal tables."""
    def setUp(self):
        louie.reset()
    def _isclean(self):
        """Assert that everything has been cleaned up automatically"""
        assert len(dispatcher.senders_back) == 0, dispatcher.senders_back
        assert len(dispatcher.connections) == 0, dispatcher.connections
        assert len(dispatcher.senders) == 0, dispatcher.senders
    def test_Exact(self):
        # Receiver registered for an exact (signal, sender) pair.
        a = Dummy()
        signal = 'this'
        louie.connect(x, signal, a)
        expected = [(x, a)]
        result = louie.send('this', a, a=a)
        assert result == expected, (
            "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
            % (expected, result))
        louie.disconnect(x, signal, a)
        assert len(list(louie.get_all_receivers(a, signal))) == 0
        self._isclean()
    def test_AnonymousSend(self):
        # No sender given on connect; sending with sender=None matches.
        a = Dummy()
        signal = 'this'
        louie.connect(x, signal)
        expected = [(x, a)]
        result = louie.send(signal, None, a=a)
        assert result == expected, (
            "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
            % (expected, result))
        louie.disconnect(x, signal)
        assert len(list(louie.get_all_receivers(None, signal))) == 0
        self._isclean()
    def test_AnyRegistration(self):
        # Receiver registered for Any sender fires for arbitrary senders.
        a = Dummy()
        signal = 'this'
        louie.connect(x, signal, louie.Any)
        expected = [(x, a)]
        result = louie.send('this', object(), a=a)
        assert result == expected, (
            "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
            % (expected, result))
        louie.disconnect(x, signal, louie.Any)
        expected = []
        result = louie.send('this', object(), a=a)
        assert result == expected, (
            "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
            % (expected, result))
        assert len(list(louie.get_all_receivers(louie.Any, signal))) == 0
        self._isclean()
    def test_AllRegistration(self):
        # Receiver registered for All signals fires for any signal value.
        a = Dummy()
        signal = 'this'
        louie.connect(x, louie.All, a)
        expected = [(x, a)]
        result = louie.send('this', a, a=a)
        assert result == expected, (
            "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
            % (expected, result))
        louie.disconnect(x, louie.All, a)
        assert len(list(louie.get_all_receivers(a, louie.All))) == 0
        self._isclean()
    def test_GarbageCollected(self):
        # Deleting the instance must drop its bound-method receiver.
        a = Callable()
        b = Dummy()
        signal = 'this'
        louie.connect(a.a, signal, b)
        expected = []
        del a
        result = louie.send('this', b, a=b)
        assert result == expected, (
            "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
            % (expected, result))
        assert len(list(louie.get_all_receivers(b, signal))) == 0, (
            "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
        self._isclean()
    def test_GarbageCollectedObj(self):
        class x:
            def __call__(self, a):
                return a
        # Deleting a callable-object receiver must also disconnect it.
        a = Callable()
        b = Dummy()
        signal = 'this'
        louie.connect(a, signal, b)
        expected = []
        del a
        result = louie.send('this', b, a=b)
        assert result == expected, (
            "Send didn't return expected result:\n\texpected:%s\n\tgot:%s"
            % (expected, result))
        assert len(list(louie.get_all_receivers(b, signal))) == 0, (
            "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
        self._isclean()
    def test_MultipleRegistration(self):
        # Connecting the same receiver repeatedly must not duplicate it.
        a = Callable()
        b = Dummy()
        signal = 'this'
        louie.connect(a, signal, b)
        louie.connect(a, signal, b)
        louie.connect(a, signal, b)
        louie.connect(a, signal, b)
        louie.connect(a, signal, b)
        louie.connect(a, signal, b)
        result = louie.send('this', b, a=b)
        assert len(result) == 1, result
        assert len(list(louie.get_all_receivers(b, signal))) == 1, (
            "Remaining handlers: %s" % (louie.get_all_receivers(b, signal),))
        del a
        del b
        del result
        self._isclean()
    def test_robust(self):
        """Test the sendRobust function."""
        def fails():
            raise ValueError('this')
        a = object()
        signal = 'this'
        louie.connect(fails, louie.All, a)
        result = louie.send_robust('this', a, a=a)
        # The raised exception is returned as the receiver's response.
        err = result[0][1]
        assert isinstance(err, ValueError)
        assert err.args == ('this', )

View File

@@ -0,0 +1,145 @@
"""Louie plugin tests."""
import unittest
import louie
# Optional PyQt (qt3) setup: create a single QApplication marked
# 'for_testing' so QtWidgetPlugin can be exercised; fall back to
# qt = None when PyQt is not installed (Qt tests are then skipped).
try:
    import qt
    if not hasattr(qt.qApp, 'for_testing'):
        _app = qt.QApplication([])
        _app.for_testing = True
        qt.qApp = _app
except ImportError:
    qt = None
class ReceiverBase(object):
    # Callable receiver that records each argument it is called with
    # in ``self.args``; ``self.live`` is inspected by the test plugins.
    def __init__(self):
        self.args = []
        self.live = True
    def __call__(self, arg):
        self.args.append(arg)
class Receiver1(ReceiverBase):
    # Distinct subclass so plugins can discriminate by receiver type.
    pass
class Receiver2(ReceiverBase):
    # Distinct subclass so plugins can discriminate by receiver type.
    pass
class Plugin1(louie.Plugin):
    def is_live(self, receiver):
        """ReceiverBase instances are only live if their ``live``
        attribute is True."""
        if isinstance(receiver, ReceiverBase):
            return receiver.live
        return True
class Plugin2(louie.Plugin):
    def is_live(self, receiver):
        """Pretend all Receiver2 instances are not live."""
        if isinstance(receiver, Receiver2):
            return False
        return True
def test_only_one_instance():
    """Installing two plugins of the same class must raise."""
    louie.reset()
    first = Plugin1()
    second = Plugin1()
    louie.install_plugin(first)
    # XXX: Move these tests into test cases so we can use unittest's
    # 'assertRaises' method.
    try:
        louie.install_plugin(second)
    except louie.error.PluginTypeError:
        pass
    else:
        raise Exception('PluginTypeError not raised')
def test_is_live():
    """Plugins' is_live checks must filter dispatch dynamically as
    plugins are installed, removed, and combined."""
    louie.reset()
    # Create receivers.
    receiver1a = Receiver1()
    receiver1b = Receiver1()
    receiver2a = Receiver2()
    receiver2b = Receiver2()
    # Connect signals.
    louie.connect(receiver1a, 'sig')
    louie.connect(receiver1b, 'sig')
    louie.connect(receiver2a, 'sig')
    louie.connect(receiver2b, 'sig')
    # Check reception without plugins.
    louie.send('sig', arg='foo')
    assert receiver1a.args == ['foo']
    assert receiver1b.args == ['foo']
    assert receiver2a.args == ['foo']
    assert receiver2b.args == ['foo']
    # Install plugin 1.
    plugin1 = Plugin1()
    louie.install_plugin(plugin1)
    # Make some receivers not live.
    receiver1a.live = False
    receiver2b.live = False
    # Check reception: dead receivers must not get 'bar'.
    louie.send('sig', arg='bar')
    assert receiver1a.args == ['foo']
    assert receiver1b.args == ['foo', 'bar']
    assert receiver2a.args == ['foo', 'bar']
    assert receiver2b.args == ['foo']
    # Remove plugin 1, install plugin 2.
    plugin2 = Plugin2()
    louie.remove_plugin(plugin1)
    louie.install_plugin(plugin2)
    # Check reception: plugin 2 suppresses all Receiver2 instances.
    louie.send('sig', arg='baz')
    assert receiver1a.args == ['foo', 'baz']
    assert receiver1b.args == ['foo', 'bar', 'baz']
    assert receiver2a.args == ['foo', 'bar']
    assert receiver2b.args == ['foo']
    # Install plugin 1 alongside plugin 2.
    louie.install_plugin(plugin1)
    # Check reception: both plugins' filters apply simultaneously.
    louie.send('sig', arg='fob')
    assert receiver1a.args == ['foo', 'baz']
    assert receiver1b.args == ['foo', 'bar', 'baz', 'fob']
    assert receiver2a.args == ['foo', 'bar']
    assert receiver2b.args == ['foo']
if qt is not None:
    # Only defined when PyQt is importable (see module-level setup).
    def test_qt_plugin():
        louie.reset()
        # Create receivers.
        class Receiver(qt.QWidget):
            def __init__(self):
                qt.QObject.__init__(self)
                self.args = []
            def receive(self, arg):
                self.args.append(arg)
        receiver1 = Receiver()
        receiver2 = Receiver()
        # Connect signals.
        louie.connect(receiver1.receive, 'sig')
        louie.connect(receiver2.receive, 'sig')
        # Destroy receiver2 so only a shell is left.
        receiver2.close(True)
        # Check reception without plugins: the dead shell still receives.
        louie.send('sig', arg='foo')
        assert receiver1.args == ['foo']
        assert receiver2.args == ['foo']
        # Install plugin.
        plugin = louie.QtWidgetPlugin()
        louie.install_plugin(plugin)
        # Check reception with plugins: the destroyed widget is skipped.
        louie.send('sig', arg='bar')
        assert receiver1.args == ['foo', 'bar']
        assert receiver2.args == ['foo']

View File

@@ -0,0 +1,41 @@
import unittest
import louie
from louie import dispatcher
class Callable(object):
    # Receiver that returns a fixed value, so tests can observe the
    # order in which prioritized receivers fire.
    def __init__(self, val):
        self.val = val
    def __call__(self):
        return self.val
# Module-level receivers used to verify priority ordering of dispatch.
one = Callable(1)
two = Callable(2)
three = Callable(3)
class TestPriorityDispatcher(unittest.TestCase):
    """Check that receivers fire in priority order regardless of the
    order in which they were connected."""
    def test_ConnectNotify(self):
        # Connected out of order on purpose.
        louie.connect(
            two,
            'one',
            priority=200
        )
        louie.connect(
            one,
            'one',
            priority=100
        )
        louie.connect(
            three,
            'one',
            priority=300
        )
        result = [ i[1] for i in louie.send('one')]
        if not result == [1, 2, 3]:
            print result
            assert(False)

View File

@@ -0,0 +1,62 @@
import unittest
import louie.prioritylist
from louie.prioritylist import PriorityList
#def populate_list(plist):
class TestPriorityList(unittest.TestCase):
    """Unit tests for the PriorityList container used by the dispatcher."""
    def test_Insert(self):
        pl = PriorityList()
        elements = {3: "element 3",
                    2: "element 2",
                    1: "element 1",
                    5: "element 5",
                    4: "element 4"
                    }
        for key in elements:
            pl.add(elements[key], priority=key)
        # Slicing views elements lowest-priority-first.
        match = zip(sorted(elements.values()), pl[:])
        for pair in match:
            assert(pair[0]==pair[1])
    def test_Delete(self):
        pl = PriorityList()
        elements = {2: "element 3",
                    1: "element 2",
                    0: "element 1",
                    4: "element 5",
                    3: "element 4"
                    }
        for key in elements:
            pl.add(elements[key], priority=key)
        del elements[2]
        del pl[2]
        match = zip(sorted(elements.values()) , pl[:])
        for pair in match:
            assert(pair[0]==pair[1])
    def test_Multiple(self):
        # Same-priority elements must keep their insertion order.
        pl = PriorityList()
        pl.add('1', 1)
        pl.add('2.1', 2)
        pl.add('3', 3)
        pl.add('2.2', 2)
        it = iter(pl)
        assert(it.next() == '1')
        assert(it.next() == '2.1')
        assert(it.next() == '2.2')
        assert(it.next() == '3')
    def test_IteratorBreak(self):
        # Breaking out of iteration must not corrupt the list state.
        pl = PriorityList()
        pl.add('1', 1)
        pl.add('2.1', 2)
        pl.add('3', 3)
        pl.add('2.2', 2)
        for i in pl:
            if i == '2.1':
                break
        assert(pl.index('3') == 3)

View File

@@ -0,0 +1,34 @@
import unittest
from louie.robustapply import robust_apply
def no_argument():
    """Receiver accepting no arguments."""
    pass

def one_argument(blah):
    """Receiver requiring exactly one positional argument."""
    pass

def two_arguments(blah, other):
    """Receiver requiring exactly two positional arguments."""
    pass
class TestRobustApply(unittest.TestCase):
    """Tests for robust_apply(), which matches supplied arguments against
    the receiver's signature and raises TypeError on a mismatch."""

    def test_01(self):
        # Zero-argument receiver, no extra arguments: succeeds.
        robust_apply(no_argument, no_argument)

    def test_02(self):
        # Extra positional argument that the receiver cannot accept.
        self.assertRaises(TypeError, robust_apply, no_argument, no_argument,
                          'this')

    def test_03(self):
        # Missing required argument.
        self.assertRaises(TypeError, robust_apply, one_argument, one_argument)

    def test_04(self):
        """Raise error on duplication of a particular argument"""
        self.assertRaises(TypeError, robust_apply, one_argument, one_argument,
                          'this', blah='that')

View File

@@ -0,0 +1,83 @@
import unittest
from louie.saferef import safe_ref
class _Sample1(object):
    """Provides a bound method (``x``) used as a safe_ref target."""
    def x(self):
        pass

def _sample2(obj):
    """Plain module-level function used as a safe_ref target."""
    pass

class _Sample3(object):
    """Callable instance used as a safe_ref target."""
    def __call__(self, obj):
        pass
class TestSaferef(unittest.TestCase):
    """Tests for louie.saferef.safe_ref over a large population of
    referents: bound methods, a plain function, and callable instances."""

    # XXX: The original tests had a test for closure, and it had an
    # off-by-one problem, perhaps due to scope issues. It has been
    # removed from this test suite.

    def setUp(self):
        ts = []
        ss = []
        # range() works on both Python 2 and 3; the original xrange()
        # is Python-2-only.
        for x in range(5000):
            t = _Sample1()
            ts.append(t)
            s = safe_ref(t.x, self._closure)
            ss.append(s)
        ts.append(_sample2)
        ss.append(safe_ref(_sample2, self._closure))
        for x in range(30):
            t = _Sample3()
            ts.append(t)
            s = safe_ref(t, self._closure)
            ss.append(s)
        self.ts = ts
        self.ss = ss
        self.closure_count = 0

    def tearDown(self):
        if hasattr(self, 'ts'):
            del self.ts
        if hasattr(self, 'ss'):
            del self.ss

    def test_In(self):
        """Test the `in` operator for safe references (cmp)"""
        for t in self.ts[:50]:
            assert safe_ref(t.x) in self.ss

    def test_Valid(self):
        """Test that the references are valid (return instance methods)"""
        for s in self.ss:
            assert s()

    def test_ShortCircuit(self):
        """Test that creation short-circuits to reuse existing references"""
        sd = {}
        for s in self.ss:
            sd[s] = 1
        # `in` replaces dict.has_key(), which was removed in Python 3.
        for t in self.ts:
            if hasattr(t, 'x'):
                assert safe_ref(t.x) in sd
            else:
                assert safe_ref(t) in sd

    def test_Representation(self):
        """Test that the reference object's representation works

        XXX Doesn't currently check the results, just that no error
        is raised
        """
        repr(self.ss[-1])

    def _closure(self, ref):
        """Dumb utility mechanism to increment deletion counter"""
        self.closure_count += 1

8
wlauto/external/louie/version.py vendored Normal file
View File

@@ -0,0 +1,8 @@
"""Louie version information."""
NAME = 'Louie'
DESCRIPTION = 'Signal dispatching mechanism'
VERSION = '1.1'

7
wlauto/external/pmu_logger/Makefile vendored Executable file
View File

@@ -0,0 +1,7 @@
# To build the pmu_logger module use the following command line
# make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- -C ../kernel/out SUBDIRS=$PWD modules
# where
# CROSS_COMPILE - prefix of the arm linux compiler
# -C - location of the configured kernel source tree
# Standard kbuild out-of-tree module target.
obj-m := pmu_logger.o

35
wlauto/external/pmu_logger/README vendored Executable file
View File

@@ -0,0 +1,35 @@
The pmu_logger module provides the ability to periodically trace CCI PMU counters. The trace destinations can be the ftrace buffer and/or the kernel log. This file gives a quick overview of the functionality provided by the module and how to use it.
The pmu_logger module creates a directory in the debugfs filesystem called cci_pmu_logger which can be used to enable/disable the counters and control the events that are counted.
To configure the events being counted write the corresponding event id to the counter* files. The list of CCI PMU events can be found at http://arminfo.emea.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0470d/CJHICFBF.html.
The "period_jiffies" can be used to control the periodicity of tracing. It accepts values in kernel jiffies.
To enable tracing, write a 1 to "control". To disable write another 1 to "control". The files "enable_console" and "enable_ftrace" control where the trace is written to. To check if the counters are currently running or not, you can read the control file.
The current values of the counters can be read from the "values" file.
E.g. to trace the A15 and A7 snoop hit rates every 10 jiffies, the following commands are required -
trace-cmd reset
echo 0x63 > counter0
echo 0x6A > counter1
echo 0x83 > counter2
echo 0x8A > counter3
echo 10 > period_jiffies
trace-cmd start -b 20000 -e "sched:sched_wakeup"
echo 1 > control
# perform the activity for which you would like to collect the CCI PMU trace.
trace-cmd stop && trace-cmd extract
echo 1 > control
trace-cmd report trace.dat | grep print # shows the trace of the CCI PMU counters along with the cycle counter values.

294
wlauto/external/pmu_logger/pmu_logger.c vendored Executable file
View File

@@ -0,0 +1,294 @@
/* Copyright 2013-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* pmu_logger.c - Kernel module to log the CCI PMU counters
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/timer.h>
#include <asm/io.h>
#define MODULE_NAME "cci_pmu_logger"
// CCI_BASE needs to be modified to point to the mapped location of CCI in
// memory on your device.
#define CCI_BASE 0x2C090000 // TC2
//#define CCI_BASE 0x10D20000
#define CCI_SIZE 0x00010000
#define PMCR 0x100
#define PMCR_CEN (1 << 0)
#define PMCR_RST (1 << 1)
#define PMCR_CCR (1 << 2)
#define PMCR_CCD (1 << 3)
#define PMCR_EX (1 << 4)
#define PMCR_DP (1 << 5)
#define CC_BASE 0x9000
#define PC0_BASE 0xA000
#define PC1_BASE 0xB000
#define PC2_BASE 0xC000
#define PC3_BASE 0xD000
#define PC_ESR 0x0
#define CNT_VALUE 0x4
#define CNT_CONTROL 0x8
#define CNT_ENABLE (1 << 0)
// Default CCI PMU event selections; each is exposed via debugfs as
// counter0..counter3 and may be changed at runtime.
u32 counter0_event = 0x6A;
u32 counter1_event = 0x63;
u32 counter2_event = 0x8A;
u32 counter3_event = 0x83;
// Trace destinations, toggled via the debugfs bools of the same name.
u32 enable_console = 0;
u32 enable_ftrace = 1;
// ioremap()ed base of the CCI register block; set in pmu_logger_init().
void *cci_base = 0;
static struct dentry *module_debugfs_root;
// Whether periodic logging is currently running (toggled via "control").
static int enabled = false;
u32 delay = 10; //jiffies. This translates to 1 sample every 100 ms
// Single periodic sampling timer driving trace_values().
struct timer_list timer;
/* Re-arm the sampling timer to fire `delay` jiffies from now. */
static void call_after_delay(void)
{
	timer.expires = jiffies + delay;
	add_timer(&timer);
}
/* One-time timer setup: register `fn` as the callback (with the timer
 * itself as its argument) and schedule the first expiry. */
static void setup_and_call_after_delay(void (*fn)(unsigned long))
{
	init_timer(&timer);
	timer.data = (unsigned long)&timer;
	timer.function = fn;
	call_after_delay();
}
/* Emit the current event selections to the enabled trace destinations so
 * the trace can be interpreted later. */
static void print_counter_configuration(void)
{
	if (enable_ftrace)
		trace_printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \
			     counter0_event, counter1_event, counter2_event, counter3_event);
	if (enable_console)
		printk("Counter_0: %02x Counter_1: %02x Counter_2: %02x Counter_3: %02x\n", \
		       counter0_event, counter1_event, counter2_event, counter3_event);
}
/*
 * Program the CCI PMU: select the four events to count, enable the event
 * counters and the cycle counter, then reset all counters and set the
 * cycle-count divider (PMCR_CCD; exact divider semantics per the CCI TRM
 * -- not verifiable from this file).
 */
static void initialize_cci_pmu(void)
{
	u32 val;
	// Select the events counted
	iowrite32(counter0_event, cci_base + PC0_BASE + PC_ESR);
	iowrite32(counter1_event, cci_base + PC1_BASE + PC_ESR);
	iowrite32(counter2_event, cci_base + PC2_BASE + PC_ESR);
	iowrite32(counter3_event, cci_base + PC3_BASE + PC_ESR);
	// Enable the individual PMU counters
	iowrite32(CNT_ENABLE, cci_base + PC0_BASE + CNT_CONTROL);
	iowrite32(CNT_ENABLE, cci_base + PC1_BASE + CNT_CONTROL);
	iowrite32(CNT_ENABLE, cci_base + PC2_BASE + CNT_CONTROL);
	iowrite32(CNT_ENABLE, cci_base + PC3_BASE + CNT_CONTROL);
	iowrite32(CNT_ENABLE, cci_base + CC_BASE + CNT_CONTROL);
	// Reset the counters and configure the Cycle Count Divider
	val = ioread32(cci_base + PMCR);
	iowrite32(val | PMCR_RST | PMCR_CCR | PMCR_CCD, cci_base + PMCR);
}
/* Globally start all CCI PMU counters (PMCR.CEN). */
static void enable_cci_pmu_counters(void)
{
	u32 val = ioread32(cci_base + PMCR);
	iowrite32(val | PMCR_CEN, cci_base + PMCR);
}
/* Globally stop all CCI PMU counters (clear PMCR.CEN). */
static void disable_cci_pmu_counters(void)
{
	u32 val = ioread32(cci_base + PMCR);
	iowrite32(val & ~PMCR_CEN, cci_base + PMCR);
}
/*
 * Timer callback: sample the cycle counter and the four event counters,
 * emit them to the enabled trace destinations, then -- while logging is
 * still enabled -- reset the counters and re-arm the timer.
 */
static void trace_values(unsigned long arg)
{
	u32 cycles;
	u32 counter[4];
	// Snapshot all five counters.
	cycles = ioread32(cci_base + CC_BASE + CNT_VALUE);
	counter[0] = ioread32(cci_base + PC0_BASE + CNT_VALUE);
	counter[1] = ioread32(cci_base + PC1_BASE + CNT_VALUE);
	counter[2] = ioread32(cci_base + PC2_BASE + CNT_VALUE);
	counter[3] = ioread32(cci_base + PC3_BASE + CNT_VALUE);
	if (enable_ftrace)
		trace_printk("Cycles: %08x Counter_0: %08x"
			     " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
			     cycles, counter[0], counter[1], counter[2], counter[3]);
	if (enable_console)
		printk("Cycles: %08x Counter_0: %08x"
		       " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
		       cycles, counter[0], counter[1], counter[2], counter[3]);
	// If write_control() disabled logging since the last sample, simply
	// do not re-arm; the timer then stops.
	if (enabled) {
		u32 val;
		// Reset the counters
		val = ioread32(cci_base + PMCR);
		iowrite32(val | PMCR_RST | PMCR_CCR, cci_base + PMCR);
		call_after_delay();
	}
}
/* debugfs "control" read handler: reports "enabled" or "disabled". */
static ssize_t read_control(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	char status[16];
	/* printk(KERN_DEBUG "%s\n", __func__); */
	if (enabled)
		snprintf(status, 16, "enabled\n");
	else
		snprintf(status, 16, "disabled\n");
	return simple_read_from_buffer(buf, count, ppos, status, strlen(status));
}
/*
 * debugfs "control" write handler: any write toggles logging, regardless
 * of the written value.  Enabling (re)programs the PMU, logs the counter
 * configuration and starts the periodic timer; disabling stops the
 * counters (the in-flight timer callback then sees enabled == false and
 * does not re-arm itself).
 */
static ssize_t write_control(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	if (enabled) {
		disable_cci_pmu_counters();
		enabled = false;
	} else {
		initialize_cci_pmu();
		enable_cci_pmu_counters();
		enabled = true;
		print_counter_configuration();
		setup_and_call_after_delay(trace_values);
	}
	return count;
}
/* debugfs "values" read handler: report the current raw counter values
 * without resetting them. */
static ssize_t read_values(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	char values[256];
	/* u32 val; */
	snprintf(values, 256, "Cycles: %08x Counter_0: %08x"
		 " Counter_1: %08x Counter_2: %08x Counter_3: %08x\n", \
		 ioread32(cci_base + CC_BASE + CNT_VALUE), \
		 ioread32(cci_base + PC0_BASE + CNT_VALUE), \
		 ioread32(cci_base + PC1_BASE + CNT_VALUE), \
		 ioread32(cci_base + PC2_BASE + CNT_VALUE), \
		 ioread32(cci_base + PC3_BASE + CNT_VALUE));
	return simple_read_from_buffer(buf, count, ppos, values, strlen(values));
}
/* File operations for the read/write debugfs "control" node. */
static const struct file_operations control_fops = {
	.owner = THIS_MODULE,
	.read = read_control,
	.write = write_control,
};
/* File operations for the read-only debugfs "values" node. */
static const struct file_operations value_fops = {
	.owner = THIS_MODULE,
	.read = read_values,
};
/*
 * Module init: create the debugfs control interface under
 * /sys/kernel/debug/cci_pmu_logger/ and map the CCI register block.
 *
 * Returns 0 on success or a negative errno on failure; the module loader
 * expects negative error codes (the original returned a positive 1).
 */
static int __init pmu_logger_init(void)
{
	int err = -ENOMEM;
	struct dentry *retval;

	module_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
	if (!module_debugfs_root || IS_ERR(module_debugfs_root)) {
		printk(KERN_ERR "error creating debugfs dir.\n");
		// Do not pass a potential ERR_PTR to debugfs_remove_recursive()
		// (the original jumped to the cleanup path here).
		module_debugfs_root = NULL;
		return -ENOMEM;
	}
	retval = debugfs_create_file("control", S_IRUGO | S_IWUGO, module_debugfs_root, NULL, &control_fops);
	if (!retval)
		goto out;
	retval = debugfs_create_file("values", S_IRUGO, module_debugfs_root, NULL, &value_fops);
	if (!retval)
		goto out;
	// Runtime-tweakable knobs: trace destinations, sampling period and
	// the four event selections.
	retval = debugfs_create_bool("enable_console", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_console);
	if (!retval)
		goto out;
	retval = debugfs_create_bool("enable_ftrace", S_IRUGO | S_IWUGO, module_debugfs_root, &enable_ftrace);
	if (!retval)
		goto out;
	retval = debugfs_create_u32("period_jiffies", S_IRUGO | S_IWUGO, module_debugfs_root, &delay);
	if (!retval)
		goto out;
	retval = debugfs_create_x32("counter0", S_IRUGO | S_IWUGO, module_debugfs_root, &counter0_event);
	if (!retval)
		goto out;
	retval = debugfs_create_x32("counter1", S_IRUGO | S_IWUGO, module_debugfs_root, &counter1_event);
	if (!retval)
		goto out;
	retval = debugfs_create_x32("counter2", S_IRUGO | S_IWUGO, module_debugfs_root, &counter2_event);
	if (!retval)
		goto out;
	retval = debugfs_create_x32("counter3", S_IRUGO | S_IWUGO, module_debugfs_root, &counter3_event);
	if (!retval)
		goto out;

	cci_base = ioremap(CCI_BASE, CCI_SIZE);
	if (!cci_base)
		goto out;

	printk(KERN_INFO "CCI PMU Logger loaded.\n");
	return 0;
out:
	debugfs_remove_recursive(module_debugfs_root);
	module_debugfs_root = NULL;
	return err;
}
/*
 * Module teardown: remove the debugfs tree and unmap the CCI registers.
 * NOTE(review): nothing here cancels a pending sampling timer; unloading
 * while logging is enabled looks unsafe -- confirm.
 */
static void __exit pmu_logger_exit(void)
{
	if (module_debugfs_root) {
		debugfs_remove_recursive(module_debugfs_root);
		module_debugfs_root = NULL;
	}
	if (cci_base)
		iounmap(cci_base);
	printk(KERN_INFO "CCI PMU Logger removed.\n");
}
module_init(pmu_logger_init);
module_exit(pmu_logger_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Punit Agrawal");
MODULE_DESCRIPTION("logger for CCI PMU counters");

BIN
wlauto/external/pmu_logger/pmu_logger.ko vendored Normal file

Binary file not shown.

11
wlauto/external/readenergy/Makefile vendored Normal file
View File

@@ -0,0 +1,11 @@
# To build:
#
# CROSS_COMPILE=aarch64-linux-gnu- make
#
CROSS_COMPILE?=aarch64-linux-gnu-
CC=$(CROSS_COMPILE)gcc
# Link statically so the binary runs on targets without a matching libc.
CFLAGS='-Wl,-static -Wl,-lc'
# Build the tool, then refresh the prebuilt copy bundled with the
# juno_energy instrument.
readenergy: readenergy.c
	$(CC) $(CFLAGS) readenergy.c -o readenergy
	cp readenergy ../../instrumentation/juno_energy/readenergy

BIN
wlauto/external/readenergy/readenergy vendored Executable file

Binary file not shown.

345
wlauto/external/readenergy/readenergy.c vendored Normal file
View File

@@ -0,0 +1,345 @@
/* Copyright 2014-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* readenergy.c
*
* Reads APB energy registers in Juno and outputs the measurements (converted to appropriate units).
*
*/
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
// The following values obtained from Juno TRM 2014/03/04 section 4.5
// Location of APB registers in memory
#define APB_BASE_MEMORY 0x1C010000
// APB energy counters start at offset 0xD0 from the base APB address.
#define BASE_INDEX 0xD0 / 4
// the one-past last APB counter
#define APB_SIZE 0x120
// Masks specifying the bits that contain the actual counter values
#define CMASK 0xFFF
#define VMASK 0xFFF
#define PMASK 0xFFFFFF
// Scaling factor (divisor) for getting measured values from counters
#define SYS_ADC_CH0_PM1_SYS_SCALE 761
#define SYS_ADC_CH1_PM2_A57_SCALE 381
#define SYS_ADC_CH2_PM3_A53_SCALE 761
#define SYS_ADC_CH3_PM4_GPU_SCALE 381
#define SYS_ADC_CH4_VSYS_SCALE 1622
#define SYS_ADC_CH5_VA57_SCALE 1622
#define SYS_ADC_CH6_VA53_SCALE 1622
#define SYS_ADC_CH7_VGPU_SCALE 1622
#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)
#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)
#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)
#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)
#define SYS_ENM_CH0_SYS_SCALE 12348030000
#define SYS_ENM_CH1_A57_SCALE 6174020000
#define SYS_ENM_CH0_A53_SCALE 12348030000
#define SYS_ENM_CH0_GPU_SCALE 6174020000
// Original values prior to re-calibration.
/*#define SYS_ADC_CH0_PM1_SYS_SCALE 819.2*/
/*#define SYS_ADC_CH1_PM2_A57_SCALE 409.6*/
/*#define SYS_ADC_CH2_PM3_A53_SCALE 819.2*/
/*#define SYS_ADC_CH3_PM4_GPU_SCALE 409.6*/
/*#define SYS_ADC_CH4_VSYS_SCALE 1638.4*/
/*#define SYS_ADC_CH5_VA57_SCALE 1638.4*/
/*#define SYS_ADC_CH6_VA53_SCALE 1638.4*/
/*#define SYS_ADC_CH7_VGPU_SCALE 1638.4*/
/*#define SYS_POW_CH04_SYS_SCALE (SYS_ADC_CH0_PM1_SYS_SCALE * SYS_ADC_CH4_VSYS_SCALE)*/
/*#define SYS_POW_CH15_A57_SCALE (SYS_ADC_CH1_PM2_A57_SCALE * SYS_ADC_CH5_VA57_SCALE)*/
/*#define SYS_POW_CH26_A53_SCALE (SYS_ADC_CH2_PM3_A53_SCALE * SYS_ADC_CH6_VA53_SCALE)*/
/*#define SYS_POW_CH37_GPU_SCALE (SYS_ADC_CH3_PM4_GPU_SCALE * SYS_ADC_CH7_VGPU_SCALE)*/
/*#define SYS_ENM_CH0_SYS_SCALE 13421772800.0*/
/*#define SYS_ENM_CH1_A57_SCALE 6710886400.0*/
/*#define SYS_ENM_CH0_A53_SCALE 13421772800.0*/
/*#define SYS_ENM_CH0_GPU_SCALE 6710886400.0*/
// Ignore individual errors but if see too many, abort.
#define ERROR_THRESHOLD 10
// Default counter poll period (in milliseconds).
#define DEFAULT_PERIOD 100
// A single reading from the energy meter. The values are the proper readings converted
// to appropriate units (e.g. Watts for power); they are *not* raw counter values.
struct reading
{
	// Per-rail current readings (channels 0-3: sys, A57, A53, GPU);
	// units follow from the SYS_ADC_CH*_SCALE divisors -- presumably
	// Amps, TODO confirm against the Juno TRM.
	double sys_adc_ch0_pm1_sys;
	double sys_adc_ch1_pm2_a57;
	double sys_adc_ch2_pm3_a53;
	double sys_adc_ch3_pm4_gpu;
	// Per-rail voltage readings (channels 4-7).
	double sys_adc_ch4_vsys;
	double sys_adc_ch5_va57;
	double sys_adc_ch6_va53;
	double sys_adc_ch7_vgpu;
	// Instantaneous power per rail.
	double sys_pow_ch04_sys;
	double sys_pow_ch15_a57;
	double sys_pow_ch26_a53;
	double sys_pow_ch37_gpu;
	// Cumulative energy per rail (64-bit counters in the APB block).
	double sys_enm_ch0_sys;
	double sys_enm_ch1_a57;
	double sys_enm_ch0_a53;
	double sys_enm_ch0_gpu;
};
/*
 * Combine two consecutive 32-bit APB register words into one 64-bit value:
 * buffer[index] holds the low word, buffer[index+1] the high word.
 *
 * Declared `static inline`: a plain `inline` definition provides no
 * external definition in C99/C11, which can fail to link if the compiler
 * chooses not to inline the call.
 */
static inline uint64_t join_64bit_register(const uint32_t *buffer, int index)
{
	uint64_t result = 0;
	result |= buffer[index];
	result |= (uint64_t)(buffer[index+1]) << 32;
	return result;
}
/*
 * Sleep for the requested interval, resuming after signal interruptions.
 *
 * Wraps nanosleep(): on EINTR it restarts with the remaining time until
 * the full interval has elapsed.  Returns 0 on success or the errno from
 * a failed nanosleep() call.  (The original fell off the end of the
 * function in the EINTR path -- undefined behavior when the caller reads
 * the result.)
 */
int nsleep(const struct timespec *req, struct timespec *rem)
{
	struct timespec temp_rem;
	if (nanosleep(req, rem) == -1)
	{
		if (errno == EINTR)
		{
			// Resume the interrupted sleep and propagate its result.
			return nsleep(rem, &temp_rem);
		}
		else
		{
			return errno;
		}
	}
	return 0;
}
/* Print usage information to stderr. */
void print_help()
{
	fprintf(stderr, "Usage: readenergy [-t PERIOD] -o OUTFILE\n\n"
		"Read Juno energy counters every PERIOD milliseconds, writing them\n"
		"to OUTFILE in CSV format until SIGTERM is received.\n\n"
		"Parameters:\n"
		"	PERIOD is the counter poll period in milliseconds.\n"
		"	(Defaults to 100 milliseconds.)\n"
		"	OUTFILE is the output file path\n");
}
// debugging only...
/*
 * Debugging helper: write a message to stderr and sync() so it is visible
 * even if the process subsequently hangs or is killed.
 *
 * `static inline` fixes the C99/C11 linkage hazard of the original plain
 * `inline` (no external definition is emitted for `inline` alone).
 */
static inline void dprint(char *msg)
{
	fprintf(stderr, "%s\n", msg);
	sync();
}
// -------------------------------------- config ----------------------------------------------------
/* Runtime options for the sampling loop. */
struct config
{
	struct timespec period;  /* interval between counter polls */
	char *output_file;       /* CSV destination (argv memory; not owned) */
};

/* Convert a millisecond count into the poll-period timespec. */
void config_init_period_from_millis(struct config *this, long millis)
{
	long whole_seconds = millis / 1000;
	long leftover_millis = millis % 1000;
	this->period.tv_sec = (time_t)whole_seconds;
	this->period.tv_nsec = leftover_millis * 1000000;
}
void config_init(struct config *this, int argc, char *argv[])
{
this->output_file = NULL;
config_init_period_from_millis(this, DEFAULT_PERIOD);
int opt;
while ((opt = getopt(argc, argv, "ht:o:")) != -1)
{
switch(opt)
{
case 't':
config_init_period_from_millis(this, atol(optarg));
break;
case 'o':
this->output_file = optarg;
break;
case 'h':
print_help();
exit(EXIT_SUCCESS);
break;
default:
fprintf(stderr, "ERROR: Unexpected option %s\n\n", opt);
print_help();
exit(EXIT_FAILURE);
}
}
if (this->output_file == NULL)
{
fprintf(stderr, "ERROR: Mandatory -o option not specified.\n\n");
print_help();
exit(EXIT_FAILURE);
}
}
// -------------------------------------- /config ---------------------------------------------------
// -------------------------------------- emeter ----------------------------------------------------
/* State for one metering session. */
struct emeter
{
	int fd;           /* /dev/mem descriptor backing the mapping */
	FILE *out;        /* CSV output stream */
	void *mmap_base;  /* mapped APB counter block (APB_SIZE bytes) */
};
/*
 * Open the CSV output file, map the APB energy-counter registers from
 * /dev/mem, and write the CSV header row.  Exits the process (after
 * releasing what was acquired) on any failure.
 */
void emeter_init(struct emeter *this, char *outfile)
{
	this->out = fopen(outfile, "w");
	if (this->out == NULL)
	{
		fprintf(stderr, "ERROR: Could not open output file %s; got %s\n", outfile, strerror(errno));
		exit(EXIT_FAILURE);
	}
	this->fd = open("/dev/mem", O_RDONLY);
	if(this->fd < 0)
	{
		fprintf(stderr, "ERROR: Can't open /dev/mem; got %s\n", strerror(errno));
		fclose(this->out);
		exit(EXIT_FAILURE);
	}
	this->mmap_base = mmap(NULL, APB_SIZE, PROT_READ, MAP_SHARED, this->fd, APB_BASE_MEMORY);
	if (this->mmap_base == MAP_FAILED)
	{
		fprintf(stderr, "ERROR: mmap failed; got %s\n", strerror(errno));
		close(this->fd);
		fclose(this->out);
		exit(EXIT_FAILURE);
	}
	// Header row matching the 16 fields written by emeter_take_reading().
	fprintf(this->out, "sys_curr,a57_curr,a53_curr,gpu_curr,"
		"sys_volt,a57_volt,a53_volt,gpu_volt,"
		"sys_pow,a57_pow,a53_pow,gpu_pow,"
		"sys_cenr,a57_cenr,a53_cenr,gpu_cenr\n");
}
/*
 * Decode one snapshot of all sixteen measurements from the mapped APB
 * counter block into `reading`, applying the per-channel bit mask and
 * scale divisor (see the SYS_* macros above).  Energy counters are
 * 64-bit values split across two consecutive 32-bit words.
 */
void emeter_read_measurements(struct emeter *this, struct reading *reading)
{
	uint32_t *buffer = (uint32_t *)this->mmap_base;
	reading->sys_adc_ch0_pm1_sys = (double)(CMASK & buffer[BASE_INDEX+0]) / SYS_ADC_CH0_PM1_SYS_SCALE;
	reading->sys_adc_ch1_pm2_a57 = (double)(CMASK & buffer[BASE_INDEX+1]) / SYS_ADC_CH1_PM2_A57_SCALE;
	reading->sys_adc_ch2_pm3_a53 = (double)(CMASK & buffer[BASE_INDEX+2]) / SYS_ADC_CH2_PM3_A53_SCALE;
	reading->sys_adc_ch3_pm4_gpu = (double)(CMASK & buffer[BASE_INDEX+3]) / SYS_ADC_CH3_PM4_GPU_SCALE;
	reading->sys_adc_ch4_vsys = (double)(VMASK & buffer[BASE_INDEX+4]) / SYS_ADC_CH4_VSYS_SCALE;
	reading->sys_adc_ch5_va57 = (double)(VMASK & buffer[BASE_INDEX+5]) / SYS_ADC_CH5_VA57_SCALE;
	reading->sys_adc_ch6_va53 = (double)(VMASK & buffer[BASE_INDEX+6]) / SYS_ADC_CH6_VA53_SCALE;
	reading->sys_adc_ch7_vgpu = (double)(VMASK & buffer[BASE_INDEX+7]) / SYS_ADC_CH7_VGPU_SCALE;
	reading->sys_pow_ch04_sys = (double)(PMASK & buffer[BASE_INDEX+8]) / SYS_POW_CH04_SYS_SCALE;
	reading->sys_pow_ch15_a57 = (double)(PMASK & buffer[BASE_INDEX+9]) / SYS_POW_CH15_A57_SCALE;
	reading->sys_pow_ch26_a53 = (double)(PMASK & buffer[BASE_INDEX+10]) / SYS_POW_CH26_A53_SCALE;
	reading->sys_pow_ch37_gpu = (double)(PMASK & buffer[BASE_INDEX+11]) / SYS_POW_CH37_GPU_SCALE;
	reading->sys_enm_ch0_sys = (double)join_64bit_register(buffer, BASE_INDEX+12) / SYS_ENM_CH0_SYS_SCALE;
	reading->sys_enm_ch1_a57 = (double)join_64bit_register(buffer, BASE_INDEX+14) / SYS_ENM_CH1_A57_SCALE;
	reading->sys_enm_ch0_a53 = (double)join_64bit_register(buffer, BASE_INDEX+16) / SYS_ENM_CH0_A53_SCALE;
	reading->sys_enm_ch0_gpu = (double)join_64bit_register(buffer, BASE_INDEX+18) / SYS_ENM_CH0_GPU_SCALE;
}
/*
 * Take one reading and append it as a CSV row to the output file.
 *
 * Per the policy stated above (ERROR_THRESHOLD), individual write errors
 * are logged and tolerated, but too many cause the process to abort.
 * The error counter is `static` so it actually accumulates across calls;
 * as a plain local (as originally written) it was reset to zero on every
 * call and the threshold could never be reached.
 */
void emeter_take_reading(struct emeter *this)
{
	static struct reading reading;
	static int error_count = 0;
	emeter_read_measurements(this, &reading);
	int ret = fprintf(this->out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
			  reading.sys_adc_ch0_pm1_sys,
			  reading.sys_adc_ch1_pm2_a57,
			  reading.sys_adc_ch2_pm3_a53,
			  reading.sys_adc_ch3_pm4_gpu,
			  reading.sys_adc_ch4_vsys,
			  reading.sys_adc_ch5_va57,
			  reading.sys_adc_ch6_va53,
			  reading.sys_adc_ch7_vgpu,
			  reading.sys_pow_ch04_sys,
			  reading.sys_pow_ch15_a57,
			  reading.sys_pow_ch26_a53,
			  reading.sys_pow_ch37_gpu,
			  reading.sys_enm_ch0_sys,
			  reading.sys_enm_ch1_a57,
			  reading.sys_enm_ch0_a53,
			  reading.sys_enm_ch0_gpu);
	if (ret < 0)
	{
		fprintf(stderr, "ERROR: while writing a meter reading: %s\n", strerror(errno));
		if (++error_count > ERROR_THRESHOLD)
			exit(EXIT_FAILURE);
	}
}
/* Release the mapping, /dev/mem descriptor and output stream. */
void emeter_finalize(struct emeter *this)
{
	if (munmap(this->mmap_base, APB_SIZE) == -1)
	{
		// Report the error but don't bother doing anything else, as we're not gonna do
		// anything with emeter after this point anyway.
		fprintf(stderr, "ERROR: munmap failed; got %s\n", strerror(errno));
	}
	close(this->fd);
	fclose(this->out);
}
// -------------------------------------- /emeter ----------------------------------------------------
/* Set by the SIGTERM handler to request a clean shutdown of the main
 * loop.  Must be `volatile sig_atomic_t` -- a plain int written from a
 * signal handler and polled from the main loop (as originally declared)
 * may legally be cached in a register by the compiler. */
static volatile sig_atomic_t done = 0;

/* SIGTERM handler: only sets the flag; all real work stays in main(). */
void term_handler(int signum)
{
	done = 1;
}

/*
 * Entry point: install the SIGTERM handler, parse arguments, then poll
 * the energy meter once per configured period until SIGTERM arrives.
 */
int main(int argc, char *argv[])
{
	struct sigaction action;
	memset(&action, 0, sizeof(struct sigaction));
	action.sa_handler = term_handler;
	sigaction(SIGTERM, &action, NULL);

	struct config config;
	struct emeter emeter;
	config_init(&config, argc, argv);
	emeter_init(&emeter, config.output_file);

	struct timespec remaining;
	while (!done)
	{
		emeter_take_reading(&emeter);
		nsleep(&config.period, &remaining);
	}
	emeter_finalize(&emeter);
	return EXIT_SUCCESS;
}

12
wlauto/external/revent/Makefile vendored Normal file
View File

@@ -0,0 +1,12 @@
# CROSS_COMPILE=aarch64-linux-gnu- make
#
CC=gcc
# Static link so the binary has no runtime libc dependency on the device.
CFLAGS=-static -lc
revent: revent.c
	$(CROSS_COMPILE)$(CC) $(CFLAGS) revent.c -o revent
clean:
	rm -rf revent
.PHONY: clean

598
wlauto/external/revent/revent.c vendored Normal file
View File

@@ -0,0 +1,598 @@
/* Copyright 2012-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/input.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/time.h>
#ifdef ANDROID
#include <android/log.h>
#endif
/* Print a formatted error message to stderr and terminate the process. */
#define die(args...) do { \
	fprintf(stderr, "ERROR: "); \
	fprintf(stderr, args); \
	exit(EXIT_FAILURE); \
} while(0)
/* Verbose-mode printf, gated at runtime on the global `verbose` flag. */
#define dprintf(args...) if (verbose) printf(args)
/* Maximum number of /dev/input/event* devices probed, and the maximum
 * stored length of each device path. */
#define INPDEV_MAX_DEVICES 16
#define INPDEV_MAX_PATH 30
#ifndef ANDROID
/* Minimal stand-in for Bionic's strlcpy() on non-Android builds: copy at
 * most size-1 characters, zero-fill the rest of the buffer, and always
 * NUL-terminate.  Note it returns `size` (unlike BSD strlcpy, which
 * returns strlen(source)); callers in this file ignore the result. */
int strlcpy(char *dest, char *source, size_t size)
{
	size_t copy_len = strlen(source);
	if (copy_len > size - 1)
		copy_len = size - 1;
	memset(dest, '\0', size);
	memcpy(dest, source, copy_len);
	return size;
}
#endif
/* Boolean type for this pre-C99-style code base. */
typedef enum {
	FALSE=0,
	TRUE
} bool_t;
/* Top-level operating mode selected on the command line. */
typedef enum {
	RECORD=0,
	REPLAY,
	DUMP,
	INFO,
	INVALID
} revent_mode_t;
/* Parsed command-line arguments. */
typedef struct {
	revent_mode_t mode;
	int record_time;    /* recording timeout (seconds) */
	int device_number;  /* specific input device index, or -1 for all */
	char *file;         /* event log path */
} revent_args_t;
typedef struct {
	size_t id_pathc; /* Count of total paths so far. */
	char id_pathv[INPDEV_MAX_DEVICES][INPDEV_MAX_PATH]; /* List of paths matching pattern. */
} inpdev_t;
/* One recorded event: source device index plus the raw kernel event. */
typedef struct {
	int dev_idx;
	struct input_event event;
} replay_event_t;
/* In-memory copy of a recording, ready for playback. */
typedef struct {
	int num_fds;
	int num_events;
	int *fds;
	replay_event_t *events;
} replay_buffer_t;
bool_t verbose = FALSE;

/*
 * Return TRUE if `string` consists solely of decimal digits (the empty
 * string counts as numeric, matching the original behavior).
 *
 * isdigit()'s argument is cast to unsigned char: passing a plain
 * (possibly negative) char is undefined behavior.  The original also
 * used isdigit() without including <ctype.h> (implicit declaration).
 */
bool_t is_numeric(char *string)
{
	int len = strlen(string);
	int i = 0;
	while (i < len)
	{
		if (!isdigit((unsigned char)string[i]))
			return FALSE;
		i++;
	}
	return TRUE;
}
/* Return the size of `filename` in bytes.  On stat() failure die()
 * terminates the process, so the failure path never returns. */
off_t get_file_size(const char *filename) {
	struct stat st;
	if (stat(filename, &st) == 0)
		return st.st_size;
	die("Cannot determine size of %s: %s\n", filename, strerror(errno));
}
/*
 * Allocate and populate an inpdev_t with the input device paths to use.
 *
 * devid == -1: probe /dev/input/event0..event{INPDEV_MAX_DEVICES-1} and
 * keep every device that can be opened.  Otherwise use exactly
 * /dev/input/event<devid> and die() if it cannot be opened.
 * Always returns 0; all failures terminate the process via die().
 */
int inpdev_init(inpdev_t **inpdev, int devid)
{
	int i;
	int fd;

	*inpdev = malloc(sizeof(inpdev_t));
	if (*inpdev == NULL)
		die("out of memory\n"); /* original dereferenced an unchecked malloc */
	(*inpdev)->id_pathc = 0;

	if (devid == -1) {
		// device id was not specified so we want to record from all available input devices.
		for(i = 0; i < INPDEV_MAX_DEVICES; ++i)
		{
			sprintf((*inpdev)->id_pathv[(*inpdev)->id_pathc], "/dev/input/event%d", i);
			fd = open((*inpdev)->id_pathv[(*inpdev)->id_pathc], O_RDONLY);
			// >= 0: descriptor 0 is valid (the original tested fd > 0).
			if(fd >= 0)
			{
				close(fd);
				dprintf("opened %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]);
				(*inpdev)->id_pathc++;
			}
			else
			{
				dprintf("could not open %s\n", (*inpdev)->id_pathv[(*inpdev)->id_pathc]);
			}
		}
	}
	else {
		// device id was specified so record just that device.
		sprintf((*inpdev)->id_pathv[0], "/dev/input/event%d", devid);
		fd = open((*inpdev)->id_pathv[0], O_RDONLY);
		if(fd >= 0)
		{
			close(fd);
			dprintf("opened %s\n", (*inpdev)->id_pathv[0]);
			(*inpdev)->id_pathc++;
		}
		else
		{
			die("could not open %s\n", (*inpdev)->id_pathv[0]);
		}
	}
	return 0;
}
/* Release an inpdev_t allocated by inpdev_init().  Always returns 0. */
int inpdev_close(inpdev_t *inpdev)
{
	free(inpdev);
	return 0;
}
/* Query an input device node for its advertised name (EVIOCGNAME ioctl)
 * and print it; die()s if the device cannot be opened or queried. */
void printDevProperties(const char* aDev)
{
	int fd = -1;
	char name[256]= "Unknown";
	if ((fd = open(aDev, O_RDONLY)) < 0)
		die("could not open %s\n", aDev);
	if(ioctl(fd, EVIOCGNAME(sizeof(name)), name) < 0)
		die("evdev ioctl failed on %s\n", aDev);
	printf("The device on %s says its name is %s\n",
	       aDev, name);
	close(fd);
}
void dump(const char *logfile)
{
int fdin = open(logfile, O_RDONLY);
if (fdin < 0) die("Could not open eventlog %s\n", logfile);
int nfds;
size_t rb = read(fdin, &nfds, sizeof(nfds));
if (rb != sizeof(nfds)) die("problems reading eventlog\n");
int *fds = malloc(sizeof(int)*nfds);
if (!fds) die("out of memory\n");
int len;
int i;
char buf[INPDEV_MAX_PATH];
inpdev_t *inpdev = malloc(sizeof(inpdev_t));
inpdev->id_pathc = 0;
for (i=0; i<nfds; i++) {
memset(buf, 0, sizeof(buf));
rb = read(fdin, &len, sizeof(len));
if (rb != sizeof(len)) die("problems reading eventlog\n");
rb = read(fdin, &buf[0], len);
if (rb != len) die("problems reading eventlog\n");
strlcpy(inpdev->id_pathv[inpdev->id_pathc], buf, INPDEV_MAX_PATH);
inpdev->id_pathv[inpdev->id_pathc][INPDEV_MAX_PATH-1] = '\0';
inpdev->id_pathc++;
}
struct input_event ev;
int count = 0;
while(1) {
int idx;
rb = read(fdin, &idx, sizeof(idx));
if (rb != sizeof(idx)) break;
rb = read(fdin, &ev, sizeof(ev));
if (rb < (int)sizeof(ev)) break;
printf("%10u.%-6u %30s type %2d code %3d value %4d\n",
(unsigned int)ev.time.tv_sec, (unsigned int)ev.time.tv_usec,
inpdev->id_pathv[idx], ev.type, ev.code, ev.value);
count++;
}
printf("\nTotal: %d events\n", count);
close(fdin);
free(inpdev);
}
/*
 * Load an entire event log into memory for playback: read the header,
 * open each recorded device node for writing, then read all events,
 * rebasing their timestamps so the first event is at time zero.
 * Always returns 0; failures terminate the process via die().
 *
 * NOTE(review): the two leading malloc()s (buffer struct, fds array) are
 * not checked before use, and allocating fsize bytes for events
 * over-allocates by the header size -- harmless but worth confirming.
 */
int replay_buffer_init(replay_buffer_t **buffer, const char *logfile)
{
	*buffer = malloc(sizeof(replay_buffer_t));
	replay_buffer_t *buff = *buffer;
	// Upper bound: the whole file can never hold more events than this.
	off_t fsize = get_file_size(logfile);
	buff->events = (replay_event_t *)malloc((size_t)fsize);
	if (!buff->events)
		die("out of memory\n");
	int fdin = open(logfile, O_RDONLY);
	if (fdin < 0)
		die("Could not open eventlog %s\n", logfile);
	size_t rb = read(fdin, &(buff->num_fds), sizeof(buff->num_fds));
	if (rb!=sizeof(buff->num_fds))
		die("problems reading eventlog\n");
	buff->fds = malloc(sizeof(int) * buff->num_fds);
	if (!buff->fds)
		die("out of memory\n");
	int len, i;
	char path_buff[256]; // should be more than enough
	// Re-open every device that was recorded, in recorded order, so
	// event dev_idx values map onto buff->fds.
	for (i = 0; i < buff->num_fds; i++) {
		memset(path_buff, 0, sizeof(path_buff));
		rb = read(fdin, &len, sizeof(len));
		if (rb!=sizeof(len))
			die("problems reading eventlog\n");
		rb = read(fdin, &path_buff[0], len);
		if (rb != len)
			die("problems reading eventlog\n");
		buff->fds[i] = open(path_buff, O_WRONLY | O_NDELAY);
		if (buff->fds[i] < 0)
			die("could not open device file %s\n", path_buff);
	}
	struct timeval start_time;
	replay_event_t rep_ev;
	buff->num_events = 0;
	// Slurp events until EOF, converting absolute timestamps into
	// offsets from the first event's timestamp.
	while(1) {
		int idx;
		rb = read(fdin, &rep_ev, sizeof(rep_ev));
		if (rb < (int)sizeof(rep_ev))
			break;
		if (buff->num_events == 0) {
			start_time = rep_ev.event.time;
		}
		timersub(&(rep_ev.event.time), &start_time, &(rep_ev.event.time));
		memcpy(&(buff->events[buff->num_events]), &rep_ev, sizeof(rep_ev));
		buff->num_events++;
	}
	close(fdin);
	return 0;
}
/*
 * Release a replay buffer: close the device descriptors opened by
 * replay_buffer_init() (the original leaked them) and free all memory.
 * Always returns 0.
 */
int replay_buffer_close(replay_buffer_t *buff)
{
	int i;
	for (i = 0; i < buff->num_fds; i++)
		close(buff->fds[i]);
	free(buff->fds);
	free(buff->events);
	free(buff);
	return 0;
}
/*
 * Replay all buffered events, pacing them by their recorded timestamps
 * (stored as offsets from the start of the recording): sleep until each
 * timestamp's offset from "now at start" has elapsed, then write every
 * event sharing that timestamp to its device.
 *
 * Returns 0.  Fixes two defects in the original: it fell off the end of
 * a non-void function (UB if the caller reads the result), and its inner
 * loop read events[i] after the final increment without a bounds check
 * (out-of-bounds read on the last event).
 */
int replay_buffer_play(replay_buffer_t *buff)
{
	int i = 0, rb;
	struct timeval start_time, now, desired_time, last_event_delta, delta;
	memset(&last_event_delta, 0, sizeof(struct timeval));
	gettimeofday(&start_time, NULL);

	while (i < buff->num_events) {
		gettimeofday(&now, NULL);
		timeradd(&start_time, &last_event_delta, &desired_time);
		if (timercmp(&desired_time, &now, >)) {
			timersub(&desired_time, &now, &delta);
			useconds_t d = (useconds_t)delta.tv_sec * 1000000 + delta.tv_usec;
			dprintf("now %u.%u desiredtime %u.%u sleeping %u uS\n",
				(unsigned int)now.tv_sec, (unsigned int)now.tv_usec,
				(unsigned int)desired_time.tv_sec, (unsigned int)desired_time.tv_usec, d);
			usleep(d);
		}
		// The group's shared timestamp is the current event's.
		struct timeval group_time = buff->events[i].event.time;
		while (i < buff->num_events &&
		       !timercmp(&(buff->events[i].event.time), &group_time, !=)) {
			int idx = buff->events[i].dev_idx;
			struct input_event ev = buff->events[i].event;
			rb = write(buff->fds[idx], &ev, sizeof(ev));
			if (rb!=sizeof(ev))
				die("problems writing\n");
			dprintf("replayed event: type %d code %d value %d\n", ev.type, ev.code, ev.value);
			i++;
		}
		// Next wake-up target: the following group's timestamp (if any).
		if (i < buff->num_events)
			last_event_delta = buff->events[i].event.time;
	}
	return 0;
}
/* Load `logfile` into memory, play it back against the recorded devices,
 * and release all resources.  On Android, bracket the playback with
 * logcat markers so test harnesses can detect start/completion. */
void replay(const char *logfile)
{
	replay_buffer_t *replay_buffer;
	replay_buffer_init(&replay_buffer, logfile);
#ifdef ANDROID
	__android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay starting");
#endif
	replay_buffer_play(replay_buffer);
#ifdef ANDROID
	__android_log_write(ANDROID_LOG_INFO, "REVENT", "Replay complete");
#endif
	replay_buffer_close(replay_buffer);
}
/*
 * Record input events from all devices in `inpdev` into `logfile`.
 *
 * File format: device count, then for each device a length-prefixed
 * path, followed by (device index, struct input_event) pairs.
 *
 * Recording stops after `delay` seconds with no events on any device,
 * or when a line arrives on STDIN -- in which case a fake KEY_ENTER
 * release plus SYN event is appended so playback does not leave the
 * return key stuck down.
 */
void record(inpdev_t *inpdev, int delay, const char *logfile)
{
	fd_set readfds;
	FILE *fdout;
	struct input_event ev;
	int i;
	int maxfd = 0;
	int keydev = 0;

	int *fds = malloc(sizeof(int) * inpdev->id_pathc);
	if (!fds)
		die("out of memory\n");

	fdout = fopen(logfile, "wb");
	if (!fdout)
		die("Could not open eventlog %s\n", logfile);

	/* Header: device count, then each device path (length-prefixed). */
	fwrite(&inpdev->id_pathc, sizeof(inpdev->id_pathc), 1, fdout);
	for (i = 0; i < inpdev->id_pathc; i++) {
		int len = strlen(inpdev->id_pathv[i]);
		fwrite(&len, sizeof(len), 1, fdout);
		fwrite(inpdev->id_pathv[i], len, 1, fdout);
	}

	for (i = 0; i < inpdev->id_pathc; i++) {
		fds[i] = open(inpdev->id_pathv[i], O_RDONLY);
		/* Check for failure BEFORE using the fd; the original
		 * updated maxfd (and logged) with a possibly-negative fd,
		 * and its die() format contained a stray "\%s" escape. */
		if (fds[i] < 0)
			die("could not open %s\n", inpdev->id_pathv[i]);
		if (fds[i] > maxfd)
			maxfd = fds[i];
		dprintf("opened %s with %d\n", inpdev->id_pathv[i], fds[i]);
	}

	int count = 0;
	struct timeval tout;
	while (1) {
		FD_ZERO(&readfds);
		FD_SET(STDIN_FILENO, &readfds);
		for (i = 0; i < inpdev->id_pathc; i++)
			FD_SET(fds[i], &readfds);

		/* wait for input, or for the inactivity timeout */
		tout.tv_sec = delay;
		tout.tv_usec = 0;
		int r = select(maxfd + 1, &readfds, NULL, NULL, &tout);
		if (!r)
			break;	/* timed out */

		if (FD_ISSET(STDIN_FILENO, &readfds)) {
			/* The key-down for the return key has already been
			 * recorded, so append the matching key-up. */
			memset(&ev, 0, sizeof(ev));
			ev.type = EV_KEY;
			ev.code = KEY_ENTER;
			ev.value = 0;
			gettimeofday(&ev.time, NULL);
			fwrite(&keydev, sizeof(keydev), 1, fdout);
			fwrite(&ev, sizeof(ev), 1, fdout);
			memset(&ev, 0, sizeof(ev));	/* SYN event */
			gettimeofday(&ev.time, NULL);
			fwrite(&keydev, sizeof(keydev), 1, fdout);
			fwrite(&ev, sizeof(ev), 1, fdout);
			dprintf("added fake return exiting...\n");
			break;
		}

		for (i = 0; i < inpdev->id_pathc; i++) {
			if (FD_ISSET(fds[i], &readfds)) {
				dprintf("Got event from %s\n", inpdev->id_pathv[i]);
				memset(&ev, 0, sizeof(ev));
				/* read() returns ssize_t; the original's size_t
				 * silently turned -1 into a huge value. */
				ssize_t rb = read(fds[i], (void *)&ev, sizeof(ev));
				dprintf("%d event: type %d code %d value %d\n",
					(unsigned int)rb, ev.type, ev.code, ev.value);
				/* Remember which device produced the ENTER press so
				 * the fake key-up above targets the same device. */
				if (ev.type == EV_KEY && ev.code == KEY_ENTER && ev.value == 1)
					keydev = i;
				fwrite(&i, sizeof(i), 1, fdout);
				fwrite(&ev, sizeof(ev), 1, fdout);
				count++;
			}
		}
	}

	for (i = 0; i < inpdev->id_pathc; i++)
		close(fds[i]);
	fclose(fdout);
	free(fds);
	dprintf("Recorded %d events\n", count);
}
/*
 * Print command-line usage to STDOUT.  (Fixes several typos in the
 * original help text and the copy-pasted description of `replay FILE`,
 * which claimed events would be *recorded* into the file.)
 */
void usage()
{
	printf("usage:\n revent [-h] [-v] COMMAND [OPTIONS] \n"
	       "\n"
	       " Options:\n"
	       "     -h  print this help message and quit.\n"
	       "     -v  enable verbose output.\n"
	       "\n"
	       " Commands:\n"
	       "     record [-t SECONDS] [-d DEVICE] FILE\n"
	       "         Record input events; stops after return on STDIN (or, optionally, \n"
	       "         a fixed delay)\n"
	       "\n"
	       "         FILE       file into which events will be recorded.\n"
	       "         -t SECONDS time, in seconds, for which to record events.\n"
	       "                    if not specified, recording will continue until\n"
	       "                    return key is pressed.\n"
	       "         -d DEVICE  the number of the input device from which\n"
	       "                    events will be recorded. If not specified, \n"
	       "                    all available inputs will be used.\n"
	       "\n"
	       "     replay FILE\n"
	       "         replays previously recorded events from the specified file.\n"
	       "\n"
	       "         FILE       file from which events will be replayed.\n"
	       "\n"
	       "     dump FILE\n"
	       "         dumps the contents of the specified event log to STDOUT in\n"
	       "         human-readable form.\n"
	       "\n"
	       "         FILE       event log which will be dumped.\n"
	       "\n"
	       "     info\n"
	       "         shows info about each event char device\n"
	       "\n"
	       );
}
/*
 * Parse command-line arguments into a freshly allocated revent_args_t
 * (returned via *rargs).  Dies with a message on any invalid option or
 * option/command combination.  The returned structure must be released
 * with revent_args_close().
 */
void revent_args_init(revent_args_t **rargs, int argc, char** argv)
{
	*rargs = malloc(sizeof(revent_args_t));
	if (*rargs == NULL)
		die("out of memory\n");	/* malloc result was unchecked */
	revent_args_t *revent_args = *rargs;
	revent_args->mode = INVALID;
	revent_args->record_time = INT_MAX;	/* sentinel: -t not given */
	revent_args->device_number = -1;	/* sentinel: -d not given */
	revent_args->file = NULL;

	int opt;
	while ((opt = getopt(argc, argv, "ht:d:v")) != -1)
	{
		switch (opt) {
		case 'h':
			usage();
			exit(0);
			break;
		case 't':
			if (is_numeric(optarg)) {
				revent_args->record_time = atoi(optarg);
				dprintf("timeout: %d\n", revent_args->record_time);
			} else {
				die("-t parameter must be numeric; got %s.\n", optarg);
			}
			break;
		case 'd':
			if (is_numeric(optarg)) {
				revent_args->device_number = atoi(optarg);
				dprintf("device: %d\n", revent_args->device_number);
			} else {
				die("-d parameter must be numeric; got %s.\n", optarg);
			}
			break;
		case 'v':
			verbose = TRUE;
			break;
		default:
			die("Unexpected option: %c", opt);
		}
	}

	/* First positional argument is the command. */
	int next_arg = optind;
	if (next_arg == argc) {
		usage();
		die("Must specify a command.\n");
	}
	if (!strcmp(argv[next_arg], "record"))
		revent_args->mode = RECORD;
	else if (!strcmp(argv[next_arg], "replay"))
		revent_args->mode = REPLAY;
	else if (!strcmp(argv[next_arg], "dump"))
		revent_args->mode = DUMP;
	else if (!strcmp(argv[next_arg], "info"))
		revent_args->mode = INFO;
	else {
		usage();
		die("Unknown command -- %s\n", argv[next_arg]);
	}
	next_arg++;

	/* Optional second positional argument is the event-log file. */
	if (next_arg != argc) {
		revent_args->file = argv[next_arg];
		dprintf("file: %s\n", revent_args->file);
		next_arg++;
		if (next_arg != argc) {
			die("Trailing arguments (use -h for help).\n");	/* typo fixed */
		}
	}

	/* Cross-validate options against the selected command. */
	if ((revent_args->mode != RECORD) && (revent_args->record_time != INT_MAX)) {
		die("-t parameter is only valid for \"record\" command.\n");
	}
	if ((revent_args->mode != RECORD) && (revent_args->device_number != -1)) {
		die("-d parameter is only valid for \"record\" command.\n");
	}
	if ((revent_args->mode == INFO) && (revent_args->file != NULL)) {
		die("File path cannot be specified for \"info\" command.\n");
	}
	if (((revent_args->mode == RECORD) || (revent_args->mode == REPLAY)) && (revent_args->file == NULL)) {
		die("Must specify a file for recording/replaying (use -h for help).\n");
	}
}
/*
 * Release the argument structure allocated by revent_args_init().
 * Always returns 0; free(NULL) is a harmless no-op.
 */
int revent_args_close(revent_args_t *rargs)
{
	free(rargs);
	return 0;
}
/*
 * Entry point: parse arguments, enumerate the input devices, then
 * dispatch to the requested sub-command (record / replay / dump / info).
 */
int main(int argc, char** argv)
{
	int i;
	revent_args_t *rargs;	/* (removed unused local `char *logfile`) */
	revent_args_init(&rargs, argc, argv);

	inpdev_t *inpdev;
	inpdev_init(&inpdev, rargs->device_number);

	switch (rargs->mode) {
	case RECORD:
		record(inpdev, rargs->record_time, rargs->file);
		break;
	case REPLAY:
		replay(rargs->file);
		break;
	case DUMP:
		dump(rargs->file);
		break;
	case INFO:
		for (i = 0; i < inpdev->id_pathc; i++) {
			printDevProperties(inpdev->id_pathv[i]);
		}
	};

	inpdev_close(inpdev);
	revent_args_close(rargs);
	return 0;
}

92
wlauto/external/terminalsize.py vendored Normal file
View File

@@ -0,0 +1,92 @@
# Taken from
# https://gist.github.com/jtriley/1108174
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
print "default"
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
    # Query the Windows console size via the Win32 API through ctypes.
    # Returns (width, height), or falls through to an implicit None if
    # anything fails (e.g. not on Windows, or no console attached).
    try:
        from ctypes import windll, create_string_buffer
        # stdin handle is -10
        # stdout handle is -11
        # stderr handle is -12
        h = windll.kernel32.GetStdHandle(-12)
        csbi = create_string_buffer(22)  # CONSOLE_SCREEN_BUFFER_INFO is 22 bytes
        res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
        if res:
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom,
             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
            # Use the visible window rectangle, not the (possibly larger)
            # screen buffer size.
            sizex = right - left + 1
            sizey = bottom - top + 1
            return sizex, sizey
    except:
        pass
def _get_terminal_size_tput():
    """Query the terminal size via ``tput``; return (cols, rows) or None.

    src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
    """
    try:
        # BUG FIX: the original used subprocess.check_call(), which
        # returns the exit status (0), not the command output -- so it
        # always produced (0, 0).  check_output() captures what tput
        # actually prints.
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
        return (cols, rows)
    except:
        pass
def _get_terminal_size_linux():
    """Determine the terminal size on Linux/OS X/Cygwin.

    Tries a TIOCGWINSZ ioctl on stdin, stdout and stderr, then on the
    controlling terminal, then the LINES/COLUMNS environment variables.
    Returns (width, height), or None if nothing worked.
    """
    def query_winsize(fd):
        # The ioctl yields two packed shorts: (rows, cols).
        # Returns None on any failure.
        try:
            import fcntl
            import termios
            return struct.unpack('hh',
                                 fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except:
            pass

    rows_cols = query_winsize(0) or query_winsize(1) or query_winsize(2)
    if not rows_cols:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            rows_cols = query_winsize(fd)
            os.close(fd)
        except:
            pass
    if not rows_cols:
        try:
            rows_cols = (os.environ['LINES'], os.environ['COLUMNS'])
        except:
            return None
    # Stored as (rows, cols); callers expect (width, height).
    return int(rows_cols[1]), int(rows_cols[0])
if __name__ == "__main__":
    # Quick manual check: print the detected terminal dimensions.
    sizex, sizey = get_terminal_size()
    print 'width =', sizex, 'height =', sizey

21
wlauto/external/uiauto/build.sh vendored Executable file
View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Abort immediately if the build fails, so we never copy a stale or
# missing class file into wlauto/common (the original kept going).
set -e

ant build
cp bin/classes/com/arm/wlauto/uiauto/BaseUiAutomation.class ../../common

92
wlauto/external/uiauto/build.xml vendored Normal file
View File

@@ -0,0 +1,92 @@
<?xml version="1.0" encoding="UTF-8"?>
<project name="com.arm.wlauto.uiauto" default="help">
<!-- The local.properties file is created and updated by the 'android' tool.
It contains the path to the SDK. It should *NOT* be checked into
Version Control Systems. -->
<property file="local.properties" />
<!-- The ant.properties file can be created by you. It is only edited by the
'android' tool to add properties to it.
This is the place to change some Ant specific build properties.
Here are some properties you may want to change/update:
source.dir
The name of the source directory. Default is 'src'.
out.dir
The name of the output directory. Default is 'bin'.
For other overridable properties, look at the beginning of the rules
files in the SDK, at tools/ant/build.xml
Properties related to the SDK location or the project target should
be updated using the 'android' tool with the 'update' action.
This file is an integral part of the build system for your
application and should be checked into Version Control Systems.
-->
<property file="ant.properties" />
<!-- if sdk.dir was not set from one of the property file, then
get it from the ANDROID_HOME env var.
This must be done before we load project.properties since
the proguard config can use sdk.dir -->
<property environment="env" />
<condition property="sdk.dir" value="${env.ANDROID_HOME}">
<isset property="env.ANDROID_HOME" />
</condition>
<!-- The project.properties file is created and updated by the 'android'
tool, as well as ADT.
This contains project specific properties such as project target, and library
dependencies. Lower level build properties are stored in ant.properties
(or in .classpath for Eclipse projects).
This file is an integral part of the build system for your
application and should be checked into Version Control Systems. -->
<loadproperties srcFile="project.properties" />
<!-- quick check on sdk.dir -->
<fail
message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_HOME environment variable."
unless="sdk.dir"
/>
<!--
Import per project custom build rules if present at the root of the project.
This is the place to put custom intermediary targets such as:
-pre-build
-pre-compile
-post-compile (This is typically used for code obfuscation.
Compiled code location: ${out.classes.absolute.dir}
If this is not done in place, override ${out.dex.input.absolute.dir})
-post-package
-post-build
-pre-clean
-->
<import file="custom_rules.xml" optional="true" />
<!-- Import the actual build file.
To customize existing targets, there are two options:
- Customize only one target:
- copy/paste the target into this file, *before* the
<import> task.
- customize it to your needs.
- Customize the whole content of build.xml
- copy/paste the content of the rules files (minus the top node)
into this file, replacing the <import> task.
- customize to your needs.
***********************
****** IMPORTANT ******
***********************
In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
in order to avoid having your file be overridden by tools such as "android update project"
-->
<!-- version-tag: VERSION_TAG -->
<import file="${sdk.dir}/tools/ant/uibuild.xml" />
</project>

View File

@@ -0,0 +1,14 @@
# This file is automatically generated by Android Tools.
# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
#
# This file must be checked in Version Control Systems.
#
# To customize properties used by the Ant build system edit
# "ant.properties", and override values to adapt the script to your
# project structure.
#
# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
# Project target.
target=android-17

View File

@@ -0,0 +1,113 @@
/* Copyright 2013-2015 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.arm.wlauto.uiauto;
import java.io.File;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.concurrent.TimeoutException;
import android.app.Activity;
import android.os.Bundle;
// Import the uiautomator libraries
import com.android.uiautomator.core.UiObject;
import com.android.uiautomator.core.UiObjectNotFoundException;
import com.android.uiautomator.core.UiScrollable;
import com.android.uiautomator.core.UiSelector;
import com.android.uiautomator.testrunner.UiAutomatorTestCase;
/**
 * Base class for WA UI-automation helpers: second-based sleeps/waits,
 * screenshots into the workload's working directory, and logcat polling.
 */
public class BaseUiAutomation extends UiAutomatorTestCase {

    /** Sleep for the given number of seconds (the base class sleeps in ms). */
    public void sleep(int second) {
        super.sleep(second * 1000);
    }

    /**
     * Capture a screenshot named {@code name}.png inside the "workdir"
     * directory passed in the test parameters.  Returns {@code true} on
     * older uiautomator builds that lack takeScreenshot(), so callers
     * treat it as a best-effort success.
     */
    public boolean takeScreenshot(String name) {
        Bundle params = getParams();
        String png_dir = params.getString("workdir");

        try {
            return getUiDevice().takeScreenshot(new File(png_dir, name + ".png"));
        } catch (NoSuchMethodError e) {
            return true;
        }
    }

    /** Wait (up to 600 s) for a TextView with the given text to appear. */
    public void waitText(String text) throws UiObjectNotFoundException {
        waitText(text, 600);
    }

    public void waitText(String text, int second) throws UiObjectNotFoundException {
        UiSelector selector = new UiSelector();
        UiObject text_obj = new UiObject(selector.text(text)
                                                 .className("android.widget.TextView"));
        waitObject(text_obj, second);
    }

    /** Wait (up to 600 s) for the given object to exist. */
    public void waitObject(UiObject obj) throws UiObjectNotFoundException {
        waitObject(obj, 600);
    }

    /** Wait up to {@code second} seconds for {@code obj}; throw if it never appears. */
    public void waitObject(UiObject obj, int second) throws UiObjectNotFoundException {
        if (! obj.waitForExists(second * 1000)){
            throw new UiObjectNotFoundException("UiObject is not found: "
                    + obj.getSelector().toString());
        }
    }

    public boolean waitUntilNoObject(UiObject obj, int second) {
        return obj.waitUntilGone(second * 1000);
    }

    /** Clear the logcat buffer. */
    public void clearLogcat() throws Exception {
        Runtime.getRuntime().exec("logcat -c");
    }

    /**
     * Poll logcat until {@code searchText} appears or {@code timeout}
     * milliseconds elapse.
     *
     * @throws TimeoutException if the text was not seen in time.
     */
    public void waitForLogcatText(String searchText, long timeout) throws Exception {
        long startTime = System.currentTimeMillis();
        Process process = Runtime.getRuntime().exec("logcat");
        BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()));

        long currentTime = System.currentTimeMillis();
        boolean found = false;
        try {
            String line;
            while ((currentTime - startTime) < timeout) {
                sleep(2);  // poll every two seconds
                while ((line = reader.readLine()) != null) {
                    if (line.contains(searchText)) {
                        found = true;
                        break;
                    }
                }
                if (found) {
                    break;
                }
                currentTime = System.currentTimeMillis();
            }
        } finally {
            reader.close();    // was leaked in the original
            process.destroy();
        }
        if (!found) {
            // BUG FIX: the original used "template".format(searchText), which
            // resolves to the *static* String.format(searchText) and silently
            // discards the template; use String.format(template, arg) instead.
            throw new TimeoutException(String.format(
                    "Timed out waiting for Logcat text \"%s\"", searchText));
        }
    }
}