
wa/workloads: pep8 fixes

Marc Bonnici 2018-07-03 13:23:16 +01:00
parent 185bff9029
commit 6e94cbd36b
18 changed files with 57 additions and 53 deletions
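All of the changes below are mechanical PEP 8 cleanups with no behavioural effect. As a rough composite sketch (assembled from the patterns in the hunks below, not itself a hunk from this commit), the recurring fixes are whitespace around operators, continuation-line alignment, and blank-line counts around class and function definitions:

    # Stand-in for WA's context.add_metric, just to make the sketch runnable.
    def add_metric(name, value, units, classifiers=None):
        pass

    phase = 3
    phase_mask = 1 << (phase - 1)   # was: 1<<(phase-1)  (missing whitespace around operators)

    add_metric('sign', 0.01, 'seconds',
               classifiers={'key_length': 2048})   # continuation line now aligned with the open paren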


@@ -69,6 +69,7 @@ class AdobeReader(ApkUiautoWorkload):
self.asset_directory = self.target.path.join(self.target.external_storage,
'Android', 'data',
'com.adobe.reader', 'files')
+
def init_resources(self, context):
super(AdobeReader, self).init_resources(context)
# Only accept certain file formats


@@ -17,6 +17,7 @@ import re
from wa import ApkUiautoWorkload
from wa.framework.exception import WorkloadError
+
class Androbench(ApkUiautoWorkload):
name = 'androbench'
@@ -43,14 +44,14 @@ class Androbench(ApkUiautoWorkload):
expected_results = len(self.regex_matches)
logcat_file = context.get_artifact_path('logcat')
with open(logcat_file) as fh:
-for line in fh:
+for line in fh:
for regex in self.regex_matches:
match = regex.search(line)
-if match:
+if match:
result = float(match.group(1))
entry = regex.pattern.rsplit(None, 1)[0]
context.add_metric(entry, result, 'MB/s', lower_is_better=False)
expected_results -= 1
if expected_results > 0:
raise WorkloadError("The Androbench workload has failed. Expected {} scores, Detected {} scores."
.format(len(self.regex_matches), expected_results))
msg = "The Androbench workload has failed. Expected {} scores, Detected {} scores."
raise WorkloadError(msg.format(len(self.regex_matches), expected_results))
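For context, the metric name here is derived from the regex itself: `regex.pattern.rsplit(None, 1)[0]` drops the trailing capture group, leaving the literal prefix. A minimal sketch, with a hypothetical pattern of the same shape as this workload's:

    import re

    regex = re.compile(r'Sequential Read ([\d.]+)')  # hypothetical pattern
    line = 'Sequential Read 412.5'                   # hypothetical logcat line
    match = regex.search(line)
    result = float(match.group(1))                   # 412.5
    entry = regex.pattern.rsplit(None, 1)[0]         # 'Sequential Read'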


@@ -16,6 +16,7 @@ import re
from wa import ApkUiautoWorkload, WorkloadError
+
class Antutu(ApkUiautoWorkload):
name = 'antutu'


@@ -89,7 +89,6 @@ class ApacheBenchmark(Workload):
self.path)
self.output = None
-
def run(self, context):
self.logger.debug(self.command)
self.output, _ = check_output(self.command, timeout=300, shell=True)
@@ -111,7 +110,7 @@ class ApacheBenchmark(Workload):
completed = int(get_line(fh, 'Complete requests').split(':')[1].strip())
failed = int(get_line(fh, 'Failed requests').split(':')[1].strip())
-fail_rate = failed / completed * 100
+fail_rate = failed / completed * 100
context.add_metric('failed_request', fail_rate, units='percent',
lower_is_better=True)
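For reference, `ab` prints its summary as colon-separated lines such as `Complete requests:      1000`, which is what `get_line` plus the `split(':')` above consumes. A minimal sketch of the computation, with hypothetical counts:

    line = 'Failed requests:        3'        # hypothetical ab output line
    failed = int(line.split(':')[1].strip())  # 3
    completed = 1000
    fail_rate = failed / completed * 100      # 0.3 (percent)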


@@ -18,7 +18,6 @@ from wa import ApkUiautoWorkload, Parameter
from wa.framework import pluginloader
-
class Applaunch(ApkUiautoWorkload):
name = 'applaunch'
@@ -85,7 +84,7 @@ class Applaunch(ApkUiautoWorkload):
description.
"""),
Parameter('applaunch_iterations', kind=int, default=1,
description="""
description="""
Number of iterations of the application launch
"""),
]


@@ -46,7 +46,7 @@ class Deepbench(Workload):
"""
parameters = [
-Parameter('test', default='gemm',
+Parameter('test', default='gemm',
allowed_values=['gemm', 'conv', 'sparse'],
description='''
Specifies which of the available benchmarks will be run.


@@ -34,17 +34,17 @@ from devlib.utils.android import grant_app_permissions
# Regexps for benchmark synchronization
REGEXPS = {
-'start' : '.*Displayed com.google.android.exoplayer2.demo/.PlayerActivity',
-'duration' : '.*period \[(?P<duration>[0-9]+.*)\]',
-'end' : '.*state \[.+, .+, E\]',
+'start': '.*Displayed com.google.android.exoplayer2.demo/.PlayerActivity',
+'duration': '.*period \[(?P<duration>[0-9]+.*)\]',
+'end': '.*state \[.+, .+, E\]',
'dropped_frames': '.*droppedFrames \[(?P<session_time>[0-9]+\.[0-9]+), (?P<count>[0-9]+)\]'
}
DOWNLOAD_URLS = {
'mp4_1080p': 'http://distribution.bbb3d.renderfarming.net/video/mp4/bbb_sunflower_1080p_30fps_normal.mp4',
-'mov_720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_h264.mov',
-'mov_480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_h264.mov',
+'mov_720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_h264.mov',
+'mov_480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_h264.mov',
'ogg_128kbps': 'http://upload.wikimedia.org/wikipedia/commons/c/ca/Tchaikovsky_-_Romeo_and_Juliet_Ouverture_-_Antal_Dorati_(1959).ogg',
}
@@ -200,12 +200,12 @@ class ExoPlayer(ApkWorkload):
if self.duration:
self.logger.info('Waiting {} seconds before ending playback'
-.format(self.duration))
+.format(self.duration))
time.sleep(self.duration)
else:
self.logger.info('Waiting for playback completion ({} seconds)'
-.format(media_duration_s))
-self.monitor.wait_for(REGEXPS['end'], timeout = media_duration_s + 30)
+.format(media_duration_s))
+self.monitor.wait_for(REGEXPS['end'], timeout=media_duration_s + 30)
def update_output(self, context):
regex = re.compile(REGEXPS['dropped_frames'])
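To make the synchronisation regexes concrete, here is a minimal sketch of what the `dropped_frames` pattern extracts, assuming a logcat line of the shape the demo app logs (the line itself is hypothetical):

    import re

    regex = re.compile(r'.*droppedFrames \[(?P<session_time>[0-9]+\.[0-9]+), (?P<count>[0-9]+)\]')
    line = 'EventLogger: droppedFrames [12.42, 3]'      # hypothetical logcat line
    match = regex.match(line)
    match.group('session_time'), match.group('count')   # ('12.42', '3')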


@@ -24,6 +24,7 @@ from wa import ApkUiautoWorkload, Parameter
from wa.framework.exception import ConfigError, WorkloadError
from wa.utils.misc import capitalize
+
class Geekbench(ApkUiautoWorkload):
name = 'geekbench'
@@ -168,7 +169,7 @@ class Geekbench(ApkUiautoWorkload):
for section in data['sections']:
context.add_metric(namemify(section['name'] + '_score', i), section['score'])
context.add_metric(namemify(section['name'] + '_multicore_score', i),
-section['multicore_score'])
+section['multicore_score'])
def update_result_4(self, context):
outfile_glob = self.target.path.join(self.target.package_data_directory, self.package, 'files', '*gb*')
@@ -196,6 +197,7 @@ class Geekbench(ApkUiautoWorkload):
update_result_5 = update_result_4
+
class GBWorkload(object):
"""
Geekbench workload (not to be confused with WA's workloads). This is a single test run by
@@ -403,6 +405,7 @@ class GeekbenchCorproate(Geekbench):
override=True)
]
+
def namemify(basename, i):
return basename + (' {}'.format(i) if i else '')
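For clarity, `namemify` appends the iteration index to a metric name except on the zeroth iteration, since `0` is falsy. A runnable sketch with a hypothetical basename:

    def namemify(basename, i):
        return basename + (' {}'.format(i) if i else '')

    namemify('runtime_score', 0)  # -> 'runtime_score'
    namemify('runtime_score', 2)  # -> 'runtime_score 2'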


@@ -83,7 +83,7 @@ class Googlephotos(ApkUiautoWorkload):
for i, f in enumerate(self.test_images):
orig_file_path = self.target.path.join(d, f)
-new_dir = self.target.path.join(e, 'wa', 'wa-{}'.format(i+1))
+new_dir = self.target.path.join(e, 'wa', 'wa-{}'.format(i + 1))
new_file_path = self.target.path.join(new_dir, f)
self.target.execute('mkdir -p {}'.format(new_dir))


@@ -60,7 +60,6 @@ class HWUITest(Workload):
super(HWUITest, self).__init__(target, *args, **kwargs)
HWUITest.target_exe = None
-
@once
def initialize(self, context):
host_exe = context.get_resource(Executable(self,
@@ -77,7 +76,7 @@ class HWUITest(Workload):
def extract_results(self, context):
if not self.output:
-return
+return
outfile = os.path.join(context.output_directory, 'hwuitest.output')
with open(outfile, 'w') as wfh:
wfh.write(self.output)
@@ -110,7 +109,7 @@ class HWUITest(Workload):
match['percent'],
"%",
classifiers={"loop": count,
"frames": self.frames})
"frames": self.frames})
else:
match = normal.match(value_string).groupdict()
context.add_metric(metric,


@@ -118,7 +118,7 @@ class Jankbench(ApkWorkload):
if self.pull_results_db:
target_file = self.target.path.join(self.target.package_data_directory,
self.package, 'databases', self.results_db_file)
-host_file = os.path.join(context.output_directory,self.results_db_file)
+host_file = os.path.join(context.output_directory, self.results_db_file)
self.target.pull(target_file, host_file, as_root=True)
context.add_artifact('jankbench-results', host_file, 'data')


@@ -149,8 +149,8 @@ class Lmbench(Workload):
parts = []
if self.cpus:
parts.append('{} taskset {} {}'.format(self.target.busybox,
-self.cpus.mask(),
-self.target_exe))
+self.cpus.mask(),
+self.target_exe))
else:
parts.append(self.target_exe)
if self.parallelism is not None:
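The branch above prepends a busybox taskset invocation so the benchmark runs pinned to the configured CPUs. A minimal sketch of the resulting command prefix, with a hypothetical busybox path and affinity mask:

    busybox = '/data/local/tmp/busybox'   # hypothetical on-target path
    target_exe = '/data/local/tmp/lmbench'
    mask = '0x3'                          # hypothetical mask: CPUs 0 and 1
    cmd = '{} taskset {} {}'.format(busybox, mask, target_exe)
    # -> '/data/local/tmp/busybox taskset 0x3 /data/local/tmp/lmbench'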


@@ -125,8 +125,8 @@ class Meabo(Workload):
description='''
Sets which cores each phase is run on.
''',
-constraint=lambda x: all(v>=-1 for v in x),
-default=[-1]*10,
+constraint=lambda x: all(v >= -1 for v in x),
+default=[-1] * 10,
),
Parameter(
'num_hwcntrs',
@@ -144,7 +144,7 @@ class Meabo(Workload):
description='''
Controls which phases to run.
''',
-constraint=lambda x: all(0 < v <=10 for v in x),
+constraint=lambda x: all(0 < v <= 10 for v in x),
default=list(range(1, 11)),
),
Parameter(
@@ -167,7 +167,7 @@ class Meabo(Workload):
''',
constraint=lambda x: 0 <= x <= 1,
default=1,
-),
+),
Parameter(
'llist_size',
kind=int,
@@ -176,7 +176,7 @@ class Meabo(Workload):
''',
constraint=lambda x: x > 0,
default=16777216,
-),
+),
Parameter(
'num_particles',
kind=int,
@@ -290,7 +290,7 @@ class Meabo(Workload):
# We need to calculate the phase mask
phase_mask = 0
for phase in self.run_phases:
-phase_mask |= 1<<(phase-1)
+phase_mask |= 1 << (phase - 1)
self.command += ' -P {:d}'.format(phase_mask)
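Each selected phase sets one bit of the mask, so the whole selection is passed to the binary as a single integer. A worked sketch with a hypothetical selection:

    run_phases = [1, 3, 4]
    phase_mask = 0
    for phase in run_phases:
        phase_mask |= 1 << (phase - 1)
    # phase_mask == 0b1101 == 13, so the command gains ' -P 13'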


@@ -61,6 +61,7 @@ class Memcpy(Workload):
cores will be used.
'''),
]
+
@once
def initialize(self, context):
self.binary_name = 'memcpy'


@@ -47,7 +47,7 @@ class Openssl(Workload):
parameters = [
Parameter('algorithm', default='aes-256-cbc',
-allowed_values = EVP_NEW + CIPHER_PKI,
+allowed_values=EVP_NEW + CIPHER_PKI,
description='''
Algorithm to benchmark.
'''),
@@ -71,8 +71,8 @@ class Openssl(Workload):
if self.use_system_binary:
try:
cmd = '{0} md5sum < $({0} which openssl)'
-output = self.target.execute(cmd.format(self.target.busybox))
-md5hash = output.split()[0]
+output = self.target.execute(cmd.format(self.target.busybox))
+md5hash = output.split()[0]
version = self.target.execute('openssl version').strip()
context.update_metadata('hashes', 'openssl', md5hash)
context.update_metadata('versions', 'openssl', version)
@@ -115,9 +115,9 @@ class Openssl(Workload):
if not line.startswith('+F'):
continue
-parts = line.split(':')
+parts = line.split(':')
if parts[0] == '+F': # evp ciphers
-for bs, value in zip(BLOCK_SIZES, list(map(float, parts[3:]))):
+for bs, value in zip(BLOCK_SIZES, list(map(float, parts[3:]))):
value = value / 2**20 # to MB
context.add_metric('score', value, 'MB/s',
classifiers={'block_size': bs})
@@ -126,31 +126,31 @@ class Openssl(Workload):
sign = float(parts[3])
verify = float(parts[4])
context.add_metric('sign', sign, 'seconds',
-classifiers={'key_length': key_len})
+classifiers={'key_length': key_len})
context.add_metric('verify', verify, 'seconds',
-classifiers={'key_length': key_len})
+classifiers={'key_length': key_len})
elif parts[0] == '+F4': # ecdsa
ec_idx = int(parts[1])
key_len = int(parts[2])
sign = float(parts[3])
verify = float(parts[4])
context.add_metric('sign', sign, 'seconds',
-classifiers={'key_length': key_len,
-'curve': ECD[ec_idx]})
+classifiers={'key_length': key_len,
+'curve': ECD[ec_idx]})
context.add_metric('verify', verify, 'seconds',
-classifiers={'key_length': key_len,
-'curve': ECD[ec_idx]})
+classifiers={'key_length': key_len,
+'curve': ECD[ec_idx]})
elif parts[0] == '+F5': # ecdh
ec_idx = int(parts[1])
key_len = int(parts[2])
op_time = float(parts[3])
ops_per_sec = float(parts[4])
context.add_metric('op', op_time, 'seconds',
-classifiers={'key_length': key_len,
-'curve': ECD[ec_idx]})
+classifiers={'key_length': key_len,
+'curve': ECD[ec_idx]})
context.add_metric('ops_per_sec', ops_per_sec, 'Hz',
-classifiers={'key_length': key_len,
-'curve': ECD[ec_idx]})
+classifiers={'key_length': key_len,
+'curve': ECD[ec_idx]})
else:
self.logger.warning('Unexpected result: "{}"'.format(line))
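For reference, these `+F` records are the machine-readable (`-mr`) output of `openssl speed`. A hedged sketch of the line shapes this parser expects, reconstructed from the field indexing above (all values hypothetical):

    +F:22:aes-256-cbc:52428800.00:104857600.00:...  # EVP cipher: one bytes/s field per block size
    +F2:0:2048:0.000456:0.000032                    # RSA: index, key length, sign time, verify time (s)
    +F4:3:256:0.000061:0.000210                     # ECDSA: curve index, key length, sign, verify
    +F5:3:256:0.000190:5263.2                       # ECDH: curve index, key length, op time, ops/s

The throughput figures are divided by 2**20 above, hence the MB/s metric.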


@@ -19,12 +19,13 @@ import zipfile
from wa import ApkUiautoWorkload
from wa.framework.exception import WorkloadError
+
class PcMark(ApkUiautoWorkload):
name = 'pcmark'
package_names = ['com.futuremark.pcmark.android.benchmark']
-regex_matches = [re.compile(r'PcmaWebV2Score>([\d.]+)'),
-re.compile(r'PcmaVideoEditingScore>([\d.]+)'),
+regex_matches = [re.compile(r'PcmaWebV2Score>([\d.]+)'),
+re.compile(r'PcmaVideoEditingScore>([\d.]+)'),
re.compile(r'PcmaDataManipulationScore>([\d.]+)'),
re.compile(r'PcmaPhotoEditingV2Score>([\d.]+)'),
re.compile(r'PcmaWorkv2Score>([\d.]+)'),


@@ -24,7 +24,7 @@ from wa.utils.misc import unique
class Speedometer(UiautoWorkload):
name = 'speedometer'
-regex=re.compile(r'Speedometer Score ([\d.]+)')
+regex = re.compile(r'Speedometer Score ([\d.]+)')
versions = ['1.0', '2.0']
description = '''
A workload to execute the speedometer web based benchmark


@@ -65,7 +65,6 @@ class Vellamo(ApkUiautoWorkload):
'listed, ``2`` -- the second, etc. Only valid for version ``3.0``.'))
]
-
def setup(self, context):
self.gui.uiauto_params['version'] = self.version
self.gui.uiauto_params['browserToUse'] = self.browser
@@ -118,13 +117,13 @@ class Vellamo(ApkUiautoWorkload):
for benchmark in parser.benchmarks:
benchmark.name = benchmark.name.replace(' ', '_')
context.add_metric('{}_Total'.format(benchmark.name),
-benchmark.score)
+benchmark.score)
for name, score in list(benchmark.metrics.items()):
name = name.replace(' ', '_')
context.add_metric('{}_{}'.format(benchmark.name,
-name), score)
+name), score)
context.add_artifact('vellamo_output', kind='raw',
-path=filename)
+path=filename)
def update_output_v3_2(self, context):
device_file = self.target.path.join(self.target.package_data_directory,