mirror of https://github.com/ARM-software/workload-automation.git synced 2025-09-02 11:22:41 +01:00

wa/workloads: pep8 fixes

Author: Marc Bonnici
Date:   2018-07-03 13:23:16 +01:00
Parent: 185bff9029
Commit: 6e94cbd36b

18 changed files with 57 additions and 53 deletions
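The edits are mechanical PEP 8 cleanups rather than behavioural changes: spaces added around arithmetic, shift and comparison operators and after commas, spaces removed around keyword-argument equals signs, blank lines normalised before class and function definitions, over-long statements split in two, and continuation lines realigned. The following minimal Python sketch illustrates the recurring patterns; the pycodestyle error codes in the comments are assumptions added for illustration (the commit does not name a checker), and the variable names simply echo lines from the hunks below.

    import os


    def pep8_example(out_dir, results_file, failed, completed, phase):
        # E225/E226: add whitespace around shift and arithmetic operators
        fail_rate = failed / completed * 100
        phase_mask = 1 << (phase - 1)
        # E231: add whitespace after ','; E251: no spaces around a keyword '='
        host_file = os.path.join(out_dir, results_file)
        # E501: split an over-long statement rather than exceed the line limit
        msg = "Expected {} scores, Detected {} scores."
        return fail_rate, phase_mask, host_file, msg.format(completed, failed)

Assuming pycodestyle is installed, running it over wa/workloads on the parent commit should reproduce warnings of these kinds (any project-specific line-length setting is not shown here).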

View File

@@ -69,6 +69,7 @@ class AdobeReader(ApkUiautoWorkload):
        self.asset_directory = self.target.path.join(self.target.external_storage,
                                                     'Android', 'data',
                                                     'com.adobe.reader', 'files')
+
    def init_resources(self, context):
        super(AdobeReader, self).init_resources(context)
        # Only accept certain file formats

View File

@@ -17,6 +17,7 @@ import re
from wa import ApkUiautoWorkload
from wa.framework.exception import WorkloadError

+
class Androbench(ApkUiautoWorkload):

    name = 'androbench'
@@ -52,5 +53,5 @@ class Androbench(ApkUiautoWorkload):
            context.add_metric(entry, result, 'MB/s', lower_is_better=False)
            expected_results -= 1
        if expected_results > 0:
-           raise WorkloadError("The Androbench workload has failed. Expected {} scores, Detected {} scores."
-                               .format(len(self.regex_matches), expected_results))
+           msg = "The Androbench workload has failed. Expected {} scores, Detected {} scores."
+           raise WorkloadError(msg.format(len(self.regex_matches), expected_results))

View File

@@ -16,6 +16,7 @@ import re
from wa import ApkUiautoWorkload, WorkloadError

+
class Antutu(ApkUiautoWorkload):

    name = 'antutu'

View File

@@ -89,7 +89,6 @@ class ApacheBenchmark(Workload):
                                  self.path)
        self.output = None

-
    def run(self, context):
        self.logger.debug(self.command)
        self.output, _ = check_output(self.command, timeout=300, shell=True)
@@ -111,7 +110,7 @@ class ApacheBenchmark(Workload):
        completed = int(get_line(fh, 'Complete requests').split(':')[1].strip())
        failed = int(get_line(fh, 'Failed requests').split(':')[1].strip())
        fail_rate = failed / completed * 100
        context.add_metric('failed_request', fail_rate, units='percent',
                           lower_is_better=True)

View File

@@ -18,7 +18,6 @@ from wa import ApkUiautoWorkload, Parameter
from wa.framework import pluginloader


-
class Applaunch(ApkUiautoWorkload):

    name = 'applaunch'
@@ -85,7 +84,7 @@ class Applaunch(ApkUiautoWorkload):
                  description.
                  """),
        Parameter('applaunch_iterations', kind=int, default=1,
                  description="""
                  Number of iterations of the application launch
                  """),
    ]

View File

@@ -46,7 +46,7 @@ class Deepbench(Workload):
""" """
parameters = [ parameters = [
Parameter('test', default='gemm', Parameter('test', default='gemm',
allowed_values=['gemm', 'conv', 'sparse'], allowed_values=['gemm', 'conv', 'sparse'],
description=''' description='''
Specifies which of the available benchmarks will be run. Specifies which of the available benchmarks will be run.

View File

@@ -34,17 +34,17 @@ from devlib.utils.android import grant_app_permissions
# Regexps for benchmark synchronization
REGEXPS = {
-   'start' : '.*Displayed com.google.android.exoplayer2.demo/.PlayerActivity',
-   'duration' : '.*period \[(?P<duration>[0-9]+.*)\]',
-   'end' : '.*state \[.+, .+, E\]',
+   'start': '.*Displayed com.google.android.exoplayer2.demo/.PlayerActivity',
+   'duration': '.*period \[(?P<duration>[0-9]+.*)\]',
+   'end': '.*state \[.+, .+, E\]',
    'dropped_frames': '.*droppedFrames \[(?P<session_time>[0-9]+\.[0-9]+), (?P<count>[0-9]+)\]'
}

DOWNLOAD_URLS = {
    'mp4_1080p': 'http://distribution.bbb3d.renderfarming.net/video/mp4/bbb_sunflower_1080p_30fps_normal.mp4',
    'mov_720p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_720p_h264.mov',
    'mov_480p': 'http://download.blender.org/peach/bigbuckbunny_movies/big_buck_bunny_480p_h264.mov',
    'ogg_128kbps': 'http://upload.wikimedia.org/wikipedia/commons/c/ca/Tchaikovsky_-_Romeo_and_Juliet_Ouverture_-_Antal_Dorati_(1959).ogg',
}
@@ -200,12 +200,12 @@ class ExoPlayer(ApkWorkload):
        if self.duration:
            self.logger.info('Waiting {} seconds before ending playback'
                             .format(self.duration))
            time.sleep(self.duration)
        else:
            self.logger.info('Waiting for playback completion ({} seconds)'
                             .format(media_duration_s))
-           self.monitor.wait_for(REGEXPS['end'], timeout = media_duration_s + 30)
+           self.monitor.wait_for(REGEXPS['end'], timeout=media_duration_s + 30)

    def update_output(self, context):
        regex = re.compile(REGEXPS['dropped_frames'])

View File

@@ -24,6 +24,7 @@ from wa import ApkUiautoWorkload, Parameter
from wa.framework.exception import ConfigError, WorkloadError
from wa.utils.misc import capitalize

+
class Geekbench(ApkUiautoWorkload):

    name = 'geekbench'
@@ -168,7 +169,7 @@ class Geekbench(ApkUiautoWorkload):
            for section in data['sections']:
                context.add_metric(namemify(section['name'] + '_score', i), section['score'])
                context.add_metric(namemify(section['name'] + '_multicore_score', i),
                                   section['multicore_score'])

    def update_result_4(self, context):
        outfile_glob = self.target.path.join(self.target.package_data_directory, self.package, 'files', '*gb*')
@@ -196,6 +197,7 @@ class Geekbench(ApkUiautoWorkload):
    update_result_5 = update_result_4

+
class GBWorkload(object):
    """
    Geekbench workload (not to be confused with WA's workloads). This is a single test run by
@@ -403,6 +405,7 @@ class GeekbenchCorproate(Geekbench):
                  override=True)
    ]

+
def namemify(basename, i):
    return basename + (' {}'.format(i) if i else '')
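Note that namemify appends the index only when it is truthy, so an index of 0 keeps the bare metric name; a quick illustrative check (the metric name is arbitrary, not from the diff):

    def namemify(basename, i):
        return basename + (' {}'.format(i) if i else '')

    assert namemify('runtime_score', 0) == 'runtime_score'
    assert namemify('runtime_score', 2) == 'runtime_score 2'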

View File

@@ -83,7 +83,7 @@ class Googlephotos(ApkUiautoWorkload):
        for i, f in enumerate(self.test_images):
            orig_file_path = self.target.path.join(d, f)
-           new_dir = self.target.path.join(e, 'wa', 'wa-{}'.format(i+1))
+           new_dir = self.target.path.join(e, 'wa', 'wa-{}'.format(i + 1))
            new_file_path = self.target.path.join(new_dir, f)
            self.target.execute('mkdir -p {}'.format(new_dir))

View File

@@ -60,7 +60,6 @@ class HWUITest(Workload):
        super(HWUITest, self).__init__(target, *args, **kwargs)
        HWUITest.target_exe = None

-
    @once
    def initialize(self, context):
        host_exe = context.get_resource(Executable(self,
@@ -77,7 +76,7 @@ class HWUITest(Workload):
    def extract_results(self, context):
        if not self.output:
            return
        outfile = os.path.join(context.output_directory, 'hwuitest.output')
        with open(outfile, 'w') as wfh:
            wfh.write(self.output)
@@ -110,7 +109,7 @@ class HWUITest(Workload):
                                   match['percent'],
                                   "%",
                                   classifiers={"loop": count,
                                                "frames": self.frames})
            else:
                match = normal.match(value_string).groupdict()
                context.add_metric(metric,

View File

@@ -118,7 +118,7 @@ class Jankbench(ApkWorkload):
        if self.pull_results_db:
            target_file = self.target.path.join(self.target.package_data_directory,
                                                self.package, 'databases', self.results_db_file)
-           host_file = os.path.join(context.output_directory,self.results_db_file)
+           host_file = os.path.join(context.output_directory, self.results_db_file)
            self.target.pull(target_file, host_file, as_root=True)
            context.add_artifact('jankbench-results', host_file, 'data')

View File

@@ -149,8 +149,8 @@ class Lmbench(Workload):
        parts = []
        if self.cpus:
            parts.append('{} taskset {} {}'.format(self.target.busybox,
                                                   self.cpus.mask(),
                                                   self.target_exe))
        else:
            parts.append(self.target_exe)
        if self.parallelism is not None:

View File

@@ -125,8 +125,8 @@ class Meabo(Workload):
            description='''
            Sets which cores each phase is run on.
            ''',
-           constraint=lambda x: all(v>=-1 for v in x),
-           default=[-1]*10,
+           constraint=lambda x: all(v >= -1 for v in x),
+           default=[-1] * 10,
        ),
        Parameter(
            'num_hwcntrs',
@@ -144,7 +144,7 @@ class Meabo(Workload):
            description='''
            Controls which phases to run.
            ''',
-           constraint=lambda x: all(0 < v <=10 for v in x),
+           constraint=lambda x: all(0 < v <= 10 for v in x),
            default=list(range(1, 11)),
        ),
        Parameter(
@@ -167,7 +167,7 @@ class Meabo(Workload):
            ''',
            constraint=lambda x: 0 <= x <= 1,
            default=1,
        ),
        Parameter(
            'llist_size',
            kind=int,
@@ -176,7 +176,7 @@ class Meabo(Workload):
            ''',
            constraint=lambda x: x > 0,
            default=16777216,
        ),
        Parameter(
            'num_particles',
            kind=int,
@@ -290,7 +290,7 @@ class Meabo(Workload):
        # We need to calculate the phase mask
        phase_mask = 0
        for phase in self.run_phases:
-           phase_mask |= 1<<(phase-1)
+           phase_mask |= 1 << (phase - 1)
        self.command += ' -P {:d}'.format(phase_mask)
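The reformatted loop above builds a bitmask with one bit per selected phase; phase numbers are 1-based, hence the shift by phase - 1. A short worked example with assumed values (not taken from the workload's defaults):

    # Assumed phase selection; run_phases is a configurable Meabo parameter.
    run_phases = [1, 3, 4]
    phase_mask = 0
    for phase in run_phases:
        phase_mask |= 1 << (phase - 1)

    # Bits 0, 2 and 3 are set, so the mask is 0b1101 == 13 and the command
    # gains the fragment ' -P 13'.
    assert phase_mask == 13
    print(' -P {:d}'.format(phase_mask))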

View File

@@ -61,6 +61,7 @@ class Memcpy(Workload):
              cores will be used.
              '''),
    ]
+
    @once
    def initialize(self, context):
        self.binary_name = 'memcpy'

View File

@@ -47,7 +47,7 @@ class Openssl(Workload):
    parameters = [
        Parameter('algorithm', default='aes-256-cbc',
-                 allowed_values = EVP_NEW + CIPHER_PKI,
+                 allowed_values=EVP_NEW + CIPHER_PKI,
                  description='''
                  Algorithm to benchmark.
                  '''),
@@ -71,8 +71,8 @@ class Openssl(Workload):
        if self.use_system_binary:
            try:
                cmd = '{0} md5sum < $({0} which openssl)'
                output = self.target.execute(cmd.format(self.target.busybox))
                md5hash = output.split()[0]
                version = self.target.execute('openssl version').strip()
                context.update_metadata('hashes', 'openssl', md5hash)
                context.update_metadata('versions', 'openssl', version)
@@ -115,9 +115,9 @@ class Openssl(Workload):
            if not line.startswith('+F'):
                continue
            parts = line.split(':')
            if parts[0] == '+F':  # evp ciphers
                for bs, value in zip(BLOCK_SIZES, list(map(float, parts[3:]))):
                    value = value / 2**20  # to MB
                    context.add_metric('score', value, 'MB/s',
                                       classifiers={'block_size': bs})
@@ -126,31 +126,31 @@ class Openssl(Workload):
                sign = float(parts[3])
                verify = float(parts[4])
                context.add_metric('sign', sign, 'seconds',
                                   classifiers={'key_length': key_len})
                context.add_metric('verify', verify, 'seconds',
                                   classifiers={'key_length': key_len})
            elif parts[0] == '+F4':  # ecdsa
                ec_idx = int(parts[1])
                key_len = int(parts[2])
                sign = float(parts[3])
                verify = float(parts[4])
                context.add_metric('sign', sign, 'seconds',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
                context.add_metric('verify', verify, 'seconds',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
            elif parts[0] == '+F5':  # ecdh
                ec_idx = int(parts[1])
                key_len = int(parts[2])
                op_time = float(parts[3])
                ops_per_sec = float(parts[4])
                context.add_metric('op', op_time, 'seconds',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
                context.add_metric('ops_per_sec', ops_per_sec, 'Hz',
                                   classifiers={'key_length': key_len,
                                                'curve': ECD[ec_idx]})
            else:
                self.logger.warning('Unexpected result: "{}"'.format(line))

View File

@@ -19,6 +19,7 @@ import zipfile
from wa import ApkUiautoWorkload
from wa.framework.exception import WorkloadError

+
class PcMark(ApkUiautoWorkload):

    name = 'pcmark'

View File

@@ -24,7 +24,7 @@ from wa.utils.misc import unique
class Speedometer(UiautoWorkload):

    name = 'speedometer'
-   regex=re.compile(r'Speedometer Score ([\d.]+)')
+   regex = re.compile(r'Speedometer Score ([\d.]+)')
    versions = ['1.0', '2.0']
    description = '''
    A workload to execute the speedometer web based benchmark

View File

@@ -65,7 +65,6 @@ class Vellamo(ApkUiautoWorkload):
                  'listed, ``2`` -- the second, etc. Only valid for version ``3.0``.'))
    ]

-
    def setup(self, context):
        self.gui.uiauto_params['version'] = self.version
        self.gui.uiauto_params['browserToUse'] = self.browser
@@ -118,13 +117,13 @@ class Vellamo(ApkUiautoWorkload):
        for benchmark in parser.benchmarks:
            benchmark.name = benchmark.name.replace(' ', '_')
            context.add_metric('{}_Total'.format(benchmark.name),
                               benchmark.score)
            for name, score in list(benchmark.metrics.items()):
                name = name.replace(' ', '_')
                context.add_metric('{}_{}'.format(benchmark.name,
                                                  name), score)
        context.add_artifact('vellamo_output', kind='raw',
                             path=filename)

    def update_output_v3_2(self, context):
        device_file = self.target.path.join(self.target.package_data_directory,