mirror of https://github.com/ARM-software/workload-automation.git synced 2025-09-04 04:12:42 +01:00

Add support for Python 3

Add support for running under Python 3, while maintaining compatibility
with Python 2.

See http://python-future.org/compatible_idioms.html for more detail on
the idioms behind these changes.
commit b3de85455a
parent c3ddb31d4d
Author:    Sergei Trofimov (2018-05-30 13:58:49 +01:00)
Committer: Marc Bonnici
53 changed files with 377 additions and 384 deletions
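The core idiom repeated across the files below comes from the python-future
library. A minimal sketch of how it works, assuming the future package is
installed (pip install future); the URL is a placeholder, not from the commit:

    from future.standard_library import install_aliases
    install_aliases()  # on Python 2, installs aliases for the Python 3 stdlib layout

    import urllib.request
    import urllib.parse

    # The Python 3 spellings now work under both interpreters.
    print(urllib.parse.urlparse('http://example.com/video.mp4').path)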

View File

@@ -18,7 +18,13 @@
 import re
 import os
 import time
-import urllib
+
+from future.standard_library import install_aliases
+install_aliases()
+
+import urllib.request
+import urllib.parse
+import urllib.error
 
 from wa import ApkWorkload, Parameter, ConfigError, WorkloadError
 from wa.framework.configuration.core import settings
@@ -81,7 +87,7 @@ class ExoPlayer(ApkWorkload):
                       Playback duration of the video file. This becomes the duration of the workload.
                       If provided must be shorter than the length of the media.
                       """),
-        Parameter('format', allowed_values=DOWNLOAD_URLS.keys(),
+        Parameter('format', allowed_values=list(DOWNLOAD_URLS.keys()),
                   description="""
                   Specifies which format video file to play. Default is {}
                   """.format(default_format)),
@@ -137,7 +143,7 @@ class ExoPlayer(ApkWorkload):
            filename = '{}_{}'.format(format_resolution, os.path.basename(url))
            filepath = os.path.join(self.video_directory, filename)
            self.logger.info('Downloading {} to {}...'.format(url, filepath))
-            urllib.urlretrieve(url, filepath)
+            urllib.request.urlretrieve(url, filepath)
            return filepath
        else:
            if len(files) > 1:
@@ -172,7 +178,7 @@ class ExoPlayer(ApkWorkload):
        self.play_cmd = 'am start -a {} -d "file://{}"'.format(self.action,
                                                               self.device_video_file)
 
-        self.monitor = self.target.get_logcat_monitor(REGEXPS.values())
+        self.monitor = self.target.get_logcat_monitor(list(REGEXPS.values()))
        self.monitor.start()
 
    def run(self, context):
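Both list() wrappers in this file follow from the same Python 3 behaviour
change: dict.keys() and dict.values() return view objects rather than lists,
so call sites that need a real list (here allowed_values and
get_logcat_monitor) convert explicitly. A standalone illustration, using a
hypothetical mapping:

    DOWNLOAD_URLS = {'mp4_720p': 'http://host/720p.mp4'}  # hypothetical entry

    keys = DOWNLOAD_URLS.keys()           # Python 2: a list; Python 3: a dict_keys view
    formats = list(DOWNLOAD_URLS.keys())  # a real list on both versions
    print(formats)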

View File

@@ -84,7 +84,7 @@ class Hackbench(Workload):
        results_file = context.get_artifact_path('hackbench-results')
        with open(results_file) as fh:
            for line in fh:
-                for label, (regex, units) in regex_map.iteritems():
+                for label, (regex, units) in regex_map.items():
                    match = regex.search(line)
                    if match:
                        context.add_metric(label, float(match.group(1)), units)
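dict.iteritems() was removed in Python 3; plain items() exists on both
versions (on Python 2 it eagerly builds a list, which is harmless for a small
regex map). A sketch, with a made-up map:

    import re

    regex_map = {'total_time': (re.compile(r'Time: (\S+)'), 'seconds')}  # hypothetical

    for label, (regex, units) in regex_map.items():  # works on Python 2 and 3
        print(label, units)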

View File

@@ -15,7 +15,7 @@
 # pylint: disable=E1101,W0201,E0203
+from __future__ import division
 
 import os
 import re
 import select
@@ -23,6 +23,7 @@ import json
 import threading
 import sqlite3
 import subprocess
+import sys
 
 from copy import copy
 import pandas as pd
@@ -143,7 +144,7 @@ class Jankbench(ApkWorkload):
        for test_name, rep in results.index:
            test_results = results.ix[test_name, rep]
-            for metric, value in test_results.iteritems():
+            for metric, value in test_results.items():
                context.add_metric(metric, value, units=None, lower_is_better=True,
                                   classifiers={'test_name': test_name, 'rep': rep})
@@ -222,6 +223,8 @@ class JankbenchRunMonitor(threading.Thread):
                ready, _, _ = select.select([proc.stdout, proc.stderr], [], [], 2)
                if ready:
                    line = ready[0].readline()
+                    if sys.version_info[0] == 3:
+                        line = line.decode(sys.stdout.encoding)
                    if self.regex.search(line):
                        self.run_ended.set()
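The decode step is needed because subprocess pipes yield bytes under
Python 3, and a regex compiled from str will not match them; under Python 2
pipes already yield str, so the guard is version-gated. A minimal sketch of
the same pattern (the echo command is just an example):

    import subprocess
    import sys

    proc = subprocess.Popen(['echo', 'Benchmark complete'], stdout=subprocess.PIPE)
    line = proc.stdout.readline()
    if sys.version_info[0] == 3:
        line = line.decode(sys.stdout.encoding)  # bytes -> str before regex matching
    print(line.strip())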

View File

@@ -145,7 +145,7 @@ class Meabo(Workload):
            Controls which phases to run.
            ''',
            constraint=lambda x: all(0 < v <=10 for v in x),
-            default=range(1, 11),
+            default=list(range(1, 11)),
        ),
        Parameter(
            'threads',
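Similarly, range() in Python 3 returns a lazy sequence object rather than a
list, so a parameter default that must be a concrete list gets the same
list() treatment:

    phases = range(1, 11)        # Python 2: [1, ..., 10]; Python 3: a lazy range object
    phases = list(range(1, 11))  # a real list of 1..10 on both versions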

View File

@@ -102,7 +102,7 @@ class Openssl(Workload):
            parts = line.split(':')
            if parts[0] == '+F':  # evp ciphers
-                for bs, value in zip(BLOCK_SIZES, map(float, parts[3:])):
+                for bs, value in zip(BLOCK_SIZES, list(map(float, parts[3:]))):
                    value = value / 2**20  # to MB
                    context.add_metric('score', value, 'MB/s',
                                       classifiers={'block_size': bs})
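map() is likewise lazy in Python 3, returning an iterator; list() restores
Python 2's eager behaviour. Since zip() accepts any iterable, the conversion
here is strictly redundant, but it is the mechanical rewrite the futurize
tool produces and it is harmless. A sketch with made-up openssl speed fields:

    BLOCK_SIZES = [16, 64, 256, 1024]
    parts = ['+F', '22', 'aes-128-cbc', '1048576.0', '2097152.0']  # hypothetical '+F' line

    for bs, value in zip(BLOCK_SIZES, list(map(float, parts[3:]))):
        print(bs, value / 2**20, 'MB/s')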

View File

@@ -135,16 +135,16 @@ class Sysbench(Workload):
        with open(self.host_results_file) as fh:
            find_line_with('General statistics:', fh)
-            extract_metric('total time', fh.next(), context.output)
-            extract_metric('total number of events', fh.next(), context.output, lower_is_better=False)
+            extract_metric('total time', next(fh), context.output)
+            extract_metric('total number of events', next(fh), context.output, lower_is_better=False)
            find_line_with('response time:', fh)
-            extract_metric('min', fh.next(), context.output, 'response time ')
-            extract_metric('avg', fh.next(), context.output, 'response time ')
-            extract_metric('max', fh.next(), context.output, 'response time ')
-            extract_metric('approx. 95 percentile', fh.next(), context.output)
+            extract_metric('min', next(fh), context.output, 'response time ')
+            extract_metric('avg', next(fh), context.output, 'response time ')
+            extract_metric('max', next(fh), context.output, 'response time ')
+            extract_metric('approx. 95 percentile', next(fh), context.output)
            find_line_with('Threads fairness:', fh)
-            extract_threads_fairness_metric('events', fh.next(), context.output)
-            extract_threads_fairness_metric('execution time', fh.next(), context.output)
+            extract_threads_fairness_metric('events', next(fh), context.output)
+            extract_threads_fairness_metric('execution time', next(fh), context.output)
 
    def teardown(self, context):
        self.target.remove(self.target_results_file)
@@ -155,7 +155,7 @@ class Sysbench(Workload):
    def _build_command(self, **parameters):
        param_strings = ['--{}={}'.format(k.replace('_', '-'), v)
-                         for k, v in parameters.iteritems()]
+                         for k, v in parameters.items()]
        if self.file_test_mode:
            param_strings.append('--file-test-mode={}'.format(self.file_test_mode))
        sysbench_command = '{} {} {} run'.format(self.target_binary, ' '.join(param_strings), self.cmd_params)
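The file-iteration changes follow another Python 3 rename: the .next()
method became __next__(), and the portable spelling on both versions is the
builtin next(). A self-contained sketch, using io.StringIO as a stand-in for
the results file:

    import io

    fh = io.StringIO(u'total time: 10.5s\ntotal number of events: 1000\n')
    first = next(fh)   # fh.next() is Python 2 only; the builtin next() works on both
    second = next(fh)
    print(first.strip(), '/', second.strip())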

View File

@@ -17,7 +17,7 @@ import os
 import json
 import re
 
-from HTMLParser import HTMLParser
+from html.parser import HTMLParser
 
 from wa import ApkUiautoWorkload, Parameter
 from wa.utils.types import list_of_strs
@@ -48,7 +48,7 @@ class Vellamo(ApkUiautoWorkload):
        '3.0': ['Browser', 'Metal', 'Multi'],
        '3.2.4': ['Browser', 'Metal', 'Multi'],
    }
-    valid_versions = benchmark_types.keys()
+    valid_versions = list(benchmark_types.keys())
    summary_metrics = None
 
    parameters = [
@@ -119,7 +119,7 @@ class Vellamo(ApkUiautoWorkload):
            benchmark.name = benchmark.name.replace(' ', '_')
            context.add_metric('{}_Total'.format(benchmark.name),
                               benchmark.score)
-            for name, score in benchmark.metrics.items():
+            for name, score in list(benchmark.metrics.items()):
                name = name.replace(' ', '_')
                context.add_metric('{}_{}'.format(benchmark.name,
                                                  name), score)
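The HTMLParser module moved to html.parser in Python 3; this commit imports
the new location directly, relying on python-future to provide it under
Python 2. Where the future package cannot be assumed, a try/except fallback
is a common alternative (a sketch, not what this commit does):

    try:
        from html.parser import HTMLParser   # Python 3 (and python-future on Python 2)
    except ImportError:
        from HTMLParser import HTMLParser    # plain Python 2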