1
0
mirror of https://github.com/ARM-software/workload-automation.git synced 2025-03-28 05:28:42 +00:00

vellamo: Fixed getting values from logcat

The previous method of getting results out of logcat does not work
if the format of logcat changes.
This commit is contained in:
Sebastian Goscik 2016-05-17 16:19:37 +01:00
parent f02b6d5fd9
commit 9652801cce

View File

@ -15,6 +15,9 @@
import os import os
import logging import logging
import json
import re
from HTMLParser import HTMLParser from HTMLParser import HTMLParser
from collections import defaultdict, OrderedDict from collections import defaultdict, OrderedDict
from distutils.version import StrictVersion from distutils.version import StrictVersion
@ -127,7 +130,7 @@ class Vellamo(AndroidUiAutoBenchmark):
name = name.replace(' ', '_') name = name.replace(' ', '_')
context.result.add_metric('{}_{}'.format(benchmark.name, name), score) context.result.add_metric('{}_{}'.format(benchmark.name, name), score)
context.add_iteration_artifact('vellamo_output', kind='raw', path=filename) context.add_iteration_artifact('vellamo_output', kind='raw', path=filename)
def update_result_v3_2(self, context): def update_result_v3_2(self, context):
device_file = self.device.path.join(self.device.package_data_directory, device_file = self.device.path.join(self.device.package_data_directory,
self.package, self.package,
@ -143,25 +146,21 @@ class Vellamo(AndroidUiAutoBenchmark):
name = result['id'] name = result['id']
score = result['score'] score = result['score']
context.result.add_metric(name, score) context.result.add_metric(name, score)
def non_root_update_result(self, context):
    """Parse Vellamo benchmark scores out of the logcat dump.

    Scans ``self.logcat_log`` for lines of the form
    ``VELLAMO RESULT: <group> <score>`` (groups: Browser, Metal,
    Multicore) and adds one metric per match to ``context.result``.
    Emits a warning if a ``VELLAMO ERROR:`` marker shows the browser
    crashed during the run.

    :param context: execution context providing ``result.add_metric``.
    :raises WorkloadError: if any benchmark group reported a zero score.
    """
    # Raw string avoids invalid-escape warnings for \d; compiled once,
    # outside the per-line loop.
    result_regex = re.compile(r"VELLAMO RESULT: (Browser|Metal|Multicore) (\d+)")
    failed = []
    with open(self.logcat_log) as fh:
        for line in fh:
            if 'VELLAMO ERROR:' in line:
                self.logger.warning("Browser crashed during benchmark, results may not be accurate")
            for metric, score in result_regex.findall(line):
                # findall yields strings; "0" is truthy, so a bare
                # ``if not score`` could never flag a failed group.
                # Convert to int so comparison and the stored metric
                # value are both numeric.
                score = int(score)
                if score == 0:
                    failed.append(metric)
                else:
                    context.result.add_metric(metric, score)
    if failed:
        raise WorkloadError("The following benchmark groups failed: {}".format(", ".join(failed)))