Mirror of https://github.com/ARM-software/workload-automation.git
vellamo: Fixed getting values from logcat
The previous method of getting results out of logcat does not work if the logcat output format changes.
commit 9652801cce
parent f02b6d5fd9
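To illustrate the fragility, here is a minimal sketch (not part of the commit; the two sample logcat lines are hypothetical, one in 'brief' format and one in 'threadtime' format). It shows how splitting on ':' picks the wrong field when the line prefix changes, while a regex that matches the payload directly is unaffected:

import re

# Hypothetical logcat lines carrying the same Vellamo result.
brief_line = "I/Vellamo( 1234): VELLAMO RESULT: Browser 2689"
threadtime_line = "03-30 10:15:02.123  1234  1234 I Vellamo: VELLAMO RESULT: Browser 2689"

# Old approach: assume the payload is always the third ':'-separated field.
print(brief_line.split(':')[2])       # ' Browser 2689'  -- works
print(threadtime_line.split(':')[2])  # '02.123  1234  1234 I Vellamo'  -- wrong field

# New approach: match the payload itself, independent of the line prefix.
result_regex = re.compile(r"VELLAMO RESULT: (Browser|Metal|Multicore) (\d+)")
print(result_regex.findall(brief_line))       # [('Browser', '2689')]
print(result_regex.findall(threadtime_line))  # [('Browser', '2689')]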
@@ -15,6 +15,9 @@
 import os
 import logging
+import json
+import re
 
 from HTMLParser import HTMLParser
 from collections import defaultdict, OrderedDict
 from distutils.version import StrictVersion
@@ -146,22 +149,18 @@ class Vellamo(AndroidUiAutoBenchmark):
 
     def non_root_update_result(self, context):
         failed = []
-        with open(self.logcat_log) as logcat:
-            metrics = OrderedDict()
-            for line in logcat:
-                if 'VELLAMO RESULT:' in line:
-                    info = line.split(':')
-                    parts = info[2].split(" ")
-                    metric = parts[1].strip()
-                    value = int(parts[2].strip())
-                    metrics[metric] = value
+        with open(self.logcat_log) as fh:
+            iteration_result_regex = re.compile("VELLAMO RESULT: (Browser|Metal|Multicore) (\d+)")
+            for line in fh:
                 if 'VELLAMO ERROR:' in line:
                     self.logger.warning("Browser crashed during benchmark, results may not be accurate")
-        for key, value in metrics.iteritems():
-            key = key.replace(' ', '_')
-            context.result.add_metric(key, value)
-            if value == 0:
-                failed.append(key)
+                result = iteration_result_regex.findall(line)
+                if result:
+                    for (metric, score) in result:
+                        if not score:
+                            failed.append(metric)
+                        else:
+                            context.result.add_metric(metric, score)
         if failed:
             raise WorkloadError("The following benchmark groups failed: {}".format(", ".join(failed)))
 
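For reference, a minimal standalone sketch of the new regex-based extraction; the sample lines and the metrics dict are illustrative only, not taken from a real device:

import re

iteration_result_regex = re.compile(r"VELLAMO RESULT: (Browser|Metal|Multicore) (\d+)")

# Made-up logcat excerpt for illustration.
sample_logcat = [
    "I/Vellamo( 1234): VELLAMO RESULT: Browser 2689",
    "I/Vellamo( 1234): VELLAMO RESULT: Metal 1071",
    "I/Vellamo( 1234): some unrelated line",
]

metrics = {}
for line in sample_logcat:
    for metric, score in iteration_result_regex.findall(line):
        metrics[metric] = int(score)  # findall() returns strings, so convert

print(metrics)  # {'Browser': 2689, 'Metal': 1071}

Note that re.findall() yields the captured groups as strings; the int() conversion above is an assumption of this sketch, whereas the patch passes the captured score straight to context.result.add_metric().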