mirror of https://github.com/ARM-software/workload-automation.git

Unit tests

parent 02138c60cc
commit e258999e0a
wlauto/tests/data/test-agenda-bad-syntax.yaml  (new file, 1 line)
@@ -0,0 +1 @@
[ewqh

wlauto/tests/data/test-agenda-not-dict.yaml  (new file, 1 line)
@@ -0,0 +1 @@
Test
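These two one-line fixtures exist purely to drive the error paths of the new configuration loader. A minimal sketch of what loading them is expected to do, based on the _load_file assertions in wlauto/tests/test_parsers.py further down (the helper import and the data-directory layout are taken from that file and are not guaranteed outside the test tree):

    import os

    from wlauto.exceptions import ConfigError
    from wlauto.core.configuration.parsers import _load_file

    data = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')

    try:
        # '[ewqh' is not parseable YAML, so this should fail while parsing.
        _load_file(os.path.join(data, 'test-agenda-bad-syntax.yaml'), 'test file')
    except ConfigError as e:
        print(e)  # Error parsing test file ...: Syntax Error on line 1

    try:
        # 'Test' parses, but the top level is a string rather than a dict.
        _load_file(os.path.join(data, 'test-agenda-not-dict.yaml'), 'test file')
    except ConfigError as e:
        print(e)  # ... does not contain a valid test file structure; top level must be a dict.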
wlauto/tests/test_agenda.py
@@ -24,6 +24,7 @@ from nose.tools import assert_equal, assert_in, raises

from wlauto.core.agenda import Agenda
from wlauto.exceptions import ConfigError
from wlauto.utils.serializer import SerializerSyntaxError


YAML_TEST_FILE = os.path.join(os.path.dirname(__file__), 'data', 'test-agenda.yaml')
@@ -35,7 +36,7 @@ workloads:
        test: 1
"""
invalid_agenda = StringIO(invalid_agenda_text)
invalid_agenda.name = 'invalid1'
invalid_agenda.name = 'invalid1.yaml'

duplicate_agenda_text = """
global:
@@ -49,13 +50,13 @@ workloads:
        workload_name: andebench
"""
duplicate_agenda = StringIO(duplicate_agenda_text)
duplicate_agenda.name = 'invalid2'
duplicate_agenda.name = 'invalid2.yaml'

short_agenda_text = """
workloads: [antutu, linpack, andebench]
"""
short_agenda = StringIO(short_agenda_text)
short_agenda.name = 'short'
short_agenda.name = 'short.yaml'

default_ids_agenda_text = """
workloads:
@@ -69,7 +70,7 @@ workloads:
    - vellamo
"""
default_ids_agenda = StringIO(default_ids_agenda_text)
default_ids_agenda.name = 'default_ids'
default_ids_agenda.name = 'default_ids.yaml'

sectioned_agenda_text = """
sections:
@@ -91,7 +92,7 @@ workloads:
    - nenamark
"""
sectioned_agenda = StringIO(sectioned_agenda_text)
sectioned_agenda.name = 'sectioned'
sectioned_agenda.name = 'sectioned.yaml'

dup_sectioned_agenda_text = """
sections:
@@ -105,7 +106,7 @@ workloads:
    - nenamark
"""
dup_sectioned_agenda = StringIO(dup_sectioned_agenda_text)
dup_sectioned_agenda.name = 'dup-sectioned'
dup_sectioned_agenda.name = 'dup-sectioned.yaml'

caps_agenda_text = """
config:
@@ -120,17 +121,17 @@ workloads:
        name: linpack
"""
caps_agenda = StringIO(caps_agenda_text)
caps_agenda.name = 'caps'
caps_agenda.name = 'caps.yaml'

bad_syntax_agenda_text = """
config:
    # tab on the following line
    reboot_policy: never
    reboot_policy: never
workloads:
    - antutu
"""
bad_syntax_agenda = StringIO(bad_syntax_agenda_text)
bad_syntax_agenda.name = 'bad_syntax'
bad_syntax_agenda.name = 'bad_syntax.yaml'

section_ids_test_text = """
config:
@@ -145,7 +146,7 @@ sections:
    - id: bar
"""
section_ids_agenda = StringIO(section_ids_test_text)
section_ids_agenda.name = 'section_ids'
section_ids_agenda.name = 'section_ids.yaml'
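# A note on the renames above (an inference, not stated anywhere in the commit):
# each StringIO fixture now carries a '.yaml' extension in its .name, which fits
# the new wlauto.utils.serializer import at the top of the file - a serializer
# that picks its format from the file extension needs file-like objects whose
# names look like YAML files. Correspondingly, test_bad_syntax below now expects
# SerializerSyntaxError rather than ConfigError.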
class AgendaTest(TestCase):
@@ -154,42 +155,18 @@ class AgendaTest(TestCase):
        agenda = Agenda(YAML_TEST_FILE)
        assert_equal(len(agenda.workloads), 4)

    def test_duplicate_id(self):
        try:
            Agenda(duplicate_agenda)
        except ConfigError, e:
            assert_in('duplicate', e.message.lower())  # pylint: disable=E1101
        else:
            raise Exception('ConfigError was not raised for an agenda with duplicate ids.')

    def test_yaml_missing_field(self):
        try:
            Agenda(invalid_agenda_text)
            Agenda(invalid_agenda)
        except ConfigError, e:
            assert_in('workload name', e.message)
        else:
            raise Exception('ConfigError was not raised for an invalid agenda.')

    def test_defaults(self):
        agenda = Agenda(short_agenda)
        assert_equal(len(agenda.workloads), 3)
        assert_equal(agenda.workloads[0].workload_name, 'antutu')
        assert_equal(agenda.workloads[0].id, '1')

    def test_default_id_assignment(self):
        agenda = Agenda(default_ids_agenda)
        assert_equal(agenda.workloads[0].id, '2')
        assert_equal(agenda.workloads[3].id, '3')

    def test_sections(self):
        agenda = Agenda(sectioned_agenda)
        assert_equal(agenda.sections[0].workloads[0].workload_name, 'antutu')
        assert_equal(agenda.sections[1].runtime_parameters['dp'], 'three')

    @raises(ConfigError)
    def test_dup_sections(self):
        Agenda(dup_sectioned_agenda)

    @raises(ConfigError)
    @raises(SerializerSyntaxError)
    def test_bad_syntax(self):
        Agenda(bad_syntax_agenda)
wlauto/tests/test_configuration.py  (new file, 292 lines)
@@ -0,0 +1,292 @@
# pylint: disable=R0201

from unittest import TestCase

from nose.tools import assert_equal, assert_is
from mock.mock import MagicMock, Mock

from wlauto.exceptions import ConfigError
from wlauto.core.configuration.tree import Node
from wlauto.core.configuration.configuration import (ConfigurationPoint, Configuration,
                                                      JobsConfiguration)

#          A1
#         /  \
#       B1    B2
#      /  \  /  \
#     C1  C2 C3  C4
#          \
#          D1
a1 = Node("A1")
b1 = a1.add_section("B1")
b2 = a1.add_section("B2")
c1 = b1.add_section("C1")
c2 = b1.add_section("C2")
c3 = b2.add_section("C3")
c4 = b2.add_section("C4")
d1 = c2.add_section("D1")
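# Traversal order as implied by the expected sequences asserted in NodeTest
# below (the tree module itself is not shown in this commit):
#   - Node.descendants() yields nodes post-order, i.e. a child's own
#     descendants come out before the child itself:
#         list(b1.descendants())  ->  [c1, d1, c2]
#         list(a1.descendants())  ->  [c1, d1, c2, b1, c3, c4, b2]
#   - Node.ancestors() walks bottom-up from the node's parent to the root,
#     so the root itself has no ancestors:
#         list(d1.ancestors())    ->  [c2, b1, a1]
#   - Node.leaves() yields only childless nodes, in the same order:
#         list(a1.leaves())       ->  [c1, d1, c3, c4]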
class NodeTest(TestCase):

    def test_node(self):
        node = Node(1)
        assert_equal(node.config, 1)
        assert_is(node.parent, None)
        assert_equal(node.workloads, [])
        assert_equal(node.children, [])

    def test_add_workload(self):
        node = Node(1)
        node.add_workload(2)
        assert_equal(node.workloads, [2])

    def test_add_section(self):
        node = Node(1)
        new_node = node.add_section(2)
        assert_equal(len(node.children), 1)
        assert_is(node.children[0], new_node)
        assert_is(new_node.parent, node)
        assert_equal(node.is_leaf, False)
        assert_equal(new_node.is_leaf, True)

    def test_descendants(self):
        for got, expected in zip(b1.descendants(), [c1, d1, c2]):
            print "GOT:{} EXPECTED:{}".format(got.config, expected.config)
            assert_is(got, expected)
        print "----"
        for got, expected in zip(a1.descendants(), [c1, d1, c2, b1, c3, c4, b2]):
            print "GOT:{} EXPECTED:{}".format(got.config, expected.config)
            assert_is(got, expected)

    def test_ancestors(self):
        for got, expected in zip(d1.ancestors(), [c2, b1, a1]):
            print "GOT:{} EXPECTED:{}".format(got.config, expected.config)
            assert_is(got, expected)
        for _ in a1.ancestors():
            raise Exception("A1 is the root, it shouldn't have ancestors")

    def test_leaves(self):
        for got, expected in zip(a1.leaves(), [c1, d1, c3, c4]):
            print "GOT:{} EXPECTED:{}".format(got.config, expected.config)
            assert_is(got, expected)
        print "----"
        for got, expected in zip(d1.leaves(), [d1]):
            print "GOT:{} EXPECTED:{}".format(got.config, expected.config)
            assert_is(got, expected)


class ConfigurationPointTest(TestCase):

    def test_match(self):
        cp1 = ConfigurationPoint("test1", aliases=["foo", "bar"])
        cp2 = ConfigurationPoint("test2", aliases=["fizz", "buzz"])

        assert_equal(cp1.match("test1"), True)
        assert_equal(cp1.match("foo"), True)
        assert_equal(cp1.match("bar"), True)
        assert_equal(cp1.match("fizz"), False)
        assert_equal(cp1.match("NOT VALID"), False)

        assert_equal(cp2.match("test2"), True)
        assert_equal(cp2.match("fizz"), True)
        assert_equal(cp2.match("buzz"), True)
        assert_equal(cp2.match("foo"), False)
        assert_equal(cp2.match("NOT VALID"), False)

    def test_set_value(self):
        cp1 = ConfigurationPoint("test", default="hello")
        cp2 = ConfigurationPoint("test", mandatory=True)
        cp3 = ConfigurationPoint("test", mandatory=True, default="Hello")
        cp4 = ConfigurationPoint("test", default=["hello"], merge=True, kind=list)
        cp5 = ConfigurationPoint("test", kind=int)
        cp6 = ConfigurationPoint("test5", kind=list, allowed_values=[1, 2, 3, 4, 5])

        mock = Mock()
        mock.name = "ConfigurationPoint Unit Test"

        # Testing defaults and basic functionality
        cp1.set_value(mock)
        assert_equal(mock.test, "hello")
        cp1.set_value(mock, value="there")
        assert_equal(mock.test, "there")

        # Testing mandatory flag
        err_msg = 'No values specified for mandatory parameter "test" in ' \
                  'ConfigurationPoint Unit Test'
        with self.assertRaisesRegexp(ConfigError, err_msg):
            cp2.set_value(mock)
        cp3.set_value(mock)  # Should ignore mandatory
        assert_equal(mock.test, "Hello")

        # Testing merging - not in depth, as that is covered by the unit test for merge_config
        cp4.set_value(mock, value=["there"])
        assert_equal(mock.test, ["Hello", "there"])

        # Testing type conversion
        cp5.set_value(mock, value="100")
        assert_equal(isinstance(mock.test, int), True)
        msg = 'Bad value "abc" for test; must be an integer'
        with self.assertRaisesRegexp(ConfigError, msg):
            cp5.set_value(mock, value="abc")

        # Testing that validation is not called when no value is set;
        # if it were, it would error because it cannot iterate over None
        cp6.set_value(mock)

    def test_validation(self):
        def is_even(value):
            if value % 2:
                return False
            return True

        cp1 = ConfigurationPoint("test", kind=int, allowed_values=[1, 2, 3, 4, 5])
        cp2 = ConfigurationPoint("test", kind=list, allowed_values=[1, 2, 3, 4, 5])
        cp3 = ConfigurationPoint("test", kind=int, constraint=is_even)
        cp4 = ConfigurationPoint("test", kind=list, mandatory=True, allowed_values=[1, 99])
        mock = MagicMock()
        mock.name = "ConfigurationPoint Validation Unit Test"

        # Test allowed values
        cp1.validate_value(mock.name, 1)
        with self.assertRaises(ConfigError):
            cp1.validate_value(mock.name, 100)
        with self.assertRaises(ConfigError):
            cp1.validate_value(mock.name, [1, 2, 3])

        # Test allowed values for lists
        cp2.validate_value(mock.name, [1, 2, 3])
        with self.assertRaises(ConfigError):
            cp2.validate_value(mock.name, [1, 2, 100])

        # Test constraints
        cp3.validate_value(mock.name, 2)
        cp3.validate_value(mock.name, 4)
        cp3.validate_value(mock.name, 6)
        msg = '"3" failed constraint validation for "test" in "ConfigurationPoint' \
              ' Validation Unit Test".'
        with self.assertRaisesRegexp(ConfigError, msg):
            cp3.validate_value(mock.name, 3)

        with self.assertRaises(ValueError):
            ConfigurationPoint("test", constraint=100)

        # Test "validate" methods
        mock.test = None
        # Mandatory config point not set
        with self.assertRaises(ConfigError):
            cp4.validate(mock)
        cp1.validate(mock)  # cp1 does not have mandatory set
        cp4.set_value(mock, value=[99])
        cp4.validate(mock)

    def test_get_type_name(self):
        def dummy():
            pass
        types = [str, list, int, dummy]
        names = ["str", "list", "integer", "dummy"]
        for kind, name in zip(types, names):
            cp = ConfigurationPoint("test", kind=kind)
            assert_equal(cp.get_type_name(), name)
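# A minimal usage sketch distilled from the assertions above (the 'frequency'
# point and the owner object are made up for illustration; only the calls
# themselves are exercised by this file):
#
#     cp = ConfigurationPoint('frequency', kind=int, default=1000,
#                             aliases=['freq'], allowed_values=[500, 1000, 2000])
#     cp.match('freq')                    # True - aliases are matched too
#     cp.set_value(owner)                 # owner.frequency == 1000 (the default)
#     cp.set_value(owner, value='2000')   # string converted to int via kind
#     cp.validate_value(owner.name, 750)  # raises ConfigError - not in allowed_values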
# Subclass just to add some config points to use in testing
class TestConfiguration(Configuration):
    name = "Test Config"
    __configuration = [
        ConfigurationPoint("test1", default="hello"),
        ConfigurationPoint("test2", mandatory=True),
        ConfigurationPoint("test3", default=["hello"], merge=True, kind=list),
        ConfigurationPoint("test4", kind=int, default=123),
        ConfigurationPoint("test5", kind=list, allowed_values=[1, 2, 3, 4, 5]),
    ]
    configuration = {cp.name: cp for cp in __configuration}


class ConfigurationTest(TestCase):

    def test(self):
        # Test loading defaults
        cfg = TestConfiguration()
        expected = {
            "test1": "hello",
            "test2": None,
            "test3": ["hello"],
            "test4": 123,
            "test5": None,
        }
        # If a cfg point is not set, an attribute with value None should still be created
        for name, value in expected.iteritems():
            assert_equal(getattr(cfg, name), value)

        # Testing pre-finalization "set"
        cfg.set("test1", "there")
        assert_equal(cfg.test1, "there")  # pylint: disable=E1101
        with self.assertRaisesRegexp(ConfigError, 'Unknown Test Config configuration "nope"'):
            cfg.set("nope", 123)

        # Testing setting values from a dict
        new_values = {
            "test1": "This",
            "test2": "is",
            "test3": ["a"],
            "test4": 7357,
            "test5": [5],
        }
        cfg.update_config(new_values)
        new_values["test3"] = ["hello", "a"]  # This cfg point has merge == True
        for k, v in new_values.iteritems():
            assert_equal(getattr(cfg, k), v)

        # Test finalization

        # This is a mandatory cfg point, so finalization should fail
        cfg.configuration["test2"].set_value(cfg, value=None, check_mandatory=False)
        msg = 'No value specified for mandatory parameter "test2" in Test Config'
        with self.assertRaisesRegexp(ConfigError, msg):
            cfg.finalize()
        assert_equal(cfg._finalized, False)  # pylint: disable=W0212

        # Valid finalization
        cfg.set("test2", "is")
        cfg.finalize()
        assert_equal(cfg._finalized, True)  # pylint: disable=W0212

        # Post-finalization set should fail
        with self.assertRaises(RuntimeError):
            cfg.set("test2", "is")


class JobsConfigurationTest(TestCase):

    def test_set_global_config(self):
        jc = JobsConfiguration()

        jc.set_global_config("workload_name", "test")
        assert_equal(jc.root_node.config.workload_name, "test")
        # Aliased names (e.g. "name") should be resolved by the parser
        # before being passed here.

        with self.assertRaises(ConfigError):
            jc.set_global_config("unknown", "test")

        jc.finalise_global_config()
        with self.assertRaises(RuntimeError):
            jc.set_global_config("workload_name", "test")

    def test_tree_manipulation(self):
        jc = JobsConfiguration()

        workloads = [123, "hello", True]
        for w in workloads:
            jc.add_workload(w)
        assert_equal(jc.root_node.workloads, workloads)

        jc.add_section("section", workloads)
        assert_equal(jc.root_node.children[0].config, "section")
        assert_equal(jc.root_node.workloads, workloads)

    def test_generate_job_specs(self):

        # disable_instruments
        # only_run_ids
wlauto/tests/test_parsers.py  (new file, 231 lines)
@@ -0,0 +1,231 @@
import os
from unittest import TestCase

from nose.tools import assert_equal  # pylint: disable=E0611
from mock.mock import Mock, MagicMock, call

from wlauto.exceptions import ConfigError
from wlauto.core.configuration.parsers import (get_aliased_param,
                                               _load_file, ConfigParser, EnvironmentVarsParser,
                                               CommandLineArgsParser)
from wlauto.core.configuration import (WAConfiguration, RunConfiguration, JobsConfiguration,
                                       PluginCache)
from wlauto.utils.types import toggle_set


class TestFunctions(TestCase):

    def test_load_file(self):
        # This does not test read_pod

        # Non-existent file
        with self.assertRaises(ValueError):
            _load_file("THIS-IS-NOT-A-FILE", "test file")
        base_path = os.path.dirname(os.path.realpath(__file__))

        # Top level entry not a dict
        with self.assertRaisesRegexp(ConfigError, r".+ does not contain a valid test file structure; top level must be a dict\."):
            _load_file(os.path.join(base_path, "data", "test-agenda-not-dict.yaml"), "test file")

        # Yaml syntax error
        with self.assertRaisesRegexp(ConfigError, r"Error parsing test file .+: Syntax Error on line 1"):
            _load_file(os.path.join(base_path, "data", "test-agenda-bad-syntax.yaml"), "test file")

        # Ideal case
        _load_file(os.path.join(base_path, "data", "test-agenda.yaml"), "test file")

    def test_get_aliased_param(self):
        # Ideal case
        d_correct = {"workload_parameters": [1, 2, 3],
                     "instruments": [2, 3, 4],
                     "some_other_param": 1234}
        assert_equal(get_aliased_param(d_correct, [
            'workload_parameters',
            'workload_params',
            'params'
        ], default=[], pop=False), [1, 2, 3])

        # Two aliases for the same parameter given
        d_duplicate = {"workload_parameters": [1, 2, 3],
                       "workload_params": [2, 3, 4]}
        with self.assertRaises(ConfigError):
            get_aliased_param(d_duplicate, [
                'workload_parameters',
                'workload_params',
                'params'
            ], default=[])

        # Empty dict
        d_none = {}
        assert_equal(get_aliased_param(d_none, [
            'workload_parameters',
            'workload_params',
            'params'
        ], default=[]), [])

        # Aliased parameter not present in dict
        d_not_present = {"instruments": [2, 3, 4],
                         "some_other_param": 1234}
        assert_equal(get_aliased_param(d_not_present, [
            'workload_parameters',
            'workload_params',
            'params'
        ], default=1), 1)

        # Testing pop functionality
        assert_equal("workload_parameters" in d_correct, True)
        get_aliased_param(d_correct, [
            'workload_parameters',
            'workload_params',
            'params'
        ], default=[])
        assert_equal("workload_parameters" in d_correct, False)
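# get_aliased_param behaviour, as pinned down by the assertions above: given a
# dict and a list of alternative key names, it returns the value stored under
# whichever alias is present (or 'default' if none are), raises ConfigError if
# more than one alias appears at once, and pops the matched key out of the dict
# unless pop=False is passed. For example:
#
#     d = {'workload_params': [1]}
#     get_aliased_param(d, ['workload_parameters', 'workload_params', 'params'],
#                       default=[])   # -> [1], and 'workload_params' is removed from d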
class TestConfigParser(TestCase):

    def test_error_cases(self):
        wa_config = Mock(spec=WAConfiguration)
        wa_config.configuration = WAConfiguration.configuration
        run_config = Mock(spec=RunConfiguration)
        run_config.configuration = RunConfiguration.configuration
        config_parser = ConfigParser(wa_config,
                                     run_config,
                                     Mock(spec=JobsConfiguration),
                                     Mock(spec=PluginCache))

        # "run_name" can only be in agenda config sections
        # and is handled by AgendaParser
        err = 'Error in "Unit test":\n' \
              '"run_name" can only be specified in the config section of an agenda'
        with self.assertRaisesRegexp(ConfigError, err):
            config_parser.load({"run_name": "test"}, "Unit test")

        # Instrument and result_processor lists in the same config cannot
        # have conflicting entries.
        err = 'Error in "Unit test":\n' \
              '"instrumentation" and "result_processors" have conflicting entries:'
        with self.assertRaisesRegexp(ConfigError, err):
            config_parser.load({"instruments": ["one", "two", "three"],
                                "result_processors": ["~one", "~two", "~three"]},
                               "Unit test")

    def test_config_points(self):
        wa_config = Mock(spec=WAConfiguration)
        wa_config.configuration = WAConfiguration.configuration

        run_config = Mock(spec=RunConfiguration)
        run_config.configuration = RunConfiguration.configuration

        jobs_config = Mock(spec=JobsConfiguration)
        plugin_cache = Mock(spec=PluginCache)
        config_parser = ConfigParser(wa_config, run_config, jobs_config, plugin_cache)

        cfg = {
            "assets_repository": "/somewhere/",
            "logging": "verbose",
            "project": "some project",
            "project_stage": "stage 1",
            "iterations": 9001,
            "workload_name": "name"
        }
        config_parser.load(cfg, "Unit test")
        wa_config.set.assert_has_calls([
            call("assets_repository", "/somewhere/"),
            call("logging", "verbose")
        ], any_order=True)
        run_config.set.assert_has_calls([
            call("project", "some project"),
            call("project_stage", "stage 1")
        ], any_order=True)
        print jobs_config.set_global_config.call_args_list
        jobs_config.set_global_config.assert_has_calls([
            call("iterations", 9001),
            call("workload_name", "name"),
            call("instrumentation", toggle_set()),
            call("instrumentation", toggle_set())
        ], any_order=True)

        # Test setting global instruments including a non-conflicting duplicate ("two")
        jobs_config.reset_mock()
        instruments_and_result_processors = {
            "instruments": ["one", "two"],
            "result_processors": ["two", "three"]
        }
        config_parser.load(instruments_and_result_processors, "Unit test")
        jobs_config.set_global_config.assert_has_calls([
            call("instrumentation", toggle_set(["one", "two"])),
            call("instrumentation", toggle_set(["two", "three"]))
        ], any_order=True)

        # Testing an empty config
        jobs_config.reset_mock()
        config_parser.load({}, "Unit test")
        jobs_config.set_global_config.assert_has_calls([], any_order=True)
        wa_config.set.assert_has_calls([], any_order=True)
        run_config.set.assert_has_calls([], any_order=True)
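# Dispatch behaviour pinned down by the mock assertions above: ConfigParser.load
# routes each entry of the incoming dict to whichever configuration object
# declares it - WAConfiguration points (assets_repository, logging, ...) go to
# wa_config.set, RunConfiguration points (project, project_stage, ...) go to
# run_config.set, and job-level settings (iterations, workload_name, plus the
# instruments/result_processors lists folded into "instrumentation" toggle_sets)
# go to jobs_config.set_global_config. Keys that are only legal in an agenda's
# config section (run_name), or instrument/result_processor entries that
# conflict with each other, raise ConfigError.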
class TestEnvironmentVarsParser(TestCase):

    def test_environmentvarsparser(self):
        wa_config = Mock(spec=WAConfiguration)
        calls = [call('user_directory', '/testdir'),
                 call('plugin_paths', ['/test', '/some/other/path', '/testy/mc/test/face'])]

        # Valid env vars
        valid_environ = {"WA_USER_DIRECTORY": "/testdir",
                         "WA_PLUGIN_PATHS": "/test:/some/other/path:/testy/mc/test/face"}
        EnvironmentVarsParser(wa_config, valid_environ)
        wa_config.set.assert_has_calls(calls)

        # Alternative env var name
        wa_config.reset_mock()
        alt_valid_environ = {"WA_USER_DIRECTORY": "/testdir",
                             "WA_EXTENSION_PATHS": "/test:/some/other/path:/testy/mc/test/face"}
        EnvironmentVarsParser(wa_config, alt_valid_environ)
        wa_config.set.assert_has_calls(calls)

        # Test that WA_EXTENSION_PATHS gets merged with WA_PLUGIN_PATHS.
        # Also checks that other environment variables don't cause errors
        wa_config.reset_mock()
        calls = [call('user_directory', '/testdir'),
                 call('plugin_paths', ['/test', '/some/other/path']),
                 call('plugin_paths', ['/testy/mc/test/face'])]
        ext_and_plgin = {"WA_USER_DIRECTORY": "/testdir",
                         "WA_PLUGIN_PATHS": "/test:/some/other/path",
                         "WA_EXTENSION_PATHS": "/testy/mc/test/face",
                         "RANDOM_VAR": "random_value"}
        EnvironmentVarsParser(wa_config, ext_and_plgin)
        # If any_order=True then the calls can be in any order, but they must all appear
        wa_config.set.assert_has_calls(calls, any_order=True)

        # No WA environment variables present
        wa_config.reset_mock()
        EnvironmentVarsParser(wa_config, {"RANDOM_VAR": "random_value"})
        wa_config.set.assert_not_called()
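# Environment handling pinned down above: EnvironmentVarsParser reads
# WA_USER_DIRECTORY as a single path and WA_PLUGIN_PATHS as a colon-separated
# list, still accepts the legacy WA_EXTENSION_PATHS name (merged in as a
# separate plugin_paths call), and silently ignores every other variable.
# For example, {'WA_PLUGIN_PATHS': '/a:/b', 'OTHER': 'x'} results in
# wa_config.set('plugin_paths', ['/a', '/b']) and nothing else.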
class TestCommandLineArgsParser(TestCase):
    wa_config = Mock(spec=WAConfiguration)
    run_config = Mock(spec=RunConfiguration)
    jobs_config = Mock(spec=JobsConfiguration)

    cmd_args = MagicMock(
        verbosity=1,
        output_directory="my_results",
        instruments_to_disable=["abc", "def", "ghi"],
        only_run_ids=["wk1", "s1_wk4"],
        some_other_setting="value123"
    )
    CommandLineArgsParser(cmd_args, wa_config, run_config, jobs_config)
    wa_config.set.assert_has_calls([call("verbosity", 1)], any_order=True)
    run_config.set.assert_has_calls([call("output_directory", "my_results")], any_order=True)
    jobs_config.disable_instruments.assert_has_calls([
        call(toggle_set(["~abc", "~def", "~ghi"]))
    ], any_order=True)
    jobs_config.only_run_ids.assert_has_calls([call(["wk1", "s1_wk4"])], any_order=True)
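# Mapping pinned down above: verbosity goes to the top-level WAConfiguration,
# output_directory to the RunConfiguration, and each name in
# instruments_to_disable is prefixed with "~" and wrapped in a toggle_set
# before being handed to jobs_config.disable_instruments; only_run_ids is
# passed through unchanged, and unrecognised attributes (some_other_setting)
# are ignored.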
class TestAgendaParser(TestCase):
    pass
wlauto/tests/test_utils.py
@@ -21,7 +21,7 @@ from nose.tools import raises, assert_equal, assert_not_equal  # pylint: disable

from wlauto.utils.android import check_output
from wlauto.utils.misc import merge_dicts, merge_lists, TimeoutError
from wlauto.utils.types import list_or_integer, list_or_bool, caseless_string, arguments, enable_disable_list
from wlauto.utils.types import list_or_integer, list_or_bool, caseless_string, arguments, toggle_set


class TestCheckOutput(TestCase):
@@ -89,10 +89,10 @@ class TestTypes(TestCase):
                     ['--foo', '7', '--bar', 'fizz buzz'])
        assert_equal(arguments(['test', 42]), ['test', '42'])

    def enable_disable_list_test():
    def toggle_set_test():

        a = enable_disable_list(['qaz', 'qwert', 'asd', '~fgh', '~seb'])
        b = enable_disable_list(['qaz', 'xyz', '~asd', 'fgh', '~seb'])
        a = toggle_set(['qaz', 'qwert', 'asd', '~fgh', '~seb'])
        b = toggle_set(['qaz', 'xyz', '~asd', 'fgh', '~seb'])

        a_into_b = ['qaz', 'xyz', '~seb', 'qwert', 'asd', '~fgh']
        assert_equal(a.merge_into(b), a_into_b)
@@ -104,3 +104,6 @@ class TestTypes(TestCase):

        assert_equal(a.values(), ['qaz', 'qwert', 'asd'])
        assert_equal(b.merge_with(a).values(), ['qaz', 'xyz', 'qwert', 'asd'])

        assert_equal(a.values(), ['qaz', 'qwert', 'asd'])
        assert_equal(a.conflicts_with(b), ['~asd', '~fgh'])
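The renamed toggle_set type keeps the enable/disable-list semantics checked above. A standalone sketch of the same behaviour, restating the assertions with the values used in the test (assuming toggle_set is importable as in the updated import line):

    from wlauto.utils.types import toggle_set

    a = toggle_set(['qaz', 'qwert', 'asd', '~fgh', '~seb'])
    b = toggle_set(['qaz', 'xyz', '~asd', 'fgh', '~seb'])

    # Merging a into b: a's entries win, so 'asd' stays enabled and 'fgh' disabled.
    assert a.merge_into(b) == ['qaz', 'xyz', '~seb', 'qwert', 'asd', '~fgh']

    # values() strips the disabled ("~") entries.
    assert a.values() == ['qaz', 'qwert', 'asd']
    assert b.merge_with(a).values() == ['qaz', 'xyz', 'qwert', 'asd']

    # conflicts_with() lists the names the two sets toggle in opposite directions.
    assert a.conflicts_with(b) == ['~asd', '~fgh']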