From dc6d9676f2ed173867c0e9e565bf0cadcefd9114 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Thu, 9 Feb 2017 09:09:00 +0000
Subject: [PATCH 1/8] Fixing things up to a point where "list" and "show"
 commands work.

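WAConfiguration is now constructed directly from the process environment,
replacing EnvironmentVarsParser (only WA_USER_DIRECTORY is consulted now;
plugins are discovered from the fixed plugin_packages list plus the user
plugins directory). A new wlauto/core/host.py initialises the user directory
on first run. As a rough sketch of the new start-up flow (mirroring main()
in entry_point.py):

    import os

    from wlauto.core.configuration import settings  # a WAConfiguration(os.environ)
    from wlauto.core.host import init_user_directory

    if not os.path.exists(settings.user_directory):
        init_user_directory()  # creates the dependencies/ and plugins/ subdirectories
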
---
 setup.py                                   |   2 +-
 wlauto/commands/run.py                     |   6 +-
 wlauto/core/command.py                     |   2 +-
 wlauto/core/configuration/__init__.py      |   1 -
 wlauto/core/configuration/configuration.py | 124 +++++++++------------
 wlauto/core/configuration/parsers.py       |  14 +--
 wlauto/core/entry_point.py                 |  10 +-
 wlauto/core/host.py                        |  34 ++++++
 wlauto/core/plugin.py                      |   8 +-
 wlauto/core/pluginloader.py                |   3 +-
 wlauto/exceptions.py                       |   4 +-
 wlauto/tests/test_parsers.py               |  45 +-------
 wlauto/utils/misc.py                       |   2 +-
 wlauto/utils/serializer.py                 |   9 +-
 wlauto/utils/types.py                      |   1 +
 15 files changed, 116 insertions(+), 149 deletions(-)
 create mode 100644 wlauto/core/host.py

diff --git a/setup.py b/setup.py
index 31221afe..99152713 100644
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,7 @@ params = dict(
         'pyYAML',  # YAML-formatted agenda parsing
         'requests',  # Fetch assets over HTTP
         'devlib',  # Interacting with devices
-        'louie'  # Handles signal callbacks
+        'louie'  # callback dispatch
     ],
     extras_require={
         'other': ['jinja2', 'pandas>=0.13.1'],
diff --git a/wlauto/commands/run.py b/wlauto/commands/run.py
index afea7a10..197a2fe9 100644
--- a/wlauto/commands/run.py
+++ b/wlauto/commands/run.py
@@ -22,9 +22,9 @@ import wlauto
 from wlauto import Command, settings
 from wlauto.core.execution import Executor
 from wlauto.utils.log import add_log_file
-from wlauto.core.configuration import RunConfiguration, WAConfiguration
+from wlauto.core.configuration import RunConfiguration
 from wlauto.core import pluginloader
-from wlauto.core.configuration_parsers import Agenda, ConfigFile, EnvrironmentVars, CommandLineArgs
+from wlauto.core.configuration.parsers import AgendaParser, ConfigParser, CommandLineArgsParser
 
 
 class RunCommand(Command):
@@ -74,7 +74,7 @@ class RunCommand(Command):
 
         # STAGE 1: Gather configuratation
 
-        env = EnvrironmentVars()
+        env = EnvironmentVars()
         args = CommandLineArgs(args)
 
         # STAGE 2.1a: Early WAConfiguration, required to find config files
diff --git a/wlauto/core/command.py b/wlauto/core/command.py
index bad6b615..47f00c2e 100644
--- a/wlauto/core/command.py
+++ b/wlauto/core/command.py
@@ -21,7 +21,7 @@ from wlauto.core.version import get_wa_version
 
 
 def init_argument_parser(parser):
-    parser.add_argument('-c', '--config', help='specify an additional config.py', action='append')
+    parser.add_argument('-c', '--config', help='specify an additional config.py', action='append', default=[])
     parser.add_argument('-v', '--verbose', action='count',
                         help='The scripts will produce verbose output.')
     parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
diff --git a/wlauto/core/configuration/__init__.py b/wlauto/core/configuration/__init__.py
index 87c7c08c..a3593794 100644
--- a/wlauto/core/configuration/__init__.py
+++ b/wlauto/core/configuration/__init__.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 #
 from wlauto.core.configuration.configuration import (settings,
-                                                     WAConfiguration,
                                                      RunConfiguration,
                                                      JobGenerator,
                                                      ConfigurationPoint)
diff --git a/wlauto/core/configuration/configuration.py b/wlauto/core/configuration/configuration.py
index d42deef6..837a8191 100644
--- a/wlauto/core/configuration/configuration.py
+++ b/wlauto/core/configuration/configuration.py
@@ -492,36 +492,11 @@ class CpuFreqParameters(object):
 class Configuration(object):
 
     config_points = []
-    name = ""
+    name = ''
+
     # The below line must be added to all subclasses
     configuration = {cp.name: cp for cp in config_points}
 
-    def __init__(self):
-        # Load default values for configuration points
-        for confpoint in self.configuration.itervalues():
-            confpoint.set_value(self, check_mandatory=False)
-
-    def set(self, name, value, check_mandatory=True):
-        if name not in self.configuration:
-            raise ConfigError('Unknown {} configuration "{}"'.format(self.name, name))
-        self.configuration[name].set_value(self, value, check_mandatory=check_mandatory)
-
-    def update_config(self, values, check_mandatory=True):
-        for k, v in values.iteritems():
-            self.set(k, v, check_mandatory=check_mandatory)
-
-    def validate(self):
-        for cfg_point in self.configuration.itervalues():
-            cfg_point.validate(self)
-
-    def to_pod(self):
-        pod = {}
-        for cfg_point_name in self.configuration.iterkeys():
-            value = getattr(self, cfg_point_name, None)
-            if value is not None:
-                pod[cfg_point_name] = value
-        return pod
-
     @classmethod
     # pylint: disable=unused-argument
     def from_pod(cls, pod, plugin_cache):
@@ -535,11 +510,46 @@ class Configuration(object):
         instance.validate()
         return instance
 
+    def __init__(self):
+        for confpoint in self.config_points:
+            confpoint.set_value(self, check_mandatory=False)
+
+    def set(self, name, value, check_mandatory=True):
+        if name not in self.configuration:
+            raise ConfigError('Unknown {} configuration "{}"'.format(self.name, name))
+        self.configuration[name].set_value(self, value, check_mandatory=check_mandatory)
+
+    def update_config(self, values, check_mandatory=True):
+        for k, v in values.iteritems():
+            self.set(k, v, check_mandatory=check_mandatory)
+
+    def validate(self):
+        for cfg_point in self.config_points:
+            cfg_point.validate(self)
+
+    def to_pod(self):
+        pod = {}
+        for cfg_point_name in self.configuration.iterkeys():
+            value = getattr(self, cfg_point_name, None)
+            if value is not None:
+                pod[cfg_point_name] = value
+        return pod
+
 
 # This configuration for the core WA framework
 class WAConfiguration(Configuration):
 
     name = "WA Configuration"
+
+    plugin_packages = [
+        'wlauto.commands',
+        'wlauto.workloads',
+        'wlauto.instrumentation',
+        'wlauto.result_processors',
+        'wlauto.managers',
+        'wlauto.resource_getters',
+    ]
+
     config_points = [
         ConfigurationPoint(
             'user_directory',
@@ -550,48 +560,6 @@ class WAConfiguration(Configuration):
             kind=str,
             default=os.path.join(os.path.expanduser('~'), '.workload_automation'),
         ),
-        ConfigurationPoint(
-            'plugin_packages',
-            kind=list_of_strings,
-            default=[
-                'wlauto.commands',
-                'wlauto.workloads',
-                'wlauto.instrumentation',
-                'wlauto.result_processors',
-                'wlauto.managers',
-                'wlauto.resource_getters',
-            ],
-            description="""
-            List of packages that will be scanned for WA plugins.
-            """,
-        ),
-        ConfigurationPoint(
-            'plugin_paths',
-            kind=list_of_strings,
-            default=[
-                'workloads',
-                'instruments',
-                'targets',
-                'processors',
-
-                # Legacy
-                'managers',
-                'result_processors',
-            ],
-            description="""
-            List of paths that will be scanned for WA plugins.
-            """,
-            merge=True
-        ),
-        ConfigurationPoint(
-            'plugin_ignore_paths',
-            kind=list_of_strings,
-            default=[],
-            description="""
-            List of (sub)paths that will be ignored when scanning
-            ``plugin_paths`` for WA plugins.
-            """,
-        ),
         ConfigurationPoint(
             'assets_repository',
             description="""
@@ -623,7 +591,7 @@ class WAConfiguration(Configuration):
             Verbosity of console output.
             """,
         ),
-        ConfigurationPoint(  # TODO: Needs some format for dates ect/ comes from cfg
+        ConfigurationPoint(  # TODO: Needs some format for dates etc.; comes from cfg
             'default_output_directory',
             default="wa_output",
             description="""
@@ -636,7 +604,19 @@ class WAConfiguration(Configuration):
 
     @property
     def dependencies_directory(self):
-        return "{}/dependencies/".format(self.user_directory)
+        return os.path.join(self.user_directory, 'dependencies')
+
+    @property
+    def plugins_directory(self):
+        return os.path.join(self.user_directory, 'plugins')
+
+
+    def __init__(self, environ):
+        super(WAConfiguration, self).__init__()
+        user_directory = environ.pop('WA_USER_DIRECTORY', '')
+        if user_directory:
+            self.set('user_directory', user_directory)
+
 
 
 # This is generic top-level configuration for WA runs.
@@ -1029,4 +1009,4 @@ class JobGenerator(object):
 
                 yield job_spec
 
-settings = WAConfiguration()
+settings = WAConfiguration(os.environ)
diff --git a/wlauto/core/configuration/parsers.py b/wlauto/core/configuration/parsers.py
index aa1dccda..6c5af0d9 100644
--- a/wlauto/core/configuration/parsers.py
+++ b/wlauto/core/configuration/parsers.py
@@ -283,23 +283,11 @@ class AgendaParser(object):
             raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
 
 
-class EnvironmentVarsParser(object):
-    def __init__(self, wa_config, environ):
-        user_directory = environ.pop('WA_USER_DIRECTORY', '')
-        if user_directory:
-            wa_config.set('user_directory', user_directory)
-        plugin_paths = environ.pop('WA_PLUGIN_PATHS', '')
-        if plugin_paths:
-            wa_config.set('plugin_paths', plugin_paths.split(os.pathsep))
-        ext_paths = environ.pop('WA_EXTENSION_PATHS', '')
-        if ext_paths:
-            wa_config.set('plugin_paths', ext_paths.split(os.pathsep))
-
-
 # Command line options are parsed in the "run" command. This is used to send
 # certain arguments to the correct configuration points and keep a record of
 # how WA was invoked
 class CommandLineArgsParser(object):
+
     def __init__(self, cmd_args, wa_config, jobs_config):
         wa_config.set("verbosity", cmd_args.verbosity)
         # TODO: Is this correct? Does there need to be a third output dir param
diff --git a/wlauto/core/entry_point.py b/wlauto/core/entry_point.py
index 09cbfece..8855a55d 100644
--- a/wlauto/core/entry_point.py
+++ b/wlauto/core/entry_point.py
@@ -24,6 +24,7 @@ import warnings
 from wlauto.core.configuration import settings
 from wlauto.core import pluginloader
 from wlauto.core.command import init_argument_parser
+from wlauto.core.host import init_user_directory
 from wlauto.exceptions import WAError, ConfigError
 from wlauto.utils.misc import get_traceback
 from wlauto.utils.log import init_logging
@@ -45,7 +46,11 @@ def load_commands(subparsers):
 
 
 def main():
+    if not os.path.exists(settings.user_directory):
+        init_user_directory()
+
     try:
+
         description = ("Execute automated workloads on a remote device and process "
                        "the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
                        "help for individual subcommands.")
@@ -57,10 +62,7 @@ def main():
         commands = load_commands(parser.add_subparsers(dest='command'))  # each command will add its own subparser
         args = parser.parse_args()
 
-        #TODO: Set this stuff properly, i.e dont use settings (if possible)
-        #settings.set("verbosity", args.verbose)
-        #settings.load_user_config()
-        #settings.debug = args.debug
+        settings.set("verbosity", args.verbose)
 
         for config in args.config:
             if not os.path.exists(config):
diff --git a/wlauto/core/host.py b/wlauto/core/host.py
new file mode 100644
index 00000000..33810b93
--- /dev/null
+++ b/wlauto/core/host.py
@@ -0,0 +1,34 @@
+import os
+import shutil
+
+from wlauto.core.configuration import settings
+
+def init_user_directory(overwrite_existing=False):  # pylint: disable=R0914
+    """
+    Initialise a fresh user directory.
+    """
+    if os.path.exists(settings.user_directory):
+        if not overwrite_existing:
+            raise RuntimeError('Environment {} already exists.'.format(settings.user_directory))
+        shutil.rmtree(settings.user_directory)
+
+    os.makedirs(settings.user_directory)
+    os.makedirs(settings.dependencies_directory)
+    os.makedirs(settings.plugins_directory)
+
+    # TODO: generate default config.yaml here
+
+    if os.getenv('USER') == 'root':
+        # If running with sudo on POSIX, change the ownership to the real user.
+        real_user = os.getenv('SUDO_USER')
+        if real_user:
+            import pwd  # done here as module won't import on win32
+            user_entry = pwd.getpwnam(real_user)
+            uid, gid = user_entry.pw_uid, user_entry.pw_gid
+            os.chown(settings.user_directory, uid, gid)
+            # why, oh why isn't there a recursive=True option for os.chown?
+            for root, dirs, files in os.walk(settings.user_directory):
+                for d in dirs:
+                    os.chown(os.path.join(root, d), uid, gid)
+                for f in files:
+                    os.chown(os.path.join(root, f), uid, gid)
diff --git a/wlauto/core/plugin.py b/wlauto/core/plugin.py
index 2d737560..f614169b 100644
--- a/wlauto/core/plugin.py
+++ b/wlauto/core/plugin.py
@@ -25,13 +25,14 @@ from collections import OrderedDict, defaultdict
 from itertools import chain
 from copy import copy
 
-from wlauto.exceptions import NotFoundError, LoaderError, ValidationError, ConfigError
+from wlauto.exceptions import NotFoundError, LoaderError, ValidationError, ConfigError, HostError
 from wlauto.utils.misc import (ensure_directory_exists as _d,
                                walk_modules, load_class, merge_dicts_simple, get_article)
 from wlauto.core.configuration import settings
 from wlauto.utils.types import identifier, boolean
 from wlauto.core.configuration.configuration import ConfigurationPoint as Parameter
 
+
 MODNAME_TRANS = string.maketrans(':/\\.', '____')
 
 
@@ -697,10 +698,9 @@ class PluginLoader(object):
             for package in packages:
                 for module in walk_modules(package):
                     self._discover_in_module(module)
-        except ImportError as e:
-            source = getattr(e, 'path', package)
+        except HostError as e:
             message = 'Problem loading plugins from {}: {}'
-            raise LoaderError(message.format(source, e.message))
+            raise LoaderError(message.format(e.module, str(e.orig_exc)))
 
     def _discover_from_paths(self, paths, ignore_paths):
         paths = paths or []
diff --git a/wlauto/core/pluginloader.py b/wlauto/core/pluginloader.py
index 0aa8dd3f..dde6b828 100644
--- a/wlauto/core/pluginloader.py
+++ b/wlauto/core/pluginloader.py
@@ -38,8 +38,7 @@ class __LoaderWrapper(object):
         from wlauto.core.plugin import PluginLoader
         from wlauto.core.configuration import settings
         self._loader = PluginLoader(settings.plugin_packages,
-                                    settings.plugin_paths,
-                                    settings.plugin_ignore_paths)
+                                    [settings.plugins_directory], [])
 
     def update(self, packages=None, paths=None, ignore_paths=None):
         if not self._loader:
diff --git a/wlauto/exceptions.py b/wlauto/exceptions.py
index 67999e57..bd4a0bb6 100644
--- a/wlauto/exceptions.py
+++ b/wlauto/exceptions.py
@@ -14,7 +14,9 @@
 #
 
 
-from wlauto.utils.misc import get_traceback, TimeoutError  # NOQA pylint: disable=W0611
+from wlauto.utils.misc import get_traceback
+
+from devlib.exception import DevlibError, HostError, TargetError, TimeoutError
 
 
 class WAError(Exception):
diff --git a/wlauto/tests/test_parsers.py b/wlauto/tests/test_parsers.py
index 6f9e75eb..763d2c7f 100644
--- a/wlauto/tests/test_parsers.py
+++ b/wlauto/tests/test_parsers.py
@@ -8,8 +8,8 @@ from mock.mock import Mock, MagicMock, call
 from wlauto.exceptions import ConfigError
 from wlauto.core.configuration.parsers import *  # pylint: disable=wildcard-import
 from wlauto.core.configuration.parsers import _load_file, _collect_valid_id, _resolve_params_alias
-from wlauto.core.configuration import (WAConfiguration, RunConfiguration, JobGenerator,
-                                       PluginCache, ConfigurationPoint)
+from wlauto.core.configuration import RunConfiguration, JobGenerator, PluginCache, ConfigurationPoint
+from wlauto.core.configuration.configuration import WAConfiguration
 from wlauto.utils.types import toggle_set, reset_counter
 
 
@@ -125,9 +125,6 @@ class TestFunctions(TestCase):
         with self.assertRaises(ConfigError):
             _resolve_params_alias(test, "new_name")
 
-    def test_construct_valid_entry(self):
-        raise Exception()
-
 
 class TestConfigParser(TestCase):
 
@@ -362,44 +359,6 @@ class TestAgendaParser(TestCase):
         assert_equal(workload['workload_name'], "test")
 
 
-class TestEnvironmentVarsParser(TestCase):
-
-    def test_environmentvarsparser(self):
-        wa_config = Mock(spec=WAConfiguration)
-        calls = [call('user_directory', '/testdir'),
-                 call('plugin_paths', ['/test', '/some/other/path', '/testy/mc/test/face'])]
-
-        # Valid env vars
-        valid_environ = {"WA_USER_DIRECTORY": "/testdir",
-                         "WA_PLUGIN_PATHS": "/test:/some/other/path:/testy/mc/test/face"}
-        EnvironmentVarsParser(wa_config, valid_environ)
-        wa_config.set.assert_has_calls(calls)
-
-        # Alternative env var name
-        wa_config.reset_mock()
-        alt_valid_environ = {"WA_USER_DIRECTORY": "/testdir",
-                             "WA_EXTENSION_PATHS": "/test:/some/other/path:/testy/mc/test/face"}
-        EnvironmentVarsParser(wa_config, alt_valid_environ)
-        wa_config.set.assert_has_calls(calls)
-
-        # Test that WA_EXTENSION_PATHS gets merged with WA_PLUGIN_PATHS.
-        # Also checks that other enviroment variables don't cause errors
-        wa_config.reset_mock()
-        calls = [call('user_directory', '/testdir'),
-                 call('plugin_paths', ['/test', '/some/other/path']),
-                 call('plugin_paths', ['/testy/mc/test/face'])]
-        ext_and_plgin = {"WA_USER_DIRECTORY": "/testdir",
-                         "WA_PLUGIN_PATHS": "/test:/some/other/path",
-                         "WA_EXTENSION_PATHS": "/testy/mc/test/face",
-                         "RANDOM_VAR": "random_value"}
-        EnvironmentVarsParser(wa_config, ext_and_plgin)
-        # If any_order=True then the calls can be in any order, but they must all appear
-        wa_config.set.assert_has_calls(calls, any_order=True)
-
-        # No WA enviroment variables present
-        wa_config.reset_mock()
-        EnvironmentVarsParser(wa_config, {"RANDOM_VAR": "random_value"})
-        wa_config.set.assert_not_called()
 
 
 class TestCommandLineArgsParser(TestCase):
diff --git a/wlauto/utils/misc.py b/wlauto/utils/misc.py
index cf170810..3f6d7e8b 100644
--- a/wlauto/utils/misc.py
+++ b/wlauto/utils/misc.py
@@ -492,7 +492,7 @@ def merge_config_values(base, other):
            are treated as atomic, and not mergeable.
         s: A sequence. Anything iterable that is not a dict or
            a string (strings are considered scalars).
-        m: A key-value mapping. ``dict`` and it's derivatives.
+        m: A key-value mapping. ``dict`` and its derivatives.
         n: ``None``.
         o: A mergeable object; this is an object that implements both
           ``merge_with`` and ``merge_into`` methods.
diff --git a/wlauto/utils/serializer.py b/wlauto/utils/serializer.py
index 821e01b1..eb2d893e 100644
--- a/wlauto/utils/serializer.py
+++ b/wlauto/utils/serializer.py
@@ -51,7 +51,7 @@ import yaml as _yaml
 import dateutil.parser
 
 from wlauto.exceptions import SerializerSyntaxError
-from wlauto.utils.types import regex_type
+from wlauto.utils.types import regex_type, none_type
 from wlauto.utils.misc import isiterable
 
 
@@ -70,12 +70,14 @@ POD_TYPES = [
     tuple,
     dict,
     set,
-    basestring,
+    str,
+    unicode,
     int,
     float,
     bool,
     datetime,
-    regex_type
+    regex_type,
+    none_type,
 ]
 
 class WAJSONEncoder(_json.JSONEncoder):
@@ -257,3 +259,4 @@ def _read_pod(fh, fmt=None):
 
 def is_pod(obj):
     return type(obj) in POD_TYPES
+
diff --git a/wlauto/utils/types.py b/wlauto/utils/types.py
index 94f257f2..c23f8215 100644
--- a/wlauto/utils/types.py
+++ b/wlauto/utils/types.py
@@ -169,6 +169,7 @@ list_or_bool = list_or(boolean)
 
 
 regex_type = type(re.compile(''))
+none_type = type(None)
 
 
 def regex(value):

From 3d8503b056a8baa6451b3af2cf72ad59add078f8 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Thu, 9 Feb 2017 15:38:28 +0000
Subject: [PATCH 2/8] Fixed run command to the point of invoking the Executor

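Command.execute() now takes a WAState (new wlauto/core/state.py) that bundles
the processed configuration, so ConfigParser and AgendaParser no longer hold
references to individual config objects; CommandLineArgsParser and the old
run-command scaffolding are gone, with --disable and --id handled directly in
RunCommand. A minimal sketch of a command under the new signature (MyCommand
is a made-up example):

    class MyCommand(Command):

        name = 'mycommand'

        def execute(self, state, args):
            # state carries settings, run_config, jobs_config and plugin_cache
            state.jobs_config.only_run_ids(args.only_run_ids)
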
---
 wlauto/commands/create.py                  |   2 +-
 wlauto/commands/list.py                    |   2 +-
 wlauto/commands/record.py                  |   2 +-
 wlauto/commands/run.py                     | 154 ++-------
 wlauto/commands/show.py                    |   2 +-
 wlauto/core/command.py                     |  24 +-
 wlauto/core/configuration/configuration.py |  10 +-
 wlauto/core/configuration/parsers.py       | 362 +++++++++++----------
 wlauto/core/entry_point.py                 |  29 +-
 wlauto/core/execution.py                   |  47 +--
 wlauto/core/state.py                       |  28 ++
 11 files changed, 310 insertions(+), 352 deletions(-)
 create mode 100644 wlauto/core/state.py

diff --git a/wlauto/commands/create.py b/wlauto/commands/create.py
index fd8e02f5..b520a208 100644
--- a/wlauto/commands/create.py
+++ b/wlauto/commands/create.py
@@ -117,7 +117,7 @@ class CreateWorkloadSubcommand(CreateSubcommand):
                                          'should place the APK file into the workload\'s directory at the ' +
                                          'same level as the __init__.py.')
 
-    def execute(self, args):  # pylint: disable=R0201
+    def execute(self, state, args):  # pylint: disable=R0201
         where = args.path or 'local'
         check_name = not args.force
 
diff --git a/wlauto/commands/list.py b/wlauto/commands/list.py
index 1de4db80..f261c7eb 100644
--- a/wlauto/commands/list.py
+++ b/wlauto/commands/list.py
@@ -39,7 +39,7 @@ class ListCommand(Command):
         self.parser.add_argument('-p', '--platform', help='Only list results that are supported by '
                                                           'the specified platform')
 
-    def execute(self, args):
+    def execute(self, state, args):
         filters = {}
         if args.name:
             filters['name'] = args.name
diff --git a/wlauto/commands/record.py b/wlauto/commands/record.py
index 49cddae5..23cf5410 100644
--- a/wlauto/commands/record.py
+++ b/wlauto/commands/record.py
@@ -78,7 +78,7 @@ class RecordCommand(Command):
             args.suffix += "."
 
     # pylint: disable=W0201
-    def execute(self, args):
+    def execute(self, state, args):
         self.validate_args(args)
         self.logger.info("Connecting to device...")
 
diff --git a/wlauto/commands/run.py b/wlauto/commands/run.py
index 197a2fe9..5374a195 100644
--- a/wlauto/commands/run.py
+++ b/wlauto/commands/run.py
@@ -20,11 +20,13 @@ import shutil
 
 import wlauto
 from wlauto import Command, settings
-from wlauto.core.execution import Executor
-from wlauto.utils.log import add_log_file
-from wlauto.core.configuration import RunConfiguration
 from wlauto.core import pluginloader
-from wlauto.core.configuration.parsers import AgendaParser, ConfigParser, CommandLineArgsParser
+from wlauto.core.configuration import RunConfiguration
+from wlauto.core.configuration.parsers import AgendaParser, ConfigParser
+from wlauto.core.execution import Executor
+from wlauto.exceptions import NotFoundError, ConfigError
+from wlauto.utils.log import add_log_file
+from wlauto.utils.types import toggle_set
 
 
 class RunCommand(Command):
@@ -32,103 +34,6 @@ class RunCommand(Command):
     name = 'run'
     description = 'Execute automated workloads on a remote device and process the resulting output.'
 
-    def initialize(self, context):
-        self.parser.add_argument('agenda', metavar='AGENDA',
-                                 help="""
-                                 Agenda for this workload automation run. This defines which
-                                 workloads will be executed, how many times, with which
-                                 tunables, etc.  See example agendas in {} for an example of
-                                 how this file should be structured.
-                                 """.format(os.path.dirname(wlauto.__file__)))
-        self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
-                                 help="""
-                                 Specify a directory where the output will be generated. If
-                                 the directory already exists, the script will abort unless -f
-                                 option (see below) is used, in which case the contents of the
-                                 directory will be overwritten. If this option is not specified,
-                                 then {} will be used instead.
-                                 """.format("settings.default_output_directory"))  # TODO: Fix this!
-        self.parser.add_argument('-f', '--force', action='store_true',
-                                 help="""
-                                 Overwrite output directory if it exists. By default, the script
-                                 will abort in this situation to prevent accidental data loss.
-                                 """)
-        self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
-                                 help="""
-                                 Specify a workload spec ID from an agenda to run. If this is
-                                 specified, only that particular spec will be run, and other
-                                 workloads in the agenda will be ignored. This option may be
-                                 used to specify multiple IDs.
-                                 """)
-        self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
-                                 metavar='INSTRUMENT', help="""
-                                 Specify an instrument to disable from the command line. This
-                                 equivalent to adding "~{metavar}" to the instrumentation list in
-                                 the agenda. This can be used to temporarily disable a troublesome
-                                 instrument for a particular run without introducing permanent
-                                 change to the config (which one might then forget to revert).
-                                 This option may be specified multiple times.
-                                 """)
-
-    def execute(self, args):
-
-        # STAGE 1: Gather configuratation
-
-        env = EnvironmentVars()
-        args = CommandLineArgs(args)
-
-        # STAGE 2.1a: Early WAConfiguration, required to find config files
-        if env.user_directory:
-            settings.set("user_directory", env.user_directory)
-        if env.plugin_paths:
-            settings.set("plugin_paths", env.plugin_paths)
-        # STAGE 1 continued
-
-        # TODO: Check for config.py and convert to yaml, if it fails, warn user.
-        configs = [ConfigFile(os.path.join(settings.user_directory, 'config.yaml'))]
-        for c in args.config:
-            configs.append(ConfigFile(c))
-        agenda = Agenda(args.agenda)
-        configs.append(Agenda.config)
-
-        # STAGE 2: Sending configuration to the correct place & merging in
-        #          order of priority.
-        #
-        #          Priorities (lowest to highest):
-        #           - Enviroment Variables
-        #           - config.yaml from `user_directory`
-        #           - config files passed via command line
-        #             (the first specified will be the first to be applied)
-        #           - Agenda
-        #           - Command line configuration e.g. disabled instrumentation.
-
-        # STAGE 2.1b: WAConfiguration
-        for config in configs:
-            for config_point in settings.configuration.keys():
-                if hasattr(config, config_point):
-                    settings.set(config_point, config.getattr(config_point))
-
-
-    def _parse_config(self):
-        pass
-
-    def _serialize_raw_config(self, env, args, agenda, configs):
-        pod = {}
-        pod['environment_variables'] = env.to_pod()
-        pod['commandline_arguments'] = args.to_pod()
-        pod['agenda'] = agenda.to_pod()
-        pod['config_files'] = [c.to_pod() for c in configs]
-        return pod
-
-    def _serialize_final_config(self):
-        pass
-
-
-class OldRunCommand(Command):
-
-    name = 'old_run'
-    description = 'Execute automated workloads on a remote device and process the resulting output.'
-
     def initialize(self, context):
         self.parser.add_argument('agenda', metavar='AGENDA',
                                  help="""
@@ -158,6 +63,7 @@ class OldRunCommand(Command):
                                  used to specify multiple IDs.
                                  """)
         self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
+                                 default=[],
                                  metavar='INSTRUMENT', help="""
                                  Specify an instrument to disable from the command line. This
                                  equivalent to adding "~{metavar}" to the instrumentation list in
@@ -167,38 +73,32 @@ class OldRunCommand(Command):
                                  This option may be specified multiple times.
                                  """)
 
-    def execute(self, args):  # NOQA
+    def execute(self, state, args):
         output_directory = self.set_up_output_directory(args)
         add_log_file(os.path.join(output_directory, "run.log"))
-        config = RunConfiguration(pluginloader)
 
+        disabled_instruments = toggle_set(["~{}".format(i)
+                                           for i in args.instruments_to_disable])
+        state.jobs_config.disable_instruments(disabled_instruments)
+        state.jobs_config.only_run_ids(args.only_run_ids)
+
+        parser = AgendaParser()
         if os.path.isfile(args.agenda):
-            agenda = Agenda(args.agenda)
-            settings.agenda = args.agenda
-            shutil.copy(args.agenda, config.meta_directory)
+            parser.load_from_path(state, args.agenda)
         else:
-            self.logger.debug('{} is not a file; assuming workload name.'.format(args.agenda))
-            agenda = Agenda()
-            agenda.add_workload_entry(args.agenda)
+            try:
+                pluginloader.get_workload(args.agenda)
+                agenda = {'workloads': [{'name': args.agenda}]}
+                parser.load(state, agenda, 'CMDLINE_ARGS')
+            except NotFoundError:
+                msg = 'Agenda file "{}" does not exist, and there is no '\
+                      'workload with that name.\nYou can get a list of '\
+                      'available workloads by running "wa list workloads".'
+                raise ConfigError(msg.format(args.agenda))
 
-        for filepath in settings.config_paths:
-            config.load_config(filepath)
-
-        if args.instruments_to_disable:
-            if 'instrumentation' not in agenda.config:
-                agenda.config['instrumentation'] = []
-            for itd in args.instruments_to_disable:
-                self.logger.debug('Updating agenda to disable {}'.format(itd))
-                agenda.config['instrumentation'].append('~{}'.format(itd))
-
-        basename = 'config_'
-        for file_number, path in enumerate(settings.config_paths, 1):
-            file_ext = os.path.splitext(path)[1]
-            shutil.copy(path, os.path.join(config.meta_directory,
-                                           basename + str(file_number) + file_ext))
-
-        executor = Executor(config)
-        executor.execute(agenda, selectors={'ids': args.only_run_ids})
+        executor = Executor()
+        # TODO: fix executor
+        # executor.execute(state, selectors={'ids': args.only_run_ids})
 
     def set_up_output_directory(self, args):
         if args.output_directory:
diff --git a/wlauto/commands/show.py b/wlauto/commands/show.py
index e89085e3..75bbac8b 100644
--- a/wlauto/commands/show.py
+++ b/wlauto/commands/show.py
@@ -40,7 +40,7 @@ class ShowCommand(Command):
                                  help='''The name of the plugin for which information will
                                          be shown.''')
 
-    def execute(self, args):
+    def execute(self, state, args):
         # pylint: disable=unpacking-non-sequence
         plugin = pluginloader.get_plugin_class(args.name)
         out = StringIO()
diff --git a/wlauto/core/command.py b/wlauto/core/command.py
index 47f00c2e..4cbf424e 100644
--- a/wlauto/core/command.py
+++ b/wlauto/core/command.py
@@ -21,19 +21,22 @@ from wlauto.core.version import get_wa_version
 
 
 def init_argument_parser(parser):
-    parser.add_argument('-c', '--config', help='specify an additional config.py', action='append', default=[])
+    parser.add_argument('-c', '--config', action='append', default=[],
+                        help='specify an additional config.py')
     parser.add_argument('-v', '--verbose', action='count',
                         help='The scripts will produce verbose output.')
-    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
+    parser.add_argument('--version', action='version',
+                        version='%(prog)s {}'.format(get_wa_version()))
     return parser
 
 
 class Command(Plugin):
     """
-    Defines a Workload Automation command. This will be executed from the command line as
-    ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
-    code that will actually be executed on invocation and the argument parser to be used
-    to parse the reset of the command line arguments.
+    Defines a Workload Automation command. This will be executed from the
+    command line as ``wa <command> [args ...]``. This defines the name to be
+    used when invoking wa, the code that will actually be executed on
+    invocation and the argument parser to be used to parse the rest of the
+    command line arguments.
 
     """
     kind = "command"
@@ -57,16 +60,19 @@ class Command(Plugin):
 
     def initialize(self, context):
         """
-        Perform command-specific initialisation (e.g. adding command-specific options to the command's
-        parser). ``context`` is always ``None``.
+        Perform command-specific initialisation (e.g. adding command-specific
+        options to the command's parser). ``context`` is always ``None``.
 
         """
         pass
 
-    def execute(self, args):
+    def execute(self, state, args):
         """
         Execute this command.
 
+        :state: An initialized ``WAState`` that contains the current state of
+                WA execution up to that point (processed configuration, loaded
+                plugins, etc).
         :args: An ``argparse.Namespace`` containing command line arguments (as returned by
                ``argparse.ArgumentParser.parse_args()``. This would usually be the result of
                invoking ``self.parser``.
diff --git a/wlauto/core/configuration/configuration.py b/wlauto/core/configuration/configuration.py
index 837a8191..28cd7730 100644
--- a/wlauto/core/configuration/configuration.py
+++ b/wlauto/core/configuration/configuration.py
@@ -516,8 +516,10 @@ class Configuration(object):
 
     def set(self, name, value, check_mandatory=True):
         if name not in self.configuration:
-            raise ConfigError('Unknown {} configuration "{}"'.format(self.name, name))
-        self.configuration[name].set_value(self, value, check_mandatory=check_mandatory)
+            raise ConfigError('Unknown {} configuration "{}"'.format(self.name,
+                                                                     name))
+        self.configuration[name].set_value(self, value,
+                                           check_mandatory=check_mandatory)
 
     def update_config(self, values, check_mandatory=True):
         for k, v in values.iteritems():
@@ -610,6 +612,9 @@ class WAConfiguration(Configuration):
     def plugins_directory(self):
         return os.path.join(self.user_directory, 'plugins')
 
+    @property
+    def user_config_file(self):
+        return os.path.join(self.user_directory, 'config.yaml')
 
     def __init__(self, environ):
         super(WAConfiguration, self).__init__()
@@ -618,7 +623,6 @@ class WAConfiguration(Configuration):
             self.set('user_directory', user_directory)
 
 
-
 # This is generic top-level configuration for WA runs.
 class RunConfiguration(Configuration):
 
diff --git a/wlauto/core/configuration/parsers.py b/wlauto/core/configuration/parsers.py
index 6c5af0d9..caa8d165 100644
--- a/wlauto/core/configuration/parsers.py
+++ b/wlauto/core/configuration/parsers.py
@@ -20,13 +20,151 @@ from wlauto.utils.serializer import read_pod, SerializerSyntaxError
 from wlauto.utils.types import toggle_set, counter
 from wlauto.core.configuration.configuration import JobSpec
 
+
+###############
+### Parsers ###
+###############
+
+class ConfigParser(object):
+
+    def load_from_path(self, state, filepath):
+        self.load(state, _load_file(filepath, "Config"), filepath)
+
+    def load(self, state, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
+        try:
+            if 'run_name' in raw:
+                msg = '"run_name" can only be specified in the config '\
+                      'section of an agenda'
+                raise ConfigError(msg)
+
+            if 'id' in raw:
+                raise ConfigError('"id" cannot be set globally')
+
+            merge_result_processors_instruments(raw)
+
+            # Get WA core configuration
+            for cfg_point in state.settings.configuration.itervalues():
+                value = get_aliased_param(cfg_point, raw)
+                if value is not None:
+                    state.settings.set(cfg_point.name, value)
+
+            # Get run specific configuration
+            for cfg_point in state.run_config.configuration.itervalues():
+                value = get_aliased_param(cfg_point, raw)
+                if value is not None:
+                    state.run_config.set(cfg_point.name, value)
+
+            # Get global job spec configuration
+            for cfg_point in JobSpec.configuration.itervalues():
+                value = get_aliased_param(cfg_point, raw)
+                if value is not None:
+                    state.jobs_config.set_global_value(cfg_point.name, value)
+
+            for name, values in raw.iteritems():
+                # Assume that all leftover config is for a plug-in or a global
+                # alias it is up to PluginCache to assert this assumption
+                state.plugin_cache.add_configs(name, values, source)
+
+        except ConfigError as e:
+            if wrap_exceptions:
+                raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
+            else:
+                raise e
+
+
+class AgendaParser(object):
+
+    def load_from_path(self, state, filepath):
+        raw = _load_file(filepath, 'Agenda')
+        self.load(state, raw, filepath)
+
+    def load(self, state, raw, source):
+        try:
+            if not isinstance(raw, dict):
+                raise ConfigError('Invalid agenda, top level entry must be a dict')
+
+            self._populate_and_validate_config(state, raw, source)
+            sections = self._pop_sections(raw)
+            global_workloads = self._pop_workloads(raw)
+
+            if raw:
+                msg = 'Invalid top level agenda entry(ies): "{}"'
+                raise ConfigError(msg.format('", "'.join(raw.keys())))
+
+            sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
+            self._process_global_workloads(state, global_workloads, wkl_ids)
+            self._process_sections(state, sections, sect_ids, wkl_ids)
+
+        except (ConfigError, SerializerSyntaxError) as e:
+            raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
+
+    def _populate_and_validate_config(self, state, raw, source):
+        for name in ['config', 'global']:
+            entry = raw.pop(name, None)
+            if entry is None:
+                continue
+
+            if not isinstance(entry, dict):
+                msg = 'Invalid entry "{}" - must be a dict'
+                raise ConfigError(msg.format(name))
+
+            if 'run_name' in entry:
+                state.run_config.set('run_name', entry.pop('run_name'))
+
+            state.load_config(entry, source, wrap_exceptions=False)
+
+    def _pop_sections(self, raw):
+        sections = raw.pop("sections", [])
+        if not isinstance(sections, list):
+            raise ConfigError('Invalid entry "sections" - must be a list')
+        return sections
+
+    def _pop_workloads(self, raw):
+        workloads = raw.pop("workloads", [])
+        if not isinstance(workloads, list):
+            raise ConfigError('Invalid entry "workloads" - must be a list')
+        return workloads
+
+    def _collect_ids(self, sections, global_workloads):
+        seen_section_ids = set()
+        seen_workload_ids = set()
+
+        for workload in global_workloads:
+            workload = _get_workload_entry(workload)
+            _collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
+
+        for section in sections:
+            _collect_valid_id(section.get("id"), seen_section_ids, "section")
+            for workload in section["workloads"] if "workloads" in section else []:
+                workload = _get_workload_entry(workload)
+                _collect_valid_id(workload.get("id"), seen_workload_ids,
+                                  "workload")
+
+        return seen_section_ids, seen_workload_ids
+
+    def _process_global_workloads(self, state, global_workloads, seen_wkl_ids):
+        for workload_entry in global_workloads:
+            workload = _process_workload_entry(workload_entry, seen_wkl_ids,
+                                               state.jobs_config)
+            state.jobs_config.add_workload(workload)
+
+    def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
+        for section in sections:
+            workloads = []
+            for workload_entry in section.pop("workloads", []):
+                workload = _process_workload_entry(workload_entry, seen_wkl_ids,
+                                                   state.jobs_config)
+                workloads.append(workload)
+
+            section = _construct_valid_entry(section, seen_sect_ids,
+                                             "s", state.jobs_config)
+            state.jobs_config.add_section(section, workloads)
+
+
 ########################
 ### Helper functions ###
 ########################
 
-DUPLICATE_ENTRY_ERROR = 'Only one of {} may be specified in a single entry'
-
-
 def get_aliased_param(cfg_point, d, default=None, pop=True):
     """
     Given a ConfigurationPoint and a dict, this function will search the dict for
@@ -62,55 +200,79 @@ def _load_file(filepath, error_name):
 
 
 def merge_result_processors_instruments(raw):
-    instruments = toggle_set(get_aliased_param(JobSpec.configuration['instrumentation'],
-                                               raw, default=[]))
+    instr_config = JobSpec.configuration['instrumentation']
+    instruments = toggle_set(get_aliased_param(instr_config, raw, default=[]))
     result_processors = toggle_set(raw.pop('result_processors', []))
     if instruments and result_processors:
         conflicts = instruments.conflicts_with(result_processors)
         if conflicts:
-            msg = '"instrumentation" and "result_processors" have conflicting entries: {}'
+            msg = '"instrumentation" and "result_processors" have '\
+                  'conflicting entries: {}'
             entires = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts)
             raise ConfigError(msg.format(entires))
     raw['instrumentation'] = instruments.merge_with(result_processors)
 
 
-def _construct_valid_entry(raw, seen_ids, counter_name, jobs_config):
-    entries = {}
+def _pop_aliased(d, names, entry_id):
+    name_count = sum(1 for n in names if n in d)
+    if name_count > 1:
+        names_list = ', '.join(names)
+        msg = 'Invalid workload entry "{}": at most one of ({}) must be specified.'
+        raise ConfigError(msg.format(entry_id, names_list))
+    for name in names:
+        if name in d:
+            return d.pop(name)
+    return None
+
+
+def _construct_valid_entry(raw, seen_ids, prefix, jobs_config):
+    workload_entry = {}
 
     # Generate an automatic ID if the entry doesn't already have one
-    if "id" not in raw:
+    if 'id' not in raw:
         while True:
-            new_id = "{}{}".format(counter_name, counter(name=counter_name))
+            new_id = '{}{}'.format(prefix, counter(name=prefix))
             if new_id not in seen_ids:
                 break
-        entries["id"] = new_id
+        workload_entry['id'] = new_id
         seen_ids.add(new_id)
     else:
-        entries["id"] = raw.pop("id")
+        workload_entry['id'] = raw.pop('id')
 
     # Process instrumentation
     merge_result_processors_instruments(raw)
 
-    # Validate all entries
+    # Validate all remaining entries
     for name, cfg_point in JobSpec.configuration.iteritems():
         value = get_aliased_param(cfg_point, raw)
         if value is not None:
             value = cfg_point.kind(value)
             cfg_point.validate_value(name, value)
-            entries[name] = value
-    entries["workload_parameters"] = raw.pop("workload_parameters", None)
-    entries["runtime_parameters"] = raw.pop("runtime_parameters", None)
-    entries["boot_parameters"] = raw.pop("boot_parameters", None)
+            workload_entry[name] = value
 
-    if "instrumentation" in entries:
-        jobs_config.update_enabled_instruments(entries["instrumentation"])
+    wk_id = workload_entry['id']
+    param_names = ['workload_params', 'workload_parameters']
+    if prefix == 'wk':
+        param_names += ['params', 'parameters']
+    workload_entry["workload_parameters"] = _pop_aliased(raw, param_names, wk_id)
 
-    # error if there are unknown entries
+    param_names = ['runtime_parameters', 'runtime_params']
+    if prefix == 's':
+        param_names += ['params', 'parameters']
+    workload_entry["runtime_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    param_names = ['boot_parameters', 'boot_params']
+    workload_entry["boot_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    if "instrumentation" in workload_entry:
+        jobs_config.update_enabled_instruments(workload_entry["instrumentation"])
+
+    # error if any unknown entries remain
     if raw:
         msg = 'Invalid entry(ies) in "{}": "{}"'
-        raise ConfigError(msg.format(entries['id'], ', '.join(raw.keys())))
+        raise ConfigError(msg.format(workload_entry['id'], ', '.join(raw.keys())))
 
-    return entries
+    return workload_entry
 
 
 def _collect_valid_id(entry_id, seen_ids, entry_type):
@@ -128,15 +290,6 @@ def _collect_valid_id(entry_id, seen_ids, entry_type):
     seen_ids.add(entry_id)
 
 
-def _resolve_params_alias(entry, param_alias):
-    possible_names = {"params", "{}_params".format(param_alias), "{}_parameters".format(param_alias)}
-    duplicate_entries = possible_names.intersection(set(entry.keys()))
-    if len(duplicate_entries) > 1:
-        raise ConfigError(DUPLICATE_ENTRY_ERROR.format(list(possible_names)))
-    for name in duplicate_entries:
-        entry["{}_parameters".format(param_alias)] = entry.pop(name)
-
-
 def _get_workload_entry(workload):
     if isinstance(workload, basestring):
         workload = {'name': workload}
@@ -147,150 +300,7 @@ def _get_workload_entry(workload):
 
 def _process_workload_entry(workload, seen_workload_ids, jobs_config):
     workload = _get_workload_entry(workload)
-    _resolve_params_alias(workload, "workload")
-    workload = _construct_valid_entry(workload, seen_workload_ids, "wk", jobs_config)
+    workload = _construct_valid_entry(workload, seen_workload_ids,
+                                      "wk", jobs_config)
     return workload
 
-###############
-### Parsers ###
-###############
-
-
-class ConfigParser(object):
-
-    def __init__(self, wa_config, run_config, jobs_config, plugin_cache):
-        self.wa_config = wa_config
-        self.run_config = run_config
-        self.jobs_config = jobs_config
-        self.plugin_cache = plugin_cache
-
-    def load_from_path(self, filepath):
-        self.load(_load_file(filepath, "Config"), filepath)
-
-    def load(self, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
-        try:
-            if 'run_name' in raw:
-                msg = '"run_name" can only be specified in the config section of an agenda'
-                raise ConfigError(msg)
-            if 'id' in raw:
-                raise ConfigError('"id" cannot be set globally')
-
-            merge_result_processors_instruments(raw)
-
-            # Get WA core configuration
-            for cfg_point in self.wa_config.configuration.itervalues():
-                value = get_aliased_param(cfg_point, raw)
-                if value is not None:
-                    self.wa_config.set(cfg_point.name, value)
-
-            # Get run specific configuration
-            for cfg_point in self.run_config.configuration.itervalues():
-                value = get_aliased_param(cfg_point, raw)
-                if value is not None:
-                    self.run_config.set(cfg_point.name, value)
-
-            # Get global job spec configuration
-            for cfg_point in JobSpec.configuration.itervalues():
-                value = get_aliased_param(cfg_point, raw)
-                if value is not None:
-                    self.jobs_config.set_global_value(cfg_point.name, value)
-
-            for name, values in raw.iteritems():
-                # Assume that all leftover config is for a plug-in or a global
-                # alias it is up to PluginCache to assert this assumption
-                self.plugin_cache.add_configs(name, values, source)
-
-        except ConfigError as e:
-            if wrap_exceptions:
-                raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
-            else:
-                raise e
-
-
-class AgendaParser(object):
-
-    def __init__(self, wa_config, run_config, jobs_config, plugin_cache):
-        self.wa_config = wa_config
-        self.run_config = run_config
-        self.jobs_config = jobs_config
-        self.plugin_cache = plugin_cache
-
-    def load_from_path(self, filepath):
-        raw = _load_file(filepath, 'Agenda')
-        self.load(raw, filepath)
-
-    def load(self, raw, source):  # pylint: disable=too-many-branches, too-many-locals
-        try:
-            if not isinstance(raw, dict):
-                raise ConfigError('Invalid agenda, top level entry must be a dict')
-
-            # PHASE 1: Populate and validate configuration.
-            for name in ['config', 'global']:
-                entry = raw.pop(name, {})
-                if not isinstance(entry, dict):
-                    raise ConfigError('Invalid entry "{}" - must be a dict'.format(name))
-                if 'run_name' in entry:
-                    self.run_config.set('run_name', entry.pop('run_name'))
-                config_parser = ConfigParser(self.wa_config, self.run_config,
-                                             self.jobs_config, self.plugin_cache)
-                config_parser.load(entry, source, wrap_exceptions=False)
-
-            # PHASE 2: Getting "section" and "workload" entries.
-            sections = raw.pop("sections", [])
-            if not isinstance(sections, list):
-                raise ConfigError('Invalid entry "sections" - must be a list')
-            global_workloads = raw.pop("workloads", [])
-            if not isinstance(global_workloads, list):
-                raise ConfigError('Invalid entry "workloads" - must be a list')
-            if raw:
-                msg = 'Invalid top level agenda entry(ies): "{}"'
-                raise ConfigError(msg.format('", "'.join(raw.keys())))
-
-            # PHASE 3: Collecting existing workload and section IDs
-            seen_section_ids = set()
-            seen_workload_ids = set()
-
-            for workload in global_workloads:
-                workload = _get_workload_entry(workload)
-                _collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
-
-            for section in sections:
-                _collect_valid_id(section.get("id"), seen_section_ids, "section")
-                for workload in section["workloads"] if "workloads" in section else []:
-                    workload = _get_workload_entry(workload)
-                    _collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
-
-            # PHASE 4: Assigning IDs and validating entries
-            # TODO: Error handling for workload errors vs section errors ect
-            for workload in global_workloads:
-                self.jobs_config.add_workload(_process_workload_entry(workload,
-                                                                      seen_workload_ids,
-                                                                      self.jobs_config))
-
-            for section in sections:
-                workloads = []
-                for workload in section.pop("workloads", []):
-                    workloads.append(_process_workload_entry(workload,
-                                                             seen_workload_ids,
-                                                             self.jobs_config))
-
-                _resolve_params_alias(section, seen_section_ids)
-                section = _construct_valid_entry(section, seen_section_ids, "s", self.jobs_config)
-                self.jobs_config.add_section(section, workloads)
-
-            return seen_workload_ids, seen_section_ids
-        except (ConfigError, SerializerSyntaxError) as e:
-            raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
-
-
-# Command line options are parsed in the "run" command. This is used to send
-# certain arguments to the correct configuration points and keep a record of
-# how WA was invoked
-class CommandLineArgsParser(object):
-
-    def __init__(self, cmd_args, wa_config, jobs_config):
-        wa_config.set("verbosity", cmd_args.verbosity)
-        # TODO: Is this correct? Does there need to be a third output dir param
-        disabled_instruments = toggle_set(["~{}".format(i) for i in cmd_args.instruments_to_disable])
-        jobs_config.disable_instruments(disabled_instruments)
-        jobs_config.only_run_ids(cmd_args.only_run_ids)
diff --git a/wlauto/core/entry_point.py b/wlauto/core/entry_point.py
index 8855a55d..86739b3a 100644
--- a/wlauto/core/entry_point.py
+++ b/wlauto/core/entry_point.py
@@ -21,16 +21,15 @@ import os
 import subprocess
 import warnings
 
-from wlauto.core.configuration import settings
 from wlauto.core import pluginloader
 from wlauto.core.command import init_argument_parser
+from wlauto.core.configuration import settings
 from wlauto.core.host import init_user_directory
-from wlauto.exceptions import WAError, ConfigError
-from wlauto.utils.misc import get_traceback
-from wlauto.utils.log import init_logging
+from wlauto.core.state import WAState
+from wlauto.exceptions import WAError, DevlibError, ConfigError
 from wlauto.utils.doc import format_body
-
-from devlib import DevlibError
+from wlauto.utils.log import init_logging
+from wlauto.utils.misc import get_traceback
 
 warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
 
@@ -41,11 +40,14 @@ logger = logging.getLogger('command_line')
 def load_commands(subparsers):
     commands = {}
     for command in pluginloader.list_commands():
-        commands[command.name] = pluginloader.get_command(command.name, subparsers=subparsers)
+        commands[command.name] = pluginloader.get_command(command.name, 
+                                                          subparsers=subparsers)
     return commands
 
 
 def main():
+    state = WAState()
+
     if not os.path.exists(settings.user_directory):
         init_user_directory()
 
@@ -59,19 +61,22 @@ def main():
                                          formatter_class=argparse.RawDescriptionHelpFormatter,
                                          )
         init_argument_parser(parser)
-        commands = load_commands(parser.add_subparsers(dest='command'))  # each command will add its own subparser
+        # each command will add its own subparser
+        commands = load_commands(parser.add_subparsers(dest='command'))  
+
         args = parser.parse_args()
 
         settings.set("verbosity", args.verbose)
 
-        for config in args.config:
-            if not os.path.exists(config):
-                raise ConfigError("Config file {} not found".format(config))
+        for config_file in args.config:
+            if not os.path.exists(config_file):
+                raise ConfigError("Config file {} not found".format(config_file))
+            state.load_config_file(config_file)
 
         init_logging(settings.verbosity)
 
         command = commands[args.command]
-        sys.exit(command.execute(args))
+        sys.exit(command.execute(state, args))
 
     except KeyboardInterrupt:
         logging.info('Got CTRL-C. Aborting.')
diff --git a/wlauto/core/execution.py b/wlauto/core/execution.py
index a2d2fe83..3598cf31 100644
--- a/wlauto/core/execution.py
+++ b/wlauto/core/execution.py
@@ -36,29 +36,31 @@ following actors:
             allow instrumentation to do its stuff.
 
 """
-import os
-import uuid
 import logging
-import subprocess
+import os
 import random
+import subprocess
+import uuid
+from collections import Counter, defaultdict, OrderedDict
+from contextlib import contextmanager
 from copy import copy
 from datetime import datetime
-from contextlib import contextmanager
-from collections import Counter, defaultdict, OrderedDict
 from itertools import izip_longest
 
 import wlauto.core.signal as signal
 from wlauto.core import instrumentation
+from wlauto.core import pluginloader
 from wlauto.core.configuration import settings
 from wlauto.core.plugin import Artifact
-from wlauto.core import pluginloader
 from wlauto.core.resolver import ResourceResolver
 from wlauto.core.result import ResultManager, IterationResult, RunResult
 from wlauto.exceptions import (WAError, ConfigError, TimeoutError, InstrumentError,
                                DeviceError, DeviceNotRespondingError)
-from wlauto.utils.misc import ensure_directory_exists as _d, get_traceback, format_duration
+from wlauto.utils.misc import (ensure_directory_exists as _d, 
+                               get_traceback, format_duration)
 from wlauto.utils.serializer import json
 
+
 # The maximum number of reboot attempts for an iteration.
 MAX_REBOOT_ATTEMPTS = 3
 
@@ -95,6 +97,7 @@ class RunInfo(object):
         return d
     #TODO: pod
 
+
 class ExecutionContext(object):
     """
     Provides a context for instrumentation. Keeps track of things like
@@ -239,31 +242,32 @@ def _check_artifact_path(path, rootpath):
 
 class Executor(object):
     """
-    The ``Executor``'s job is to set up the execution context and pass to a ``Runner``
-    along with a loaded run specification. Once the ``Runner`` has done its thing,
-    the ``Executor`` performs some final reporint before returning.
+    The ``Executor``'s job is to set up the execution context and pass it to
+    a ``Runner``, along with a loaded run specification. Once the ``Runner``
+    has done its thing, the ``Executor`` performs some final reporting before
+    returning.
 
-    The initial context set up involves combining configuration from various sources,
-    loading of requided workloads, loading and installation of instruments and result
-    processors, etc. Static validation of the combined configuration is also performed.
+    The initial context setup involves combining configuration from various
+    sources, loading the required workloads, and loading and installing
+    instruments and result processors. Static validation of the combined
+    configuration is also performed.
 
     """
     # pylint: disable=R0915
 
-    def __init__(self, config):
+    def __init__(self):
         self.logger = logging.getLogger('Executor')
         self.error_logged = False
         self.warning_logged = False
-        self.config = config
         pluginloader = None
         self.device_manager = None
         self.device = None
         self.context = None
 
-    def execute(self, agenda, selectors=None):  # NOQA
+    def execute(self, state, selectors=None):  # NOQA
         """
-        Execute the run specified by an agenda. Optionally, selectors may be used to only
-        selecute a subset of the specified agenda.
+        Execute the run specified by an agenda. Optionally, selectors may be
+        used to execute only a subset of the specified agenda.
 
         Params::
 
@@ -275,9 +279,10 @@ class Executor(object):
         Currently, the following selectors are supported:
 
         ids
-            The value must be a sequence of workload specfication IDs to be executed. Note
-            that if sections are specified inthe agenda, the workload specifacation ID will
-            be a combination of the section and workload IDs.
+            The value must be a sequence of workload specification IDs to be
+            executed. Note that if sections are specified in the agenda, the
+            workload specification ID will be a combination of the section and
+            workload IDs.
 
         """
         signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
diff --git a/wlauto/core/state.py b/wlauto/core/state.py
new file mode 100644
index 00000000..7843a8a6
--- /dev/null
+++ b/wlauto/core/state.py
@@ -0,0 +1,28 @@
+from wlauto.core.configuration.configuration import (RunConfiguration,
+                                                     JobGenerator, settings)
+from wlauto.core.configuration.parsers import ConfigParser
+from wlauto.core.configuration.plugin_cache import PluginCache
+
+
+class WAState(object):
+    """
+    Represents run-time state of WA. Mostly used as a container for loaded 
+    configuration and discovered plugins.
+
+    This exists outside of any command or run and is associated with the
+    running instance of WA itself.
+    """
+
+    def __init__(self, settings=settings):
+        self.settings = settings
+        self.run_config = RunConfiguration()
+        self.plugin_cache = PluginCache()
+        self.jobs_config = JobGenerator(self.plugin_cache)
+
+        self._config_parser = ConfigParser()
+
+    def load_config_file(self, filepath):
+        self._config_parser.load_from_path(self, filepath)
+
+    def load_config(self, values, source, wrap_exceptions=True):
+        self._config_parser.load(self, values, source,
+                                 wrap_exceptions=wrap_exceptions)

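An illustrative usage sketch of the WAState API added above (the config file
name is hypothetical; the attributes and methods are those defined in
wlauto/core/state.py):

    from wlauto.core.state import WAState

    state = WAState()                      # wraps settings, run/jobs config
    state.load_config_file('config.yaml')  # parsed via ConfigParser
    # Commands then receive the populated state:
    #     command.execute(state, args)
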
From 9cfa4e7f5104ddfcbbdca95a9fecabe12e7c4040 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Mon, 13 Feb 2017 17:04:50 +0000
Subject: [PATCH 3/8] Properly initialize output directory and run state

---
 wlauto/commands/run.py                     |  32 +++--
 wlauto/core/configuration/configuration.py | 111 ++++++++--------
 wlauto/core/configuration/parsers.py       |   2 +-
 wlauto/core/configuration/plugin_cache.py  |  40 +++---
 wlauto/core/entry_point.py                 |   1 +
 wlauto/core/execution.py                   |  51 ++------
 wlauto/core/output.py                      | 143 +++++++++++++++++++++
 wlauto/core/state.py                       |   6 +-
 wlauto/utils/misc.py                       |   5 +
 wlauto/utils/serializer.py                 |  21 +++
 10 files changed, 279 insertions(+), 133 deletions(-)
 create mode 100644 wlauto/core/output.py

diff --git a/wlauto/commands/run.py b/wlauto/commands/run.py
index 5374a195..9eb43f92 100644
--- a/wlauto/commands/run.py
+++ b/wlauto/commands/run.py
@@ -24,6 +24,7 @@ from wlauto.core import pluginloader
 from wlauto.core.configuration import RunConfiguration
 from wlauto.core.configuration.parsers import AgendaParser, ConfigParser
 from wlauto.core.execution import Executor
+from wlauto.core.output import init_wa_output
 from wlauto.exceptions import NotFoundError, ConfigError
 from wlauto.utils.log import add_log_file
 from wlauto.utils.types import toggle_set
@@ -74,8 +75,8 @@ class RunCommand(Command):
                                  """)
 
     def execute(self, state, args):
-        output_directory = self.set_up_output_directory(args)
-        add_log_file(os.path.join(output_directory, "run.log"))
+        output = self.set_up_output_directory(state, args)
+        add_log_file(output.logfile)
 
         disabled_instruments = toggle_set(["~{}".format(i) 
                                            for i in args.instruments_to_disable])
@@ -87,7 +88,7 @@ class RunCommand(Command):
             parser.load_from_path(state, args.agenda)
         else:
             try:
-                pluginloader.get_workload(args.agenda)
+                pluginloader.get_plugin_class(args.agenda, kind='workload')
                 agenda = {'workloads': [{'name': args.agenda}]}
                 parser.load(state, agenda, 'CMDLINE_ARGS')
             except NotFoundError:
@@ -97,24 +98,21 @@ class RunCommand(Command):
                 raise ConfigError(msg.format(args.agenda))
 
         executor = Executor()
-        # TODO: fix executor
-        # executor.execute(state, selectors={'ids': args.only_run_ids})
+        executor.execute(state, output)
 
-    def set_up_output_directory(self, args):
+    def set_up_output_directory(self, state, args):
         if args.output_directory:
             output_directory = args.output_directory
         else:
             output_directory = settings.default_output_directory
         self.logger.debug('Using output directory: {}'.format(output_directory))
-        if os.path.exists(output_directory):
-            if args.force:
-                self.logger.info('Removing existing output directory.')
-                shutil.rmtree(os.path.abspath(output_directory))
-            else:
-                self.logger.error('Output directory {} exists.'.format(output_directory))
-                self.logger.error('Please specify another location, or use -f option to overwrite.\n')
+        try:
+            return init_wa_output(output_directory, state, args.force)
+        except RuntimeError as e:
+            if 'path exists' in str(e):
+                msg = 'Output directory "{}" exists.\nPlease specify another '\
+                      'location, or use -f option to overwrite.'
+                self.logger.critical(msg.format(output_directory))
                 sys.exit(1)
-
-        self.logger.info('Creating output directory.')
-        os.makedirs(output_directory)
-        return output_directory
+            else:
+                raise
diff --git a/wlauto/core/configuration/configuration.py b/wlauto/core/configuration/configuration.py
index 28cd7730..3e7ec323 100644
--- a/wlauto/core/configuration/configuration.py
+++ b/wlauto/core/configuration/configuration.py
@@ -614,7 +614,7 @@ class WAConfiguration(Configuration):
 
     @property
     def user_config_file(self):
-        return os.path.joion(self.user_directory, 'config.yaml')
+        return os.path.join(self.user_directory, 'config.yaml')
 
     def __init__(self, environ):
         super(WAConfiguration, self).__init__()
@@ -738,6 +738,8 @@ class RunConfiguration(Configuration):
 
     def __init__(self):
         super(RunConfiguration, self).__init__()
+        for confpoint in self.meta_data:
+            confpoint.set_value(self, check_mandatory=False)
         self.device_config = None
 
     def merge_device_config(self, plugin_cache):
@@ -836,7 +838,7 @@ class JobSpec(Configuration):
         for k, v in values.iteritems():
             if k == "id":
                 continue
-            elif k in ["workload_parameters", "runtime_parameters", "boot_parameters"]:
+            elif k.endswith('_parameters'):
                 if v:
                     self.to_merge[k][source] = copy(v)
             else:
@@ -846,27 +848,27 @@ class JobSpec(Configuration):
                     msg = 'Error in {}:\n\t{}'
                     raise ConfigError(msg.format(source.name, e.message))
 
-    # pylint: disable=no-member
-    # Only call after the rest of the JobSpec is merged
+
     def merge_workload_parameters(self, plugin_cache):
         # merge global generic and specific config
         workload_params = plugin_cache.get_plugin_config(self.workload_name,
                                                          generic_name="workload_parameters")
 
-        # Merge entry "workload_parameters"
-        # TODO: Wrap in - "error in [agenda path]"
         cfg_points = plugin_cache.get_plugin_parameters(self.workload_name)
         for source in self._sources:
-            if source in self.to_merge["workload_params"]:
-                config = self.to_merge["workload_params"][source]
-                for name, cfg_point in cfg_points.iteritems():
-                    if name in config:
-                        value = config.pop(name)
-                        cfg_point.set_value(workload_params, value, check_mandatory=False)
-                if config:
-                    msg = 'conflicting entry(ies) for "{}" in {}: "{}"'
-                    msg = msg.format(self.workload_name, source.name,
-                                     '", "'.join(workload_params[source]))
+            config = self.to_merge["workload_parameters"].get(source)
+            if config is None:
+                continue
+
+            for name, cfg_point in cfg_points.iteritems():
+                if name in config:
+                    value = config.pop(name)
+                    cfg_point.set_value(workload_params, value, 
+                                        check_mandatory=False)
+            if config:
+                msg = 'conflicting entry(ies) for "{}" in {}: "{}"'
+                msg = msg.format(self.workload_name, source.name,
+                                 '", "'.join(config))
+                raise ConfigError(msg)
 
         self.workload_parameters = workload_params
 
@@ -920,12 +922,6 @@ class JobGenerator(object):
         self._read_enabled_instruments = True
         return self._enabled_instruments
 
-    def update_enabled_instruments(self, value):
-        if self._read_enabled_instruments:
-            msg = "'enabled_instruments' cannot be updated after it has been accessed"
-            raise RuntimeError(msg)
-        self._enabled_instruments.update(value)
-
     def __init__(self, plugin_cache):
         self.plugin_cache = plugin_cache
         self.ids_to_run = []
@@ -962,55 +958,58 @@ class JobGenerator(object):
         #TODO: Validate
         self.disabled_instruments = ["~{}".format(i) for i in instruments]
 
+    def update_enabled_instruments(self, value):
+        if self._read_enabled_instruments:
+            msg = "'enabled_instruments' cannot be updated after it has been accessed"
+            raise RuntimeError(msg)
+        self._enabled_instruments.update(value)
+
     def only_run_ids(self, ids):
         if isinstance(ids, str):
             ids = [ids]
         self.ids_to_run = ids
 
     def generate_job_specs(self, target_manager):
-
         for leaf in self.root_node.leaves():
-            # PHASE 1: Gather workload and section entries for this leaf
             workload_entries = leaf.workload_entries
             sections = [leaf]
             for ancestor in leaf.ancestors():
                 workload_entries = ancestor.workload_entries + workload_entries
                 sections.insert(0, ancestor)
 
-            # PHASE 2: Create job specs for this leaf
             for workload_entry in workload_entries:
-                job_spec = JobSpec()  # Loads defaults
-
-                # PHASE 2.1: Merge general job spec configuration
-                for section in sections:
-                    job_spec.update_config(section, check_mandatory=False)
-                job_spec.update_config(workload_entry, check_mandatory=False)
-
-                # PHASE 2.2: Merge global, section and workload entry "workload_parameters"
-                job_spec.merge_workload_parameters(self.plugin_cache)
-                target_manager.static_runtime_parameter_validation(job_spec.runtime_parameters)
-
-                # TODO: PHASE 2.3: Validate device runtime/boot paramerers
-                job_spec.merge_runtime_parameters(self.plugin_cache, target_manager)
-                target_manager.validate_runtime_parameters(job_spec.runtime_parameters)
-
-                # PHASE 2.4: Disable globally disabled instrumentation
-                job_spec.set("instrumentation", self.disabled_instruments)
-                job_spec.finalize()
-
-                # PHASE 2.5: Skip job_spec if part of it's ID is not in self.ids_to_run
-                if self.ids_to_run:
-                    for job_id in self.ids_to_run:
-                        if job_id in job_spec.id:
-                            #TODO: logging
-                            break
-                    else:
-                        continue
-
-                # PHASE 2.6: Update list of instruments that need to be setup
-                # pylint: disable=no-member
+                job_spec = create_job_spec(workload_entry, sections, target_manager)
+                for job_id in self.ids_to_run:
+                    if job_id in job_spec.id:
+                        break
+                else:
+                    continue
                 self.update_enabled_instruments(job_spec.instrumentation.values())
+                yield  job_spec
+
+
+
+def create_job_spec(workload_entry, sections, target_manager):
+    job_spec = JobSpec()
+
+    # PHASE 2.1: Merge general job spec configuration
+    for section in sections:
+        job_spec.update_config(section, check_mandatory=False)
+    job_spec.update_config(workload_entry, check_mandatory=False)
+
+    # PHASE 2.2: Merge global, section and workload entry "workload_parameters"
+    job_spec.merge_workload_parameters(self.plugin_cache)
+    target_manager.static_runtime_parameter_validation(job_spec.runtime_parameters)
+
+    # TODO: PHASE 2.3: Validate device runtime/boot paramerers
+    job_spec.merge_runtime_parameters(self.plugin_cache, target_manager)
+    target_manager.validate_runtime_parameters(job_spec.runtime_parameters)
+
+    # PHASE 2.4: Disable globally disabled instrumentation
+    job_spec.set("instrumentation", self.disabled_instruments)
+    job_spec.finalize()
+
+    return job_spec
 
-                yield job_spec
 
 settings = WAConfiguration(os.environ)
diff --git a/wlauto/core/configuration/parsers.py b/wlauto/core/configuration/parsers.py
index caa8d165..37624a18 100644
--- a/wlauto/core/configuration/parsers.py
+++ b/wlauto/core/configuration/parsers.py
@@ -28,7 +28,7 @@ from wlauto.core.configuration.configuration import JobSpec
 class ConfigParser(object):
 
     def load_from_path(self, state, filepath):
-        self.load(_load_file(filepath, "Config"), filepath)
+        self.load(state, _load_file(filepath, "Config"), filepath)
 
     def load(self, state, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
         try:
diff --git a/wlauto/core/configuration/plugin_cache.py b/wlauto/core/configuration/plugin_cache.py
index 110cc353..e0d79a41 100644
--- a/wlauto/core/configuration/plugin_cache.py
+++ b/wlauto/core/configuration/plugin_cache.py
@@ -105,7 +105,8 @@ class PluginCache(object):
                 for source in self.sources:
                     if source not in self.global_alias_values[alias]:
                         continue
-                    param.set_value(config, value=self.global_alias_values[alias][source])
+                    val = self.global_alias_values[alias][source]
+                    param.set_value(config, value=val)
 
         # Merge user config
         # Perform a simple merge with the order of sources representing priority
@@ -128,27 +129,34 @@ class PluginCache(object):
         return {param.name: param for param in params}
 
     # pylint: disable=too-many-nested-blocks, too-many-branches
-    def _merge_using_priority_specificity(self, specific_name, generic_name, final_config):
+    def _merge_using_priority_specificity(self, specific_name, 
+                                          generic_name, final_config):
         """
-        WA configuration can come from various sources of increasing priority, as well
-        as being specified in a generic and specific manner (e.g. ``device_config``
-        and ``nexus10`` respectivly). WA has two rules for the priority of configuration:
+        WA configuration can come from various sources of increasing priority,
+        as well as being specified in a generic and specific manner (e.g.
+        ``device_config`` and ``nexus10`` respectively). WA has two rules for
+        the priority of configuration:
 
-            - Configuration from higher priority sources overrides configuration from
-              lower priority sources.
+            - Configuration from higher priority sources overrides
+              configuration from lower priority sources.
             - More specific configuration overrides less specific configuration.
 
-        There is a situation where these two rules come into conflict. When a generic
-        configuration is given in config source of high priority and a specific
-        configuration is given in a config source of lower priority. In this situation
-        it is not possible to know the end users intention and WA will error.
+        There is a situation where these two rules come into conflict: when a
+        generic configuration is given in a config source of high priority
+        and a specific configuration is given in a config source of lower
+        priority. In this situation it is not possible to know the end
+        user's intention, and WA will error.
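+
+        For example (an illustrative scenario): a high-priority source sets
+        a parameter through the generic ``device_config`` entry, while a
+        lower-priority source sets the same parameter through the specific
+        ``nexus10`` entry. Neither rule can decide which value should win,
+        so WA raises an error rather than guessing.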
 
-        :param generic_name: The name of the generic configuration e.g ``device_config``
-        :param specific_name: The name of the specific configuration used, e.g ``nexus10``
-        :param cfg_point: A dict of ``ConfigurationPoint``s to be used when merging configuration.
-                          keys=config point name, values=config point
+        :param generic_name: The name of the generic configuration,
+                             e.g. ``device_config``
+        :param specific_name: The name of the specific configuration used,
+                              e.g. ``nexus10``
+        :param cfg_point: A dict of ``ConfigurationPoint``s to be used when
+                          merging configuration: keys=config point name,
+                          values=config point
 
-        :rtype: A fully merged and validated configuration in the form of a obj_dict.
+        :rtype: A fully merged and validated configuration in the form of an
+                obj_dict.
         """
         generic_config = copy(self.plugin_configs[generic_name])
         specific_config = copy(self.plugin_configs[specific_name])
diff --git a/wlauto/core/entry_point.py b/wlauto/core/entry_point.py
index 86739b3a..d58f1cc8 100644
--- a/wlauto/core/entry_point.py
+++ b/wlauto/core/entry_point.py
@@ -68,6 +68,7 @@ def main():
 
         settings.set("verbosity", args.verbose)
 
+        state.load_config_file(settings.user_config_file)
         for config_file in args.config:
             if not os.path.exists(config_file):
                 raise ConfigError("Config file {} not found".format(config_file))
diff --git a/wlauto/core/execution.py b/wlauto/core/execution.py
index 3598cf31..30477984 100644
--- a/wlauto/core/execution.py
+++ b/wlauto/core/execution.py
@@ -71,33 +71,6 @@ MAX_REBOOT_ATTEMPTS = 3
 REBOOT_DELAY = 3
 
 
-class RunInfo(object):
-    """
-    Information about the current run, such as its unique ID, run
-    time, etc.
-
-    """
-
-    def __init__(self, config):
-        self.config = config
-        self.uuid = uuid.uuid4()
-        self.start_time = None
-        self.end_time = None
-        self.duration = None
-        self.project = config.project
-        self.project_stage = config.project_stage
-        self.run_name = config.run_name or "{}_{}".format(os.path.split(config.output_directory)[1],
-                                                          datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S"))
-        self.notes = None
-        self.device_properties = {}
-
-    def to_dict(self):
-        d = copy(self.__dict__)
-        d['uuid'] = str(self.uuid)
-        return d
-    #TODO: pod
-
-
 class ExecutionContext(object):
     """
     Provides a context for instrumentation. Keeps track of things like
@@ -264,33 +237,26 @@ class Executor(object):
         self.device = None
         self.context = None
 
-    def execute(self, state, selectors=None):  # NOQA
+    def execute(self, state, output):
         """
         Execute the run specified by an agenda. Optionally, selectors may be
         used to execute only a subset of the specified agenda.
 
         Params::
 
-            :agenda: an ``Agenda`` instance to be executed.
-            :selectors: A dict mapping selector name to the corresponding values.
-
-        **Selectors**
-
-        Currently, the following selectors are supported:
-
-        ids
-            The value must be a sequence of workload specification IDs to be
-            executed. Note that if sections are specified in the agenda, the
-            workload specification ID will be a combination of the section and
-            workload IDs.
+            :state: a ``WAState`` containing processed configuration
+            :output: an initialized ``RunOutput`` that will be used to
+                     store the results.
 
         """
         signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
         signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
 
-        self.logger.info('Initializing')
+        self.logger.info('Initializing run')
 
         self.logger.debug('Loading run configuration.')
+
+    def old_exec(self, agenda, selectors=None):
         self.config.set_agenda(agenda, selectors)
         self.config.finalize()
         config_outfile = os.path.join(self.config.meta_directory, 'run_config.json')
@@ -300,7 +266,8 @@ class Executor(object):
         self.logger.debug('Initialising device configuration.')
         if not self.config.device:
             raise ConfigError('Make sure a device is specified in the config.')
-        self.device_manager = pluginloader.get_manager(self.config.device, **self.config.device_config)
+        self.device_manager = pluginloader.get_manager(self.config.device, 
+                                                       **self.config.device_config)
         self.device_manager.validate()
         self.device = self.device_manager.target
 
diff --git a/wlauto/core/output.py b/wlauto/core/output.py
new file mode 100644
index 00000000..183ce8ed
--- /dev/null
+++ b/wlauto/core/output.py
@@ -0,0 +1,143 @@
+import logging
+import os
+import shutil
+import uuid
+from copy import copy
+
+from wlauto.utils.misc import touch
+from wlauto.utils.serializer import write_pod, read_pod
+
+
+logger = logging.getLogger('output')
+
+
+class RunInfo(object):
+    """
+    Information about the current run, such as its unique ID, run
+    time, etc.
+
+    """
+    @staticmethod
+    def from_pod(pod):
+        uid = pod.pop('uuid')
+        if uid is not None:
+            uid = uuid.UUID(uid)
+        instance = RunInfo(**pod)
+        instance.uuid = uid
+        return instance
+
+    def __init__(self, run_name=None, project=None, project_stage=None,
+                 start_time=None, end_time=None, duration=None):
+        self.uuid = uuid.uuid4()
+        self.run_name = run_name
+        self.project = project
+        self.project_stage = project_stage
+        self.start_time = start_time
+        self.end_time = end_time
+        self.duration = duration
+
+    def to_pod(self):
+        d = copy(self.__dict__)
+        d['uuid'] = str(self.uuid)
+        return d
+
+
+class RunState(object):
+    """
+    Represents the state of a WA run.
+
+    """
+    @staticmethod
+    def from_pod(pod):
+        return RunState()
+
+    def __init__(self):
+        pass
+
+    def to_pod(self):
+        return {}
+
+
+class RunOutput(object):
+
+    @property
+    def logfile(self):
+        return os.path.join(self.basepath, 'run.log')
+
+    @property
+    def metadir(self):
+        return os.path.join(self.basepath, '__meta')
+
+    @property
+    def infofile(self):
+        return os.path.join(self.metadir, 'run_info.json')
+
+    @property
+    def statefile(self):
+        return os.path.join(self.basepath, '.run_state.json')
+
+    def __init__(self, path):
+        self.basepath = path
+        self.info = None
+        self.state = None
+        if (not os.path.isfile(self.statefile) or
+                not os.path.isfile(self.infofile)):
+            msg = '"{}" does not exist or is not a valid WA output directory.'
+            raise ValueError(msg.format(self.basepath))
+        self.reload()
+
+    def reload(self):
+        self.info = RunInfo.from_pod(read_pod(self.infofile))
+        self.state = RunState.from_pod(read_pod(self.statefile))
+
+    def write_info(self):
+        write_pod(self.info.to_pod(), self.infofile)
+
+    def write_state(self):
+        write_pod(self.state.to_pod(), self.statefile)
+
+
+def init_wa_output(path, wa_state, force=False):
+    if os.path.exists(path):
+        if force:
+            logger.info('Removing existing output directory.')
+            shutil.rmtree(os.path.abspath(path))
+        else:
+            raise RuntimeError('path exists: {}'.format(path))
+
+    logger.info('Creating output directory.')
+    os.makedirs(path)
+    meta_dir = os.path.join(path, '__meta')
+    os.makedirs(meta_dir)
+    _save_raw_config(meta_dir, wa_state)
+    touch(os.path.join(path, 'run.log'))
+
+    info = RunInfo(
+        run_name=wa_state.run_config.run_name,
+        project=wa_state.run_config.project,
+        project_stage=wa_state.run_config.project_stage,
+    )
+    write_pod(info.to_pod(), os.path.join(meta_dir, 'run_info.json'))
+
+    with open(os.path.join(path, '.run_state.json'), 'w') as wfh:
+        wfh.write('{}')
+
+    return RunOutput(path)
+
+
+def _save_raw_config(meta_dir, state):
+    raw_config_dir = os.path.join(meta_dir, 'raw_config')
+    os.makedirs(raw_config_dir)
+
+    for i, source in enumerate(state.loaded_config_sources):
+        if not os.path.isfile(source):
+            continue
+        basename = os.path.basename(source)
+        dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename))
+        shutil.copy(source, dest_path)
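
An illustrative sketch of the output directory lifecycle implemented above
(the directory name is hypothetical; init_wa_output() expects a state object
with run_config and loaded_config_sources, such as WAState):

    from wlauto.core.output import init_wa_output, RunOutput
    from wlauto.core.state import WAState

    state = WAState()
    output = init_wa_output('wa_output', state, force=True)
    # The directory now contains run.log, __meta/run_info.json and
    # .run_state.json, which is what RunOutput's constructor checks for.
    output.info.project = 'demo'
    output.write_info()                 # persist the updated RunInfo pod
    reloaded = RunOutput('wa_output')   # re-reads info and state from disk
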
diff --git a/wlauto/core/state.py b/wlauto/core/state.py
index 7843a8a6..9071aa93 100644
--- a/wlauto/core/state.py
+++ b/wlauto/core/state.py
@@ -18,11 +18,15 @@ class WAState(object):
         self.run_config = RunConfiguration()
         self.plugin_cache = PluginCache()
         self.jobs_config = JobGenerator(self.plugin_cache)
-
+        self.loaded_config_sources = []
         self._config_parser = ConfigParser()
 
     def load_config_file(self, filepath):
         self._config_parser.load_from_path(self, filepath)
+        self.loaded_config_sources.append(filepath)
 
     def load_config(self, values, source, wrap_exceptions=True):
         self._config_parser.load(self, values, source,
                                  wrap_exceptions=wrap_exceptions)
+        self.loaded_config_sources.append(source)
+
+
diff --git a/wlauto/utils/misc.py b/wlauto/utils/misc.py
index 3f6d7e8b..368bd30a 100644
--- a/wlauto/utils/misc.py
+++ b/wlauto/utils/misc.py
@@ -593,3 +593,8 @@ def merge_dicts_simple(base, other):
     for key, value in (base or {}).iteritems():
         result[key] = merge_config_values(result.get(key), value)
     return result
+
+
+def touch(path):
+    with open(path, 'w'):
+        pass
diff --git a/wlauto/utils/serializer.py b/wlauto/utils/serializer.py
index eb2d893e..d03c5cdd 100644
--- a/wlauto/utils/serializer.py
+++ b/wlauto/utils/serializer.py
@@ -228,6 +228,16 @@ def read_pod(source, fmt=None):
         message = 'source must be a path or an open file handle; got {}'
         raise ValueError(message.format(type(source)))
 
+
+def write_pod(pod, dest, fmt=None):
+    if isinstance(dest, basestring):
+        with open(dest, 'w') as wfh:
+            return _write_pod(pod, wfh, fmt)
+    elif hasattr(dest, 'write') and (hasattr(dest, 'name') or fmt):
+        return _write_pod(pod, dest, fmt)
+    else:
+        message = 'dest must be a path or an open file handle; got {}'
+        raise ValueError(message.format(type(dest)))
+
 
 def dump(o, wfh, fmt='json', *args, **kwargs):
     serializer = {'yaml': yaml,
@@ -256,6 +266,17 @@ def _read_pod(fh, fmt=None):
     else:
         raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(fh, 'name', '<none>')))
 
+
+def _write_pod(pod, wfh, fmt=None):
+    if fmt is None:
+        fmt = os.path.splitext(wfh.name)[1].lower().strip('.')
+    if fmt == 'yaml':
+        return yaml.dump(pod, wfh)
+    elif fmt == 'json':
+        return json.dump(pod, wfh)
+    elif fmt == 'py':
+        raise ValueError('Serializing to Python is not supported')
+    else:
+        raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(wfh, 'name', '<none>')))
+
 
 def is_pod(obj):
     return type(obj) in POD_TYPES

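An illustrative round trip through the serializer helpers added above (the
file names are hypothetical; the format is inferred from the file extension,
as in _write_pod()/_read_pod()):

    from wlauto.utils.serializer import write_pod, read_pod

    pod = {'run_name': 'test', 'duration': 42}
    write_pod(pod, 'run_info.json')         # dispatches to the json backend
    assert read_pod('run_info.json') == pod

    # An open file handle also works, provided it has a .name the format can
    # be inferred from, or an explicit fmt= is given:
    with open('run_info.yaml', 'w') as wfh:
        write_pod(pod, wfh, fmt='yaml')
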
From 390e9ca78a30ed9c296d2d7d47c38da7117db038 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Thu, 16 Feb 2017 11:02:22 +0000
Subject: [PATCH 4/8] Generating jobs.

---
 wlauto/commands/run.py                        |  22 ++-
 wlauto/core/command.py                        |   2 +-
 wlauto/core/configuration/configuration.py    | 159 ++++++++++--------
 wlauto/core/configuration/default.py          |   4 +-
 .../{state.py => configuration/manager.py}    |  28 ++-
 wlauto/core/device_manager.py                 |  10 +-
 wlauto/core/entry_point.py                    |  10 +-
 wlauto/core/execution.py                      |  40 ++++-
 wlauto/core/output.py                         |  41 +++++
 wlauto/tests/test_parsers.py                  |  24 +--
 wlauto/utils/types.py                         |  46 ++---
 11 files changed, 255 insertions(+), 131 deletions(-)
 rename wlauto/core/{state.py => configuration/manager.py} (54%)

diff --git a/wlauto/commands/run.py b/wlauto/commands/run.py
index 9eb43f92..12038b1e 100644
--- a/wlauto/commands/run.py
+++ b/wlauto/commands/run.py
@@ -25,6 +25,7 @@ from wlauto.core.configuration import RunConfiguration
 from wlauto.core.configuration.parsers import AgendaParser, ConfigParser
 from wlauto.core.execution import Executor
 from wlauto.core.output import init_wa_output
+from wlauto.core.version import get_wa_version
 from wlauto.exceptions import NotFoundError, ConfigError
 from wlauto.utils.log import add_log_file
 from wlauto.utils.types import toggle_set
@@ -74,23 +75,26 @@ class RunCommand(Command):
                                  This option may be specified multiple times.
                                  """)
 
-    def execute(self, state, args):
-        output = self.set_up_output_directory(state, args)
+    def execute(self, config, args):
+        output = self.set_up_output_directory(config, args)
         add_log_file(output.logfile)
 
+        self.logger.debug('Version: {}'.format(get_wa_version()))
+        self.logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
+
         disabled_instruments = toggle_set(["~{}".format(i) 
                                            for i in args.instruments_to_disable])
-        state.jobs_config.disable_instruments(disabled_instruments)
-        state.jobs_config.only_run_ids(args.only_run_ids)
+        config.jobs_config.disable_instruments(disabled_instruments)
+        config.jobs_config.only_run_ids(args.only_run_ids)
 
         parser = AgendaParser()
         if os.path.isfile(args.agenda):
-            parser.load_from_path(state, args.agenda)
+            parser.load_from_path(config, args.agenda)
         else:
             try:
                 pluginloader.get_plugin_class(args.agenda, kind='workload')
                 agenda = {'workloads': [{'name': args.agenda}]}
-                parser.load(state, agenda, 'CMDLINE_ARGS')
+                parser.load(config, agenda, 'CMDLINE_ARGS')
             except NotFoundError:
                 msg = 'Agenda file "{}" does not exist, and there no workload '\
                       'with that name.\nYou can get a list of available '\
@@ -98,16 +102,16 @@ class RunCommand(Command):
                 raise ConfigError(msg.format(args.agenda))
 
         executor = Executor()
-        executor.execute(state, output)
+        executor.execute(config, output)
 
-    def set_up_output_directory(self, state, args):
+    def set_up_output_directory(self, config, args):
         if args.output_directory:
             output_directory = args.output_directory
         else:
             output_directory = settings.default_output_directory
         self.logger.debug('Using output directory: {}'.format(output_directory))
         try:
-            return init_wa_output(output_directory, state, args.force)
+            return init_wa_output(output_directory, config, args.force)
         except RuntimeError as e:
             if 'path exists' in str(e):
                 msg = 'Output directory "{}" exists.\nPlease specify another '\
diff --git a/wlauto/core/command.py b/wlauto/core/command.py
index 4cbf424e..4b2a7d93 100644
--- a/wlauto/core/command.py
+++ b/wlauto/core/command.py
@@ -70,7 +70,7 @@ class Command(Plugin):
         """
         Execute this command.
 
-        :state: An initialized ``WAState`` that contains the current state of
+        :state: An initialized ``ConfigManager`` that contains the current state of
                 WA execution up to that point (processed configuration, loaded
                 plugins, etc).
         :args: An ``argparse.Namespace`` containing command line arguments (as returned by
diff --git a/wlauto/core/configuration/configuration.py b/wlauto/core/configuration/configuration.py
index 3e7ec323..0fb6b5cf 100644
--- a/wlauto/core/configuration/configuration.py
+++ b/wlauto/core/configuration/configuration.py
@@ -17,7 +17,7 @@ import re
 from copy import copy
 from collections import OrderedDict, defaultdict
 
-from wlauto.exceptions import ConfigError
+from wlauto.exceptions import ConfigError, NotFoundError
 from wlauto.utils.misc import (get_article, merge_config_values)
 from wlauto.utils.types import (identifier, integer, boolean,
                                 list_of_strings, toggle_set,
@@ -489,6 +489,15 @@ class CpuFreqParameters(object):
 #####################
 
 
+def _to_pod(cfg_point, value):
+    if is_pod(value):
+        return value
+    if hasattr(cfg_point.kind, 'to_pod'):
+        return value.to_pod()
+    msg = '{} value "{}" is not serializable'
+    raise ValueError(msg.format(cfg_point.name, value))
+
+
 class Configuration(object):
 
     config_points = []
@@ -498,16 +507,17 @@ class Configuration(object):
     configuration = {cp.name: cp for cp in config_points}
 
     @classmethod
-    # pylint: disable=unused-argument
-    def from_pod(cls, pod, plugin_cache):
+    def from_pod(cls, pod):
         instance = cls()
-        for name, cfg_point in cls.configuration.iteritems():
+        for cfg_point in cls.config_points:
+            name = cfg_point.name
             if name in pod:
-                cfg_point.set_value(instance, pod.pop(name))
+                value = pod.pop(name)
+                if hasattr(cfg_point.kind, 'from_pod'):
+                    value = cfg_point.kind.from_pod(value)
+                cfg_point.set_value(instance, value)
         if pod:
             msg = 'Invalid entry(ies) for "{}": "{}"'
-            raise ConfigError(msg.format(cls.name, '", "'.join(pod.keys())))
-        instance.validate()
+            raise ValueError(msg.format(cls.name, '", "'.join(pod.keys())))
         return instance
 
     def __init__(self):
@@ -531,17 +541,17 @@ class Configuration(object):
 
     def to_pod(self):
         pod = {}
-        for cfg_point_name in self.configuration.iterkeys():
-            value = getattr(self, cfg_point_name, None)
+        for cfg_point in self.configuration.itervalues():
+            value = getattr(self, cfg_point.name, None)
             if value is not None:
-                pod[cfg_point_name] = value
+                pod[cfg_point.name] = _to_pod(cfg_point, value)
         return pod
 
 
 # This configuration for the core WA framework
-class WAConfiguration(Configuration):
+class MetaConfiguration(Configuration):
 
-    name = "WA Configuration"
+    name = "Meta Configuration"
 
     plugin_packages = [
         'wlauto.commands',
@@ -617,7 +627,7 @@ class WAConfiguration(Configuration):
         return os.path.join(self.user_directory, 'config.yaml')
 
     def __init__(self, environ):
-        super(WAConfiguration, self).__init__()
+        super(MetaConfiguration, self).__init__()
         user_directory = environ.pop('WA_USER_DIRECTORY', '')
         if user_directory:
             self.set('user_directory', user_directory)
@@ -748,39 +758,30 @@ class RunConfiguration(Configuration):
         selected device.
         """
         # pylint: disable=no-member
-        self.device_config = plugin_cache.get_plugin_config(self.device_config,
+        if self.device is None:
+            msg = 'Attempting to merge device config with unspecified device'
+            raise RuntimeError(msg)
+        self.device_config = plugin_cache.get_plugin_config(self.device,
                                                             generic_name="device_config")
 
     def to_pod(self):
         pod = super(RunConfiguration, self).to_pod()
-        pod['device_config'] = self.device_config
+        pod['device_config'] = dict(self.device_config or {})
         return pod
 
-    # pylint: disable=no-member
     @classmethod
-    def from_pod(cls, pod, plugin_cache):
-        try:
-            device_config = obj_dict(values=pod.pop("device_config"), not_in_dict=['name'])
-        except KeyError as e:
-            msg = 'No value specified for mandatory parameter "{}".'
-            raise ConfigError(msg.format(e.args[0]))
+    def from_pod(cls, pod):
+        meta_pod = {}
+        for cfg_point in cls.meta_data:
+            meta_pod[cfg_point.name] = pod.pop(cfg_point.name, None)
 
-        instance = super(RunConfiguration, cls).from_pod(pod, plugin_cache)
+        instance = super(RunConfiguration, cls).from_pod(pod)
+        for cfg_point in cls.meta_data:
+            cfg_point.set_value(instance, meta_pod[cfg_point.name])
 
-        device_config.name = "device_config"
-        cfg_points = plugin_cache.get_plugin_parameters(instance.device)
-        for entry_name in device_config.iterkeys():
-            if entry_name not in cfg_points.iterkeys():
-                msg = 'Invalid entry "{}" for device "{}".'
-                raise ConfigError(msg.format(entry_name, instance.device, cls.name))
-            else:
-                cfg_points[entry_name].validate(device_config)
-
-        instance.device_config = device_config
         return instance
 
 
-# This is the configuration for WA jobs
 class JobSpec(Configuration):
 
     name = "Job Spec"
@@ -795,6 +796,23 @@ class JobSpec(Configuration):
                            description='''
                            The name of the workload to run.
                            '''),
+        ConfigurationPoint('workload_parameters', kind=obj_dict,
+                           aliases=["params", "workload_params"],
+                           description='''
+                           Parameter to be passed to the workload
+                           '''),
+        ConfigurationPoint('runtime_parameters', kind=obj_dict,
+                           aliases=["runtime_params"],
+                           description='''
+                           Runtime parameters to be set prior to running
+                           the workload.
+                           '''),
+        ConfigurationPoint('boot_parameters', kind=obj_dict,
+                           aliases=["boot_params"],
+                           description='''
+                           Parameters to be used when rebooting the target
+                           prior to running the workload.
+                           '''),
         ConfigurationPoint('label', kind=str,
                            description='''
                            Similar to IDs but do not have the uniqueness restriction.
@@ -823,14 +841,23 @@ class JobSpec(Configuration):
     ]
     configuration = {cp.name: cp for cp in config_points}
 
+    @classmethod
+    def from_pod(cls, pod):
+        job_id = pod.pop('id')
+        instance = super(JobSpec, cls).from_pod(pod)
+        instance.id = job_id
+        return instance
+
     def __init__(self):
         super(JobSpec, self).__init__()
         self.to_merge = defaultdict(OrderedDict)
         self._sources = []
         self.id = None
-        self.workload_parameters = None
-        self.runtime_parameters = None
-        self.boot_parameters = None
+
+    def to_pod(self):
+        pod = super(JobSpec, self).to_pod()
+        pod['id'] = self.id
+        return pod
 
     def update_config(self, source, check_mandatory=True):
         self._sources.append(source)
@@ -848,7 +875,6 @@ class JobSpec(Configuration):
                     msg = 'Error in {}:\n\t{}'
                     raise ConfigError(msg.format(source.name, e.message))
 
-
     def merge_workload_parameters(self, plugin_cache):
         # merge global generic and specific config
         workload_params = plugin_cache.get_plugin_config(self.workload_name,
@@ -876,7 +902,10 @@ class JobSpec(Configuration):
 
         # Order global runtime parameters
         runtime_parameters = OrderedDict()
-        global_runtime_params = plugin_cache.get_plugin_config("runtime_parameters")
+        try:
+            global_runtime_params = plugin_cache.get_plugin_config("runtime_parameters")
+        except NotFoundError:
+            global_runtime_params = {}
         for source in plugin_cache.sources:
             runtime_parameters[source] = global_runtime_params[source]
 
@@ -890,27 +919,6 @@ class JobSpec(Configuration):
     def finalize(self):
         self.id = "-".join([source.config['id'] for source in self._sources[1:]])  # ignore first id, "global"
 
-    def to_pod(self):
-        pod = super(JobSpec, self).to_pod()
-        pod['workload_parameters'] = self.workload_parameters
-        pod['runtime_parameters'] = self.runtime_parameters
-        pod['boot_parameters'] = self.boot_parameters
-        return pod
-
-    @classmethod
-    def from_pod(cls, pod, plugin_cache):
-        try:
-            workload_parameters = pod['workload_parameters']
-            runtime_parameters = pod['runtime_parameters']
-            boot_parameters = pod['boot_parameters']
-        except KeyError as e:
-            msg = 'No value specified for mandatory parameter "{}}".'
-            raise ConfigError(msg.format(e.args[0]))
-
-        instance = super(JobSpec, cls).from_pod(pod, plugin_cache)
-
-        # TODO: validate parameters and construct the rest of the instance
-
 
 # This is used to construct the list of Jobs WA will run
 class JobGenerator(object):
@@ -970,6 +978,7 @@ class JobGenerator(object):
         self.ids_to_run = ids
 
     def generate_job_specs(self, target_manager):
+        specs = []
         for leaf in self.root_node.leaves():
             workload_entries = leaf.workload_entries
             sections = [leaf]
@@ -978,18 +987,23 @@ class JobGenerator(object):
                 sections.insert(0, ancestor)
 
             for workload_entry in workload_entries:
-                job_spec = create_job_spec(workload_entry, sections, target_manager)
-                for job_id in self.ids_to_run:
-                    if job_id in job_spec.id:
-                        break
-                else:
-                    continue
+                job_spec = create_job_spec(workload_entry, sections, 
+                                           target_manager, self.plugin_cache,
+                                           self.disabled_instruments)
+                if self.ids_to_run:
+                    for job_id in self.ids_to_run:
+                        if job_id in job_spec.id:
+                            break
+                    else:
+                        continue
                 self.update_enabled_instruments(job_spec.instrumentation.values())
-                yield  job_spec
+                specs.append(job_spec)
+        return specs
 
 
 
-def create_job_spec(workload_entry, sections, target_manager):
+def create_job_spec(workload_entry, sections, target_manager, plugin_cache,
+                    disabled_instruments):
     job_spec = JobSpec()
 
     # PHASE 2.1: Merge general job spec configuration
@@ -998,18 +1012,17 @@ def create_job_spec(workload_entry, sections, target_manager):
     job_spec.update_config(workload_entry, check_mandatory=False)
 
     # PHASE 2.2: Merge global, section and workload entry "workload_parameters"
-    job_spec.merge_workload_parameters(self.plugin_cache)
-    target_manager.static_runtime_parameter_validation(job_spec.runtime_parameters)
+    job_spec.merge_workload_parameters(plugin_cache)
 
     # TODO: PHASE 2.3: Validate device runtime/boot paramerers
-    job_spec.merge_runtime_parameters(self.plugin_cache, target_manager)
+    job_spec.merge_runtime_parameters(plugin_cache, target_manager)
     target_manager.validate_runtime_parameters(job_spec.runtime_parameters)
 
     # PHASE 2.4: Disable globally disabled instrumentation
-    job_spec.set("instrumentation", self.disabled_instruments)
+    job_spec.set("instrumentation", disabled_instruments)
     job_spec.finalize()
 
     return job_spec
 
 
-settings = WAConfiguration(os.environ)
+settings = MetaConfiguration(os.environ)
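
An illustrative sketch of the POD round trip implemented above (DemoConfig is
hypothetical; it follows the pattern used by RunConfiguration and JobSpec of
declaring config_points plus a matching configuration dict):

    from wlauto.core.configuration.configuration import (Configuration,
                                                         ConfigurationPoint)

    class DemoConfig(Configuration):
        name = "Demo Config"
        config_points = [
            ConfigurationPoint('device', kind=str, default='generic_android'),
        ]
        configuration = {cp.name: cp for cp in config_points}

    dc = DemoConfig()
    dc.set('device', 'nexus10')
    pod = dc.to_pod()                   # {'device': 'nexus10'}
    restored = DemoConfig.from_pod(pod)
    # Unknown keys left over in the pod raise ValueError; values whose kind
    # defines from_pod()/to_pod() are converted on the way in and out.
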
diff --git a/wlauto/core/configuration/default.py b/wlauto/core/configuration/default.py
index 0d520ca9..5145a6b4 100644
--- a/wlauto/core/configuration/default.py
+++ b/wlauto/core/configuration/default.py
@@ -1,4 +1,4 @@
-from wlauto.core.configuration.configuration import WAConfiguration, RunConfiguration
+from wlauto.core.configuration.configuration import MetaConfiguration, RunConfiguration
 from wlauto.core.configuration.plugin_cache import PluginCache
 from wlauto.utils.serializer import yaml
 from wlauto.utils.doc import strip_inlined_text
@@ -33,7 +33,7 @@ def _format_instruments(output):
 
 def generate_default_config(path):
     with open(path, 'w') as output:
-        for param in WAConfiguration.config_points + RunConfiguration.config_points:
+        for param in MetaConfiguration.config_points + RunConfiguration.config_points:
             entry = {param.name: param.default}
             comment = _format_yaml_comment(param)
             output.writelines(comment)
diff --git a/wlauto/core/state.py b/wlauto/core/configuration/manager.py
similarity index 54%
rename from wlauto/core/state.py
rename to wlauto/core/configuration/manager.py
index 9071aa93..ef41be9f 100644
--- a/wlauto/core/state.py
+++ b/wlauto/core/configuration/manager.py
@@ -1,10 +1,29 @@
-from wlauto.core.configuration.configuration import (RunConfiguration,
+from wlauto.core.configuration.configuration import (MetaConfiguration,
+                                                     RunConfiguration,
                                                      JobGenerator, settings)
 from wlauto.core.configuration.parsers import ConfigParser
 from wlauto.core.configuration.plugin_cache import PluginCache
 
 
-class WAState(object):
+class CombinedConfig(object):
+
+    @staticmethod
+    def from_pod(pod):
+        instance = CombinedConfig()
+        instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
+        instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
+        return instance
+
+    def __init__(self, settings=None, run_config=None):
+        self.settings = settings
+        self.run_config = run_config
+
+    def to_pod(self):
+        return {'settings': self.settings.to_pod(),
+                'run_config': self.run_config.to_pod()}
+
+
+class ConfigManager(object):
     """
     Represents run-time state of WA. Mostly used as a container for loaded 
     configuration and discovered plugins.
@@ -20,6 +39,8 @@ class WAState(object):
         self.jobs_config = JobGenerator(self.plugin_cache)
         self.loaded_config_sources = []
         self._config_parser = ConfigParser()
+        self._job_specs = []
+        self.jobs = []
 
     def load_config_file(self, filepath):
         self._config_parser.load_from_path(self, filepath)
@@ -29,4 +50,7 @@ class WAState(object):
         self._config_parser.load(self, values, source,
                                  wrap_exceptions=wrap_exceptions)
         self.loaded_config_sources.append(source)
 
+    def finalize(self):
+        self.run_config.merge_device_config(self.plugin_cache)
+        return CombinedConfig(self.settings, self.run_config)
 
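
An illustrative sketch of the finalize step added above (the device name and
source label are hypothetical; finalize() assumes a device has been
configured, since it merges device_config through the plugin cache):

    from wlauto.core.configuration.manager import ConfigManager

    config = ConfigManager()
    config.load_config({'device': 'generic_android'}, 'example_source')
    combined = config.finalize()    # returns a CombinedConfig
    pod = combined.to_pod()         # {'settings': {...}, 'run_config': {...}}
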
diff --git a/wlauto/core/device_manager.py b/wlauto/core/device_manager.py
index 3e8d9296..ca0c3a68 100644
--- a/wlauto/core/device_manager.py
+++ b/wlauto/core/device_manager.py
@@ -26,7 +26,8 @@ class TargetInfo(object):
         instance.os_version = pod['os_version']
         instance.abi = pod['abi']
         instance.is_rooted = pod['is_rooted']
-        instance.kernel_version = KernelVersion(pod['kernel_version'])
+        instance.kernel_version = KernelVersion(pod['kernel_release'], 
+                                                pod['kernel_version'])
         instance.kernel_config = KernelConfig(pod['kernel_config'])
 
         if pod["target"] == "AndroidTarget":
@@ -69,15 +70,16 @@ class TargetInfo(object):
 
     def to_pod(self):
         pod = {}
-        pod['target'] = self.target.__class__.__name__
+        pod['target'] = self.target
         pod['abi'] = self.abi
-        pod['cpuinfo'] = self.cpuinfo.text
+        pod['cpuinfo'] = self.cpuinfo.sections
         pod['os'] = self.os
         pod['os_version'] = self.os_version
         pod['abi'] = self.abi
         pod['is_rooted'] = self.is_rooted
+        pod['kernel_release'] = self.kernel_version.release
         pod['kernel_version'] = self.kernel_version.version
-        pod['kernel_config'] = self.kernel_config.text
+        pod['kernel_config'] = dict(self.kernel_config.iteritems())
 
         if self.target == "AndroidTarget":
             pod['screen_resolution'] = self.screen_resolution
diff --git a/wlauto/core/entry_point.py b/wlauto/core/entry_point.py
index d58f1cc8..d3dea0f0 100644
--- a/wlauto/core/entry_point.py
+++ b/wlauto/core/entry_point.py
@@ -24,8 +24,8 @@ import warnings
 from wlauto.core import pluginloader
 from wlauto.core.command import init_argument_parser
 from wlauto.core.configuration import settings
+from wlauto.core.configuration.manager import ConfigManager
 from wlauto.core.host import init_user_directory
-from wlauto.core.state import WAState
 from wlauto.exceptions import WAError, DevlibError, ConfigError
 from wlauto.utils.doc import format_body
 from wlauto.utils.log import init_logging
@@ -46,7 +46,7 @@ def load_commands(subparsers):
 
 
 def main():
-    state = WAState()
+    config = ConfigManager()
 
     if not os.path.exists(settings.user_directory):
         init_user_directory()
@@ -68,16 +68,16 @@ def main():
 
         settings.set("verbosity", args.verbose)
 
-        state.load_config_file(settings.user_config_file)
+        config.load_config_file(settings.user_config_file)
         for config_file in args.config:
             if not os.path.exists(config_file):
                 raise ConfigError("Config file {} not found".format(config_file))
-            state.load_config_file(config_file)
+            config.load_config_file(config_file)
 
         init_logging(settings.verbosity)
 
         command = commands[args.command]
-        sys.exit(command.execute(state, args))
+        sys.exit(command.execute(config, args))
 
     except KeyboardInterrupt:
         logging.info('Got CTRL-C. Aborting.')
diff --git a/wlauto/core/execution.py b/wlauto/core/execution.py
index 30477984..95d67602 100644
--- a/wlauto/core/execution.py
+++ b/wlauto/core/execution.py
@@ -51,6 +51,7 @@ import wlauto.core.signal as signal
 from wlauto.core import instrumentation
 from wlauto.core import pluginloader
 from wlauto.core.configuration import settings
+from wlauto.core.device_manager import TargetInfo
 from wlauto.core.plugin import Artifact
 from wlauto.core.resolver import ResourceResolver
 from wlauto.core.result import ResultManager, IterationResult, RunResult
@@ -213,6 +214,30 @@ def _check_artifact_path(path, rootpath):
     return full_path
 
 
+
+class FakeTargetManager(object):
+
+    def __init__(self, name, config):
+        self.device_name = name
+        self.device_config = config
+
+        from devlib import LocalLinuxTarget
+        self.target = LocalLinuxTarget({'unrooted': True})
+        
+    def get_target_info(self):
+        return TargetInfo(self.target)
+
+    def validate_runtime_parameters(self, params):
+        pass
+
+    def merge_runtime_parameters(self, params):
+        pass
+
+
+def init_target_manager(config):
+    return FakeTargetManager(config.device, config.device_config)
+
+
 class Executor(object):
     """
     The ``Executor``'s job is to set up the execution context and pass to a
@@ -237,14 +262,14 @@ class Executor(object):
         self.device = None
         self.context = None
 
-    def execute(self, state, output):
+    def execute(self, config_manager, output):
         """
         Execute the run specified by an agenda. Optionally, selectors may be
         used to execute only a subset of the specified agenda.
 
         Params::
 
-            :state: a ``WAState`` containing processed configuraiton
+            :config_manager: a ``ConfigManager`` containing processed configuration
             :output: an initialized ``RunOutput`` that will be used to
                      store the results.
 
@@ -253,8 +278,17 @@ class Executor(object):
         signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
 
         self.logger.info('Initializing run')
+        self.logger.debug('Finalizing run configuration.')
+        config = config_manager.finalize()
+        output.write_config(config)
 
-        self.logger.debug('Loading run configuration.')
+        self.logger.info('Connecting to target')
+        target_manager = init_target_manager(config.run_config)
+        output.write_target_info(target_manager.get_target_info())
+
+        self.logger.info('Generationg jobs')
+        job_specs = config_manager.jobs_config.generate_job_specs(target_manager)
+        output.write_job_specs(job_specs)
 
     def old_exec(self, agenda, selectors={}):
         self.config.set_agenda(agenda, selectors)
diff --git a/wlauto/core/output.py b/wlauto/core/output.py
index 183ce8ed..1047bf2f 100644
--- a/wlauto/core/output.py
+++ b/wlauto/core/output.py
@@ -6,6 +6,9 @@ import sys
 import uuid
 from copy import copy
 
+from wlauto.core.configuration.configuration import JobSpec
+from wlauto.core.configuration.manager import CombinedConfig
+from wlauto.core.device_manager import TargetInfo
 from wlauto.utils.misc import touch
 from wlauto.utils.serializer import write_pod, read_pod
 
@@ -78,6 +81,18 @@ class RunOutput(object):
     def statefile(self):
         return os.path.join(self.basepath, '.run_state.json')
 
+    @property
+    def configfile(self):
+        return os.path.join(self.metadir, 'config.json')
+
+    @property
+    def targetfile(self):
+        return os.path.join(self.metadir, 'target_info.json')
+
+    @property
+    def jobsfile(self):
+        return os.path.join(self.metadir, 'jobs.json')
+
     def __init__(self, path):
         self.basepath = path
         self.info = None
@@ -98,6 +113,32 @@ class RunOutput(object):
     def write_state(self):
         write_pod(self.state.to_pod(), self.statefile)
 
+    def write_config(self, config):
+        write_pod(config.to_pod(), self.configfile)
+
+    def read_config(self):
+        if not os.path.isfile(self.configfile):
+            return None
+        return CombinedConfig.from_pod(read_pod(self.configfile))
+
+    def write_target_info(self, ti):
+        write_pod(ti.to_pod(), self.targetfile)
+
+    def read_target_info(self):
+        if not os.path.isfile(self.targetfile):
+            return None
+        return TargetInfo.from_pod(read_pod(self.targetfile))
+
+    def write_job_specs(self, job_specs):
+        js_pod = {'jobs': [js.to_pod() for js in job_specs]}
+        write_pod(js_pod, self.jobsfile)
+
+    def read_job_specs(self):
+        if not os.path.isfile(self.jobsfile):
+            return None
+        pod = read_pod(self.jobsfile)
+        return [JobSpec.from_pod(jp) for jp in pod['jobs']]
 
 
 def init_wa_output(path, wa_state, force=False):
diff --git a/wlauto/tests/test_parsers.py b/wlauto/tests/test_parsers.py
index 763d2c7f..77d8bba4 100644
--- a/wlauto/tests/test_parsers.py
+++ b/wlauto/tests/test_parsers.py
@@ -9,7 +9,7 @@ from wlauto.exceptions import ConfigError
 from wlauto.core.configuration.parsers import *  # pylint: disable=wildcard-import
 from wlauto.core.configuration.parsers import _load_file, _collect_valid_id, _resolve_params_alias
 from wlauto.core.configuration import RunConfiguration, JobGenerator, PluginCache, ConfigurationPoint
-from wlauto.core.configuration.configuration import WAConfiguration
+from wlauto.core.configuration.configuration import MetaConfiguration
 from wlauto.utils.types import toggle_set, reset_counter
 
 
@@ -129,8 +129,8 @@ class TestFunctions(TestCase):
 class TestConfigParser(TestCase):
 
     def test_error_cases(self):
-        wa_config = Mock(spec=WAConfiguration)
-        wa_config.configuration = WAConfiguration.configuration
+        wa_config = Mock(spec=MetaConfiguration)
+        wa_config.configuration = MetaConfiguration.configuration
         run_config = Mock(spec=RunConfiguration)
         run_config.configuration = RunConfiguration.configuration
         config_parser = ConfigParser(wa_config,
@@ -155,8 +155,8 @@ class TestConfigParser(TestCase):
                                "Unit test")
 
     def test_config_points(self):
-        wa_config = Mock(spec=WAConfiguration)
-        wa_config.configuration = WAConfiguration.configuration
+        wa_config = Mock(spec=MetaConfiguration)
+        wa_config.configuration = MetaConfiguration.configuration
 
         run_config = Mock(spec=RunConfiguration)
         run_config.configuration = RunConfiguration.configuration
@@ -211,8 +211,8 @@ class TestAgendaParser(TestCase):
 
     # Tests Phase 1 & 2
     def test_valid_structures(self):
-        wa_config = Mock(spec=WAConfiguration)
-        wa_config.configuration = WAConfiguration.configuration
+        wa_config = Mock(spec=MetaConfiguration)
+        wa_config.configuration = MetaConfiguration.configuration
         run_config = Mock(spec=RunConfiguration)
         run_config.configuration = RunConfiguration.configuration
         jobs_config = Mock(spec=JobGenerator)
@@ -241,8 +241,8 @@ class TestAgendaParser(TestCase):
 
     # Test Phase 3
     def test_id_collection(self):
-        wa_config = Mock(spec=WAConfiguration)
-        wa_config.configuration = WAConfiguration.configuration
+        wa_config = Mock(spec=MetaConfiguration)
+        wa_config.configuration = MetaConfiguration.configuration
         run_config = Mock(spec=RunConfiguration)
         run_config.configuration = RunConfiguration.configuration
         jobs_config = Mock(spec=JobGenerator)
@@ -267,8 +267,8 @@ class TestAgendaParser(TestCase):
 
     # Test Phase 4
     def test_id_assignment(self):
-        wa_config = Mock(spec=WAConfiguration)
-        wa_config.configuration = WAConfiguration.configuration
+        wa_config = Mock(spec=MetaConfiguration)
+        wa_config.configuration = MetaConfiguration.configuration
         run_config = Mock(spec=RunConfiguration)
         run_config.configuration = RunConfiguration.configuration
         jobs_config = Mock(spec=JobGenerator)
@@ -362,7 +362,7 @@ class TestAgendaParser(TestCase):
 
 
 class TestCommandLineArgsParser(TestCase):
-    wa_config = Mock(spec=WAConfiguration)
+    wa_config = Mock(spec=MetaConfiguration)
     run_config = Mock(spec=RunConfiguration)
     jobs_config = Mock(spec=JobGenerator)
 
diff --git a/wlauto/utils/types.py b/wlauto/utils/types.py
index c23f8215..8e4c5e65 100644
--- a/wlauto/utils/types.py
+++ b/wlauto/utils/types.py
@@ -322,7 +322,8 @@ class prioritylist(object):
             raise ValueError('Invalid index {}'.format(index))
         current_global_offset = 0
         priority_counts = {priority: count for (priority, count) in
-                           zip(self.priorities, [len(self.elements[p]) for p in self.priorities])}
+                           zip(self.priorities, [len(self.elements[p]) 
+                                                 for p in self.priorities])}
         for priority in self.priorities:
             if not index_range:
                 break
@@ -351,13 +352,9 @@ class toggle_set(set):
     and ``cherries`` but disables ``oranges``.
     """
 
-    def merge_with(self, other):
-        new_self = copy(self)
-        return toggle_set.merge(other, new_self)
-
-    def merge_into(self, other):
-        other = copy(other)
-        return toggle_set.merge(self, other)
+    @staticmethod
+    def from_pod(pod):
+        return toggle_set(pod)
 
     @staticmethod
     def merge(source, dest):
@@ -372,6 +369,14 @@ class toggle_set(set):
                 dest.add(item)
         return dest
 
+    def merge_with(self, other):
+        new_self = copy(self)
+        return toggle_set.merge(other, new_self)
+
+    def merge_into(self, other):
+        other = copy(other)
+        return toggle_set.merge(self, other)
+
     def values(self):
         """
         returns a list of enabled items.
@@ -396,6 +401,9 @@ class toggle_set(set):
                 conflicts.append(item)
         return conflicts
 
+    def to_pod(self):
+        return list(self.values())
+
 class ID(str):
 
     def merge_with(self, other):
@@ -411,11 +419,19 @@ class obj_dict(MutableMapping):
     as an attribute.
 
     :param not_in_dict: A list of keys that can only be accessed as attributes
+
     """
 
-    def __init__(self, not_in_dict=None, values={}):
+    @staticmethod
+    def from_pod(pod):
+        return obj_dict(pod)
+
+    def __init__(self, values=None, not_in_dict=None):
+        self.__dict__['dict'] = dict(values or {})
         self.__dict__['not_in_dict'] = not_in_dict if not_in_dict is not None else []
-        self.__dict__['dict'] = dict(values)
+
+    def to_pod(self):
+        return self.__dict__['dict']
 
     def __getitem__(self, key):
         if key in self.not_in_dict:
@@ -457,13 +473,3 @@ class obj_dict(MutableMapping):
             return self.__dict__['dict'][name]
         else:
             raise AttributeError("No such attribute: " + name)
-
-    def to_pod(self):
-        return self.__dict__.copy()
-
-    @staticmethod
-    def from_pod(pod):
-        instance = ObjDict()
-        for k, v in pod.iteritems():
-            instance[k] = v
-        return instance

From 18d001fd768ff65fa59ce7290ae8e875bcac6630 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Tue, 21 Feb 2017 10:55:21 +0000
Subject: [PATCH 5/8] Instrument initialization, job generation + bits

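The job generation added here turns the list of job specs into an ordered
stream of (spec, iteration) tuples according to the configured
execution_order. A rough illustration (the two specs 'A' and 'B' with two
iterations each are made up for this example):

    # Assuming specs holds two JobSpecs with ids 'A' and 'B', both with
    # iterations == 2:
    [(s.id, i) for s, i in permute_by_job(specs)]
    # -> [('A', 1), ('A', 2), ('B', 1), ('B', 2)]
    [(s.id, i) for s, i in permute_by_iteration(specs)]
    # -> [('A', 1), ('B', 1), ('A', 2), ('B', 2)]

ConfigManager.generate_jobs() wraps each resulting tuple in a Job and loads
the corresponding workload plugin for it.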
---
 wlauto/commands/run.py                     |   1 +
 wlauto/core/configuration/configuration.py |  16 +-
 wlauto/core/configuration/manager.py       | 163 ++++++++++++++++++++-
 wlauto/core/configuration/parsers.py       |   4 +-
 wlauto/core/configuration/plugin_cache.py  |   5 +
 wlauto/core/execution.py                   |  37 ++++-
 wlauto/core/instrumentation.py             |   4 +-
 wlauto/core/output.py                      |   4 +
 wlauto/core/plugin.py                      |   6 +-
 wlauto/core/resolver.py                    |   2 +-
 wlauto/instrumentation/misc/__init__.py    |   4 +-
 wlauto/utils/types.py                      |   1 +
 12 files changed, 228 insertions(+), 19 deletions(-)

diff --git a/wlauto/commands/run.py b/wlauto/commands/run.py
index 12038b1e..57a78819 100644
--- a/wlauto/commands/run.py
+++ b/wlauto/commands/run.py
@@ -90,6 +90,7 @@ class RunCommand(Command):
         parser = AgendaParser()
         if os.path.isfile(args.agenda):
             parser.load_from_path(config, args.agenda)
+            shutil.copy(args.agenda, output.raw_config_dir)
         else:
             try:
                 pluginloader.get_plugin_class(args.agenda, kind='workload')
diff --git a/wlauto/core/configuration/configuration.py b/wlauto/core/configuration/configuration.py
index 0fb6b5cf..9b043a0d 100644
--- a/wlauto/core/configuration/configuration.py
+++ b/wlauto/core/configuration/configuration.py
@@ -541,10 +541,9 @@ class Configuration(object):
 
     def to_pod(self):
         pod = {}
-        for cfg_point in self.configuration.itervalues():
+        for cfg_point in self.config_points:
             value = getattr(self, cfg_point.name, None)
-            if value is not None:
-                pod[cfg_point.name] = _to_pod(cfg_point, value)
+            pod[cfg_point.name] = _to_pod(cfg_point, value)
         return pod
 
 
@@ -848,6 +847,16 @@ class JobSpec(Configuration):
         instance['id'] = job_id
         return instance
 
+    @property
+    def section_id(self):
+        if self.id is not None:
+            return self.id.rsplit('-', 1)[0]
+
+    @property
+    def workload_id(self):
+        if self.id is not None:
+            return self.id.rsplit('-', 1)[-1]
+
     def __init__(self):
         super(JobSpec, self).__init__()
         self.to_merge = defaultdict(OrderedDict)
@@ -1001,7 +1010,6 @@ class JobGenerator(object):
         return specs
 
 
-
 def create_job_spec(workload_entry, sections, target_manager, plugin_cache,
                     disabled_instruments):
     job_spec = JobSpec()
diff --git a/wlauto/core/configuration/manager.py b/wlauto/core/configuration/manager.py
index ef41be9f..b8bacd38 100644
--- a/wlauto/core/configuration/manager.py
+++ b/wlauto/core/configuration/manager.py
@@ -1,3 +1,7 @@
+import random
+from itertools import izip_longest, groupby, chain
+
+from wlauto.core import pluginloader
 from wlauto.core.configuration.configuration import (MetaConfiguration,
                                                      RunConfiguration,
                                                      JobGenerator, settings)
@@ -10,7 +14,7 @@ class CombinedConfig(object):
     @staticmethod
     def from_pod(pod):
         instance = CombinedConfig()
-        instance.settings = MetaConfiguration.from_pod(pod.get('setttings', {}))
+        instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
         instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
         return instance
 
@@ -23,6 +27,24 @@ class CombinedConfig(object):
                 'run_config': self.run_config.to_pod()}
 
 
+class Job(object):
+
+    def __init__(self, spec, iteration, context):
+        self.spec = spec
+        self.iteration = iteration
+        self.context = context
+        self.status = 'new'
+        self.workload = None
+        self.output = None
+
+    def load(self, target, loader=pluginloader):
+        self.workload = loader.get_workload(self.spec.workload_name,
+                                            target,
+                                            **self.spec.workload_parameters)
+        self.workload.init_resources(self.context)
+        self.workload.validate()
+
+
 class ConfigManager(object):
     """
     Represents run-time state of WA. Mostly used as a container for loaded 
@@ -32,6 +54,26 @@ class ConfigManager(object):
     instance of WA itself.
     """
 
+    @property
+    def enabled_instruments(self):
+        return self.jobs_config.enabled_instruments
+
+    @property
+    def job_specs(self):
+        if not self._jobs_generated:
+            msg = 'Attempting to access job specs before '\
+                  'jobs have been generated'
+            raise RuntimeError(msg)
+        return [j.spec for j in self._jobs]
+
+    @property
+    def jobs(self):
+        if not self._jobs_generated:
+            msg = 'Attempting to access jobs before '\
+                  'they have been generated'
+            raise RuntimeError(msg)
+        return self._jobs
+
     def __init__(self, settings=settings):
         self.settings = settings
         self.run_config = RunConfiguration()
@@ -39,8 +81,9 @@ class ConfigManager(object):
         self.jobs_config = JobGenerator(self.plugin_cache)
         self.loaded_config_sources = []
         self._config_parser = ConfigParser()
-        self._job_specs = []
-        self.jobs = []
+        self._jobs = []
+        self._jobs_generated = False
+        self.agenda = None
 
     def load_config_file(self, filepath):
         self._config_parser.load_from_path(self, filepath)
@@ -50,7 +93,121 @@ class ConfigManager(object):
         self._config_parser.load(self, values, source)
         self.loaded_config_sources.append(source)
 
+    def get_plugin(self, name=None, kind=None, *args, **kwargs):
+        return self.plugin_cache.get_plugin(name, kind, *args, **kwargs)
+
+    def get_instruments(self, target):
+        instruments = []
+        for name in self.enabled_instruments:
+            instruments.append(self.get_plugin(name, kind='instrument', 
+                                               target=target))
+        return instruments
+
     def finalize(self):
+        if not self.agenda:
+            msg = 'Attempting to finalize config before agenda has been set'
+            raise RuntimeError(msg)
         self.run_config.merge_device_config(self.plugin_cache)
         return CombinedConfig(self.settings, self.run_config)
 
+    def generate_jobs(self, context):
+        job_specs = self.jobs_config.generate_job_specs(context.tm)
+        exec_order = self.run_config.execution_order
+        for spec, i in permute_iterations(job_specs, exec_order):
+            job = Job(spec, i, context)
+            job.load(context.tm.target)
+            self._jobs.append(job)
+        self._jobs_generated = True
+
+
+def permute_by_job(specs):
+    """
+    This is that "classic" implementation that executes all iterations of a
+    workload spec before proceeding onto the next spec.
+
+    """
+    for spec in specs:
+        for i in range(1, spec.iterations + 1):
+            yield (spec, i)
+ 
+
+def permute_by_iteration(specs):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the
+    next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
+    C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all
+    sections for the first global spec first, followed by all sections for the
+    second spec, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations,
+    this will run
+
+    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
+
+    """
+    groups = [list(g) for k, g in groupby(specs, lambda s: s.workload_id)]
+
+    all_tuples = []
+    for spec in chain(*groups):
+        all_tuples.append([(spec, i + 1) 
+                           for i in xrange(spec.iterations)])
+    for t in chain(*map(list, izip_longest(*all_tuples))):
+        if t is not None:
+            yield t
+
+
+def permute_by_section(specs):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the
+    next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of A1, A2, B1, B2,
+    C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all specs
+    for the first section followed by all specs for the second section, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations,
+    this will run
+
+    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
+
+    """
+    groups = [list(g) for k, g in groupby(specs, lambda s: s.section_id)]
+
+    all_tuples = []
+    for spec in chain(*groups):
+        all_tuples.append([(spec, i + 1) 
+                           for i in xrange(spec.iterations)])
+    for t in chain(*map(list, izip_longest(*all_tuples))):
+        if t is not None:
+            yield t
+ 
+
+def permute_randomly(specs):
+    """
+    This will generate a random permutation of specs/iteration tuples.
+
+    """
+    result = []
+    for spec in specs:
+        for i in xrange(1, spec.iterations + 1):
+            result.append((spec, i))
+    random.shuffle(result)
+    for t in result:
+        yield t
+
+
+permute_map = {
+    'by_iteration': permute_by_iteration,
+    'by_job': permute_by_job,
+    'by_section': permute_by_section,
+    'random': permute_randomly,
+}
+
+
+def permute_iterations(specs, exec_order):
+    if exec_order not in permute_map:
+        msg = 'Unknown execution order "{}"; must be in: {}'
+        raise ValueError(msg.format(exec_order, permute_map.keys()))
+    return permute_map[exec_order](specs)
diff --git a/wlauto/core/configuration/parsers.py b/wlauto/core/configuration/parsers.py
index 37624a18..df6d019e 100644
--- a/wlauto/core/configuration/parsers.py
+++ b/wlauto/core/configuration/parsers.py
@@ -95,6 +95,8 @@ class AgendaParser(object):
             self._process_global_workloads(state, global_workloads, wkl_ids)
             self._process_sections(state, sections, sect_ids, wkl_ids)
 
+            state.agenda = source
+
         except (ConfigError, SerializerSyntaxError) as e:
             raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
 
@@ -156,7 +158,7 @@ class AgendaParser(object):
                                                    state.jobs_config)
                 workloads.append(workload)
 
-            section = _construct_valid_entry(section, seen_section_ids, 
+            section = _construct_valid_entry(section, seen_sect_ids, 
                                              "s", state.jobs_config)
             state.jobs_config.add_section(section, workloads)
 
diff --git a/wlauto/core/configuration/plugin_cache.py b/wlauto/core/configuration/plugin_cache.py
index e0d79a41..4c02192d 100644
--- a/wlauto/core/configuration/plugin_cache.py
+++ b/wlauto/core/configuration/plugin_cache.py
@@ -123,6 +123,11 @@ class PluginCache(object):
 
         return config
 
+    def get_plugin(self, name, kind=None, *args, **kwargs):
+        config = self.get_plugin_config(name)
+        kwargs = dict(config.items() + kwargs.items())
+        return self.loader.get_plugin(name, kind=kind, *args, **kwargs)
+
     @memoized
     def get_plugin_parameters(self, name):
         params = self.loader.get_plugin_class(name).parameters
diff --git a/wlauto/core/execution.py b/wlauto/core/execution.py
index 95d67602..3ac3a2dd 100644
--- a/wlauto/core/execution.py
+++ b/wlauto/core/execution.py
@@ -73,6 +73,19 @@ REBOOT_DELAY = 3
 
 
 class ExecutionContext(object):
+
+
+    def __init__(self, cm, tm, output):
+        self.logger = logging.getLogger('ExecContext')
+        self.cm = cm
+        self.tm = tm
+        self.output = output
+        self.logger.debug('Loading resource discoverers')
+        self.resolver = ResourceResolver(cm)
+        self.resolver.load()
+
+
+class OldExecutionContext(object):
     """
     Provides a context for instrumentation. Keeps track of things like
     current workload and iteration.
@@ -214,8 +227,8 @@ def _check_artifact_path(path, rootpath):
     return full_path
 
 
-
 class FakeTargetManager(object):
+    # TODO: this is a FAKE
 
     def __init__(self, name, config):
         self.device_name = name
@@ -286,9 +299,17 @@ class Executor(object):
         target_manager = init_target_manager(config.run_config)
         output.write_target_info(target_manager.get_target_info())
 
-        self.logger.info('Generationg jobs')
-        job_specs = config_manager.jobs_config.generate_job_specs(target_manager)
-        output.write_job_specs(job_specs)
+        self.logger.info('Initializing execution context')
+        context = ExecutionContext(config_manager, target_manager, output)
+
+        self.logger.info('Generating jobs')
+        config_manager.generate_jobs(context)
+        output.write_job_specs(config_manager.job_specs)
+
+        self.logger.info('Installing instrumentation')
+        for instrument in config_manager.get_instruments(target_manager.target):
+            instrumentation.install(instrument)
+        instrumentation.validate()
 
     def old_exec(self, agenda, selectors={}):
         self.config.set_agenda(agenda, selectors)
@@ -396,6 +417,12 @@ class Executor(object):
         signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
 
 
+class Runner(object):
+    """
+    
+    """
+
+
 class RunnerJob(object):
     """
     Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration
@@ -410,7 +437,7 @@ class RunnerJob(object):
         self.result = IterationResult(self.spec)
 
 
-class Runner(object):
+class OldRunner(object):
     """
     This class is responsible for actually performing a workload automation
     run. The main responsibility of this class is to emit appropriate signals
diff --git a/wlauto/core/instrumentation.py b/wlauto/core/instrumentation.py
index db2db8ce..6bba95c5 100644
--- a/wlauto/core/instrumentation.py
+++ b/wlauto/core/instrumentation.py
@@ -380,9 +380,9 @@ class Instrument(Plugin):
     """
     kind = "instrument"
 
-    def __init__(self, device, **kwargs):
+    def __init__(self, target, **kwargs):
         super(Instrument, self).__init__(**kwargs)
-        self.device = device
+        self.target = target
         self.is_enabled = True
         self.is_broken = False
 
diff --git a/wlauto/core/output.py b/wlauto/core/output.py
index 1047bf2f..77d5853e 100644
--- a/wlauto/core/output.py
+++ b/wlauto/core/output.py
@@ -93,6 +93,10 @@ class RunOutput(object):
     def jobsfile(self):
         return os.path.join(self.metadir, 'jobs.json')
 
+    @property
+    def raw_config_dir(self):
+        return os.path.join(self.metadir, 'raw_config')
+
     def __init__(self, path):
         self.basepath = path
         self.info = None
diff --git a/wlauto/core/plugin.py b/wlauto/core/plugin.py
index f614169b..ccf2dece 100644
--- a/wlauto/core/plugin.py
+++ b/wlauto/core/plugin.py
@@ -557,6 +557,8 @@ class PluginLoader(object):
     def update(self, packages=None, paths=None, ignore_paths=None):
         """ Load plugins from the specified paths/packages
         without clearing or reloading existing plugin. """
+        msg = 'Updating from: packages={} paths={}'
+        self.logger.debug(msg.format(packages, paths))
         if packages:
             self.packages.extend(packages)
             self._discover_from_packages(packages)
@@ -572,6 +574,7 @@ class PluginLoader(object):
 
     def reload(self):
         """ Clear all discovered items and re-run the discovery. """
+        self.logger.debug('Reloading')
         self.clear()
         self._discover_from_packages(self.packages)
         self._discover_from_paths(self.paths, self.ignore_paths)
@@ -591,7 +594,8 @@ class PluginLoader(object):
             raise ValueError('Unknown plugin type: {}'.format(kind))
         store = self.kind_map[kind]
         if name not in store:
-            raise NotFoundError('plugins {} is not {} {}.'.format(name, get_article(kind), kind))
+            msg = 'plugin {} is not {} {}.'
+            raise NotFoundError(msg.format(name, get_article(kind), kind))
         return store[name]
 
     def get_plugin(self, name=None, kind=None, *args, **kwargs):
diff --git a/wlauto/core/resolver.py b/wlauto/core/resolver.py
index e68cec4f..ba643b0d 100644
--- a/wlauto/core/resolver.py
+++ b/wlauto/core/resolver.py
@@ -48,7 +48,7 @@ class ResourceResolver(object):
 
         """
 
-        for rescls in self.config.ext_loader.list_resource_getters():
+        for rescls in pluginloader.list_resource_getters():
             getter = self.config.get_plugin(name=rescls.name, kind="resource_getter", resolver=self)
             getter.register()
 
diff --git a/wlauto/instrumentation/misc/__init__.py b/wlauto/instrumentation/misc/__init__.py
index 86c880fd..a02793b0 100644
--- a/wlauto/instrumentation/misc/__init__.py
+++ b/wlauto/instrumentation/misc/__init__.py
@@ -207,8 +207,8 @@ class ExecutionTimeInstrument(Instrument):
 
     priority = 15
 
-    def __init__(self, device, **kwargs):
-        super(ExecutionTimeInstrument, self).__init__(device, **kwargs)
+    def __init__(self, target, **kwargs):
+        super(ExecutionTimeInstrument, self).__init__(target, **kwargs)
         self.start_time = None
         self.end_time = None
 
diff --git a/wlauto/utils/types.py b/wlauto/utils/types.py
index 8e4c5e65..7b13f979 100644
--- a/wlauto/utils/types.py
+++ b/wlauto/utils/types.py
@@ -404,6 +404,7 @@ class toggle_set(set):
     def to_pod(self):
         return list(self.values())
 
+
 class ID(str):
 
     def merge_with(self, other):

From 42539bbe0d00dfad968e0a6b20162b2264b4f047 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Mon, 6 Mar 2017 11:10:25 +0000
Subject: [PATCH 6/8] New target description + moving target stuff under
 "framework"

Changing the way target descriptions work from a static mapping to
something that is dynamically generated and is extensible via plugins.
Also moving core target implementation stuff under "framework".
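As a sketch of the intended extension point (TargetDescriptor and
get_descriptions() are part of the new wa/framework/target/descriptor.py;
the plugin below is hypothetical and its TargetDescription arguments are
abridged):

    from wa.framework.target.descriptor import (TargetDescriptor,
                                                TargetDescription)

    class MyTargets(TargetDescriptor):
        # Discovered through pluginloader like any other plugin.
        name = 'my_targets'

        def get_descriptions(self):
            # One entry per target this plugin knows how to describe.
            return [TargetDescription('my_board', self)]

"wa list targets" then picks the new entry up via get_target_descriptions()
without any change to a static mapping.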
---
 scripts/wa                                  |    2 +-
 setup.py                                    |    4 +-
 wa/__init__.py                              |    2 -
 wa/commands/list.py                         |  106 ++
 wa/commands/run.py                          |  133 +-
 wa/framework/command.py                     |   47 +-
 wa/framework/configuration/__init__.py      |   21 +-
 wa/framework/configuration/core.py          | 1296 ++++++++++++-------
 wa/framework/configuration/default.py       |   42 +
 wa/framework/configuration/execution.py     |  267 +++-
 wa/framework/configuration/parsers.py       |  308 +++++
 wa/framework/configuration/plugin_cache.py  |  227 ++++
 wa/framework/configuration/tree.py          |   89 ++
 wa/framework/entrypoint.py                  |  118 +-
 wa/framework/exception.py                   |   28 +-
 wa/framework/execution.py                   | 1140 +++++++++++-----
 wa/framework/host.py                        |   42 +-
 wa/framework/instrumentation.py             |  399 ++++++
 wa/framework/old_output.py                  |  362 ++++++
 wa/framework/output.py                      |  458 ++-----
 wa/framework/plugin.py                      |  602 +++++----
 wa/framework/pluginloader.py                |   52 +-
 wa/framework/resource.py                    |  512 ++------
 wa/framework/resource_getters.py            |  510 ++++++++
 wa/framework/run.py                         |    2 +-
 wa/framework/signal.py                      |   17 +-
 wa/framework/target.py                      |   80 ++
 wa/{ => framework}/target/__init__.py       |    0
 wa/{ => framework}/target/config.py         |    0
 wa/framework/target/descriptor.py           |  252 ++++
 wa/framework/target/info.py                 |   78 ++
 wa/{ => framework}/target/manager.py        |   19 +-
 wa/{ => framework}/target/runtime_config.py |    0
 wa/framework/workload.py                    |   15 +-
 wa/target/info.py                           |   85 --
 wa/utils/formatter.py                       |  148 +++
 wa/utils/log.py                             |  306 +++++
 wa/utils/misc.py                            |  544 ++++----
 wa/utils/serializer.py                      |   90 +-
 wa/utils/terminalsize.py                    |   93 ++
 wa/utils/types.py                           |  312 +++--
 wa/workloads/dhrystone/__init__.py          |    4 +-
 wlauto/core/configuration/plugin_cache.py   |    3 +-
 43 files changed, 6229 insertions(+), 2586 deletions(-)
 create mode 100644 wa/commands/list.py
 create mode 100644 wa/framework/configuration/default.py
 create mode 100644 wa/framework/configuration/parsers.py
 create mode 100644 wa/framework/configuration/plugin_cache.py
 create mode 100644 wa/framework/configuration/tree.py
 create mode 100644 wa/framework/instrumentation.py
 create mode 100644 wa/framework/old_output.py
 create mode 100644 wa/framework/resource_getters.py
 create mode 100644 wa/framework/target.py
 rename wa/{ => framework}/target/__init__.py (100%)
 rename wa/{ => framework}/target/config.py (100%)
 create mode 100644 wa/framework/target/descriptor.py
 create mode 100644 wa/framework/target/info.py
 rename wa/{ => framework}/target/manager.py (97%)
 rename wa/{ => framework}/target/runtime_config.py (100%)
 delete mode 100644 wa/target/info.py
 create mode 100644 wa/utils/formatter.py
 create mode 100644 wa/utils/log.py
 create mode 100644 wa/utils/terminalsize.py

diff --git a/scripts/wa b/scripts/wa
index cc2cf7f5..435075eb 100644
--- a/scripts/wa
+++ b/scripts/wa
@@ -13,5 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from wlauto.core.entry_point import main
+from wa.framework.entrypoint import main
 main()
diff --git a/setup.py b/setup.py
index 99152713..0b9bdf04 100644
--- a/setup.py
+++ b/setup.py
@@ -24,9 +24,9 @@ except ImportError:
     from distutils.core import setup
 
 
-wlauto_dir = os.path.join(os.path.dirname(__file__), 'wlauto')
+wlauto_dir = os.path.join(os.path.dirname(__file__), 'wa')
 
-sys.path.insert(0, os.path.join(wlauto_dir, 'core'))
+sys.path.insert(0, os.path.join(wlauto_dir, 'framework'))
 from version import get_wa_version
 
 # happens if falling back to distutils
diff --git a/wa/__init__.py b/wa/__init__.py
index 25cf6b22..262984be 100644
--- a/wa/__init__.py
+++ b/wa/__init__.py
@@ -2,8 +2,6 @@ from wa.framework import pluginloader, log, signal
 from wa.framework.configuration import settings
 from wa.framework.plugin import Plugin, Parameter
 from wa.framework.command import Command
-from wa.framework.run import runmethod
-from wa.framework.output import RunOutput
 from wa.framework.workload import Workload
 
 from wa.framework.exception import WAError, NotFoundError, ValidationError, WorkloadError
diff --git a/wa/commands/list.py b/wa/commands/list.py
new file mode 100644
index 00000000..540b200f
--- /dev/null
+++ b/wa/commands/list.py
@@ -0,0 +1,106 @@
+#    Copyright 2014-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from wa import Command, settings
+from wa.framework import pluginloader
+from wa.framework.plugin import PluginLoader
+from wa.framework.target.descriptor import get_target_descriptions
+from wa.utils.doc import get_summary
+from wa.utils.formatter import DescriptionListFormatter
+
+
+class ListCommand(Command):
+
+    name = 'list'
+    description = 'List available WA plugins with a short description of each.'
+
+    def initialize(self, context):
+        kinds = get_kinds()
+        self.parser.add_argument('kind', metavar='KIND',
+                                 help=('Specify the kind of plugin to list. Must be '
+                                       'one of: {}'.format(', '.join(kinds))),
+                                 choices=kinds)
+        self.parser.add_argument('-n', '--name', 
+                                 help='Filter results by the name specified')
+        self.parser.add_argument('-o', '--packaged-only', action='store_true',
+                                 help='''
+                                 Only list plugins packaged with WA itself. Do
+                                 not list plugins installed locally or from
+                                 other packages.
+                                 ''')
+        self.parser.add_argument('-p', '--platform', 
+                                 help='''
+                                 Only list results that are supported by the
+                                 specified platform.
+                                 ''')
+
+    def execute(self, state, args):
+        filters = {}
+        if args.name:
+            filters['name'] = args.name
+
+        if args.kind == 'targets':
+            list_targets()
+        else:
+            list_plugins(args, filters)
+
+
+def get_kinds():
+    kinds = pluginloader.kinds
+    if 'target_descriptor' in kinds:
+        kinds.remove('target_descriptor')
+        kinds.append('target')
+    return ['{}s'.format(name) for name in kinds]
+
+
+def list_targets():
+    targets = get_target_descriptions()
+    targets = sorted(targets, key=lambda x: x.name)
+
+    output = DescriptionListFormatter()
+    for target in targets:
+        output.add_item(target.description or '', target.name)
+    print output.format_data()
+
+
+def list_plugins(args, filters):
+    results = pluginloader.list_plugins(args.kind[:-1])
+    if filters or args.platform:
+        filtered_results = []
+        for result in results:
+            passed = True
+            for k, v in filters.iteritems():
+                if getattr(result, k) != v:
+                    passed = False
+                    break
+            if passed and args.platform:
+                passed = check_platform(result, args.platform)
+            if passed:
+                filtered_results.append(result)
+    else:  # no filters specified
+        filtered_results = results
+
+    if filtered_results:
+        output = DescriptionListFormatter()
+        for result in sorted(filtered_results, key=lambda x: x.name):
+            output.add_item(get_summary(result), result.name)
+        print output.format_data()
+
+
+def check_platform(plugin, platform):
+    supported_platforms = getattr(plugin, 'supported_platforms', [])
+    if supported_platforms:
+        return platform in supported_platforms
+    return True
diff --git a/wa/commands/run.py b/wa/commands/run.py
index c967a316..dc351e68 100644
--- a/wa/commands/run.py
+++ b/wa/commands/run.py
@@ -18,70 +18,117 @@ import os
 import sys
 import shutil
 
+import wa
 from wa import Command, settings
-from wa.framework import log
-from wa.framework.agenda import Agenda
-from wa.framework.output import RunOutput
+from wa.framework import pluginloader
+from wa.framework.configuration import RunConfiguration
+from wa.framework.configuration.parsers import AgendaParser, ConfigParser
+from wa.framework.execution import Executor
+from wa.framework.output import init_wa_output
+from wa.framework.version import get_wa_version
+from wa.framework.exception import NotFoundError, ConfigError
+from wa.utils import log
+from wa.utils.types import toggle_set
 
 
 class RunCommand(Command):
 
     name = 'run'
-    description = """
+    description = '''
     Execute automated workloads on a remote device and process the resulting output.
-    """
+
+    '''
 
     def initialize(self, context):
         self.parser.add_argument('agenda', metavar='AGENDA',
                                  help="""
-                                 Agenda for this workload automation run. This defines which
-                                 workloads will be executed, how many times, with which
-                                 tunables, etc.  See example agendas in {} for an example of
-                                 how this file should be structured.
-                                 """.format(os.path.dirname(wlauto.__file__)))
+                                 Agenda for this workload automation run. This
+                                 defines which workloads will be executed, how
+                                 many times, with which tunables, etc.  See
+                                 example agendas in {} for an example of how
+                                 this file should be structured.
+                                 """.format(os.path.dirname(wa.__file__)))
         self.parser.add_argument('-d', '--output-directory', metavar='DIR', default=None,
                                  help="""
-                                 Specify a directory where the output will be generated. If
-                                 the directory already exists, the script will abort unless -f
-                                 option (see below) is used, in which case the contents of the
-                                 directory will be overwritten. If this option is not specified,
-                                 then {} will be used instead.
-                                 """.format(settings.output_directory))
+                                 Specify a directory where the output will be
+                                 generated. If the directory already exists,
+                                 the script will abort unless -f option (see
+                                 below) is used, in which case the contents of
+                                 the directory will be overwritten. If this
+                                 option is not specified, then {} will be used
+                                 instead.
+                                 """.format(settings.default_output_directory))
         self.parser.add_argument('-f', '--force', action='store_true',
                                  help="""
-                                 Overwrite output directory if it exists. By default, the script
-                                 will abort in this situation to prevent accidental data loss.
+                                 Overwrite output directory if it exists. By
+                                 default, the script will abort in this
+                                 situation to prevent accidental data loss.
                                  """)
         self.parser.add_argument('-i', '--id', action='append', dest='only_run_ids', metavar='ID',
                                  help="""
-                                 Specify a workload spec ID from an agenda to run. If this is
-                                 specified, only that particular spec will be run, and other
-                                 workloads in the agenda will be ignored. This option may be
-                                 used to specify multiple IDs.
+                                 Specify a workload spec ID from an agenda to
+                                 run. If this is specified, only that
+                                 particular spec will be run, and other
+                                 workloads in the agenda will be ignored. This
+                                 option may be used to specify multiple IDs.
                                  """)
         self.parser.add_argument('--disable', action='append', dest='instruments_to_disable',
+                                 default=[],
                                  metavar='INSTRUMENT', help="""
-                                 Specify an instrument to disable from the command line. This
-                                 equivalent to adding "~{metavar}" to the instrumentation list in
-                                 the agenda. This can be used to temporarily disable a troublesome
-                                 instrument for a particular run without introducing permanent
-                                 change to the config (which one might then forget to revert).
-                                 This option may be specified multiple times.
+                                 Specify an instrument to disable from the
+                                 command line. This is equivalent to adding
+                                 "~{metavar}" to the instrumentation list in
+                                 the agenda. This can be used to temporarily
+                                 disable a troublesome instrument for a
+                                 particular run without introducing a
+                                 permanent change to the config (which one
+                                 might then forget to revert). This option
+                                 may be specified multiple times.
                                  """)
 
-    def execute(self, args):  # NOQA
+    def execute(self, config, args):
+        output = self.set_up_output_directory(config, args)
+        log.add_file(output.logfile)
+
+        self.logger.debug('Version: {}'.format(get_wa_version()))
+        self.logger.debug('Command Line: {}'.format(' '.join(sys.argv)))
+
+        disabled_instruments = toggle_set(["~{}".format(i) 
+                                           for i in args.instruments_to_disable])
+        config.jobs_config.disable_instruments(disabled_instruments)
+        config.jobs_config.only_run_ids(args.only_run_ids)
+
+        parser = AgendaParser()
+        if os.path.isfile(args.agenda):
+            parser.load_from_path(config, args.agenda)
+            shutil.copy(args.agenda, output.raw_config_dir)
+        else:
+            try:
+                pluginloader.get_plugin_class(args.agenda, kind='workload')
+                agenda = {'workloads': [{'name': args.agenda}]}
+                parser.load(config, agenda, 'CMDLINE_ARGS')
+            except NotFoundError:
+                msg = 'Agenda file "{}" does not exist, and there is no '\
+                      'workload with that name.\nYou can get a list of '\
+                      'available workloads by running "wa list workloads".'
+                raise ConfigError(msg.format(args.agenda))
+
+        executor = Executor()
+        executor.execute(config, output)
+
+    def set_up_output_directory(self, config, args):
+        if args.output_directory:
+            output_directory = args.output_directory
+        else:
+            output_directory = settings.default_output_directory
+        self.logger.debug('Using output directory: {}'.format(output_directory))
         try:
-            executor = Executor(args.output_directory, args.force)
-        except RuntimeError:
-            self.logger.error('Output directory {} exists.'.format(args.output_directory))
-            self.logger.error('Please specify another location, or use -f option to overwrite.\n')
-            return 2
-        for path in settings.get_config_paths():
-            executor.load_config(path)
-        executor.load_agenda(args.agenda)
-        for itd in args.instruments_to_disable:
-            self.logger.debug('Globally disabling instrument "{}" (from command line option)'.format(itd))
-            executor.disable_instrument(itd)
-        executor.initialize()
-        executor.execute(selectors={'ids': args.only_run_ids})
-        executor.finalize()
+            return init_wa_output(output_directory, config, args.force)
+        except RuntimeError as e:
+            if 'path exists' in str(e):
+                msg = 'Output directory "{}" exists.\nPlease specify another '\
+                      'location, or use -f option to overwrite.'
+                self.logger.critical(msg.format(output_directory))
+                sys.exit(1)
+            else:
+                raise e
diff --git a/wa/framework/command.py b/wa/framework/command.py
index 644ffd2c..443a54f0 100644
--- a/wa/framework/command.py
+++ b/wa/framework/command.py
@@ -16,32 +16,42 @@
 import textwrap
 
 from wa.framework.plugin import Plugin
-from wa.framework.entrypoint import init_argument_parser
+from wa.framework.version import get_wa_version
 from wa.utils.doc import format_body
 
 
+def init_argument_parser(parser):
+    parser.add_argument('-c', '--config', action='append', default=[],
+                        help='specify an additional config.py')
+    parser.add_argument('-v', '--verbose', action='count',
+                        help='The scripts will produce verbose output.')
+    parser.add_argument('--version', action='version', 
+                        version='%(prog)s {}'.format(get_wa_version()))
+    return parser
+
+
 class Command(Plugin):
     """
-    Defines a Workload Automation command. This will be executed from the command line as
-    ``wa <command> [args ...]``. This defines the name to be used when invoking wa, the
-    code that will actually be executed on invocation and the argument parser to be used
-    to parse the reset of the command line arguments.
+    Defines a Workload Automation command. This will be executed from the
+    command line as ``wa <command> [args ...]``. This defines the name to be
+    used when invoking wa, the code that will actually be executed on
+    invocation and the argument parser to be used to parse the rest of the
+    command line arguments.
 
     """
-
-    kind = 'command'
+    kind = "command"
     help = None
     usage = None
     description = None
     epilog = None
     formatter_class = None
 
-    def __init__(self, subparsers, **kwargs):
-        super(Command, self).__init__(**kwargs)
+    def __init__(self, subparsers):
+        super(Command, self).__init__()
         self.group = subparsers
+        desc = format_body(textwrap.dedent(self.description), 80)
         parser_params = dict(help=(self.help or self.description), usage=self.usage,
-                             description=format_body(textwrap.dedent(self.description), 80),
-                             epilog=self.epilog)
+                             description=desc, epilog=self.epilog)
         if self.formatter_class:
             parser_params['formatter_class'] = self.formatter_class
         self.parser = subparsers.add_parser(self.name, **parser_params)
@@ -50,19 +60,22 @@ class Command(Plugin):
 
     def initialize(self, context):
         """
-        Perform command-specific initialisation (e.g. adding command-specific options to the command's
-        parser). ``context`` is always ``None``.
+        Perform command-specific initialisation (e.g. adding command-specific
+        options to the command's parser). ``context`` is always ``None``.
 
         """
         pass
 
-    def execute(self, args):
+    def execute(self, state, args):
         """
         Execute this command.
 
-        :args: An ``argparse.Namespace`` containing command line arguments (as returned by
-               ``argparse.ArgumentParser.parse_args()``. This would usually be the result of
-               invoking ``self.parser``.
+        :state: An initialized ``ConfigManager`` that contains the current
+                state of WA execution up to that point (processed
+                configuration, loaded plugins, etc).
+        :args: An ``argparse.Namespace`` containing command line arguments (as
+               returned by ``argparse.ArgumentParser.parse_args()``). This
+               would usually be the result of invoking ``self.parser``.
 
         """
         raise NotImplementedError()
diff --git a/wa/framework/configuration/__init__.py b/wa/framework/configuration/__init__.py
index 5c1be001..a3593794 100644
--- a/wa/framework/configuration/__init__.py
+++ b/wa/framework/configuration/__init__.py
@@ -1,2 +1,19 @@
-from wa.framework.configuration.core import settings, ConfigurationPoint, PluginConfiguration
-from wa.framework.configuration.core import merge_config_values, WA_CONFIGURATION
+#    Copyright 2013-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from wlauto.core.configuration.configuration import (settings,
+                                                     RunConfiguration,
+                                                     JobGenerator,
+                                                     ConfigurationPoint)
+from wlauto.core.configuration.plugin_cache import PluginCache
diff --git a/wa/framework/configuration/core.py b/wa/framework/configuration/core.py
index 7c33d746..c79df8b8 100644
--- a/wa/framework/configuration/core.py
+++ b/wa/framework/configuration/core.py
@@ -1,29 +1,164 @@
-import os
-import logging
-from glob import glob
-from copy import copy
-from itertools import chain
+#    Copyright 2014-2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
-from wa.framework import pluginloader
-from wa.framework.exception import ConfigError
-from wa.utils.types import integer, boolean, identifier, list_of_strings, list_of
-from wa.utils.misc import isiterable, get_article
-from wa.utils.serializer import read_pod, yaml
+import os
+import re
+from copy import copy
+from collections import OrderedDict, defaultdict
+
+from wa.framework.exception import ConfigError, NotFoundError
+from wa.framework.configuration.tree import SectionNode
+from wa.utils.misc import (get_article, merge_config_values)
+from wa.utils.types import (identifier, integer, boolean,
+                            list_of_strings, toggle_set,
+                            obj_dict)
+from wa.utils.serializer import is_pod
+
+# Mapping for kind conversion; see docs for convert_types below
+KIND_MAP = {
+    int: integer,
+    bool: boolean,
+    dict: OrderedDict,
+}
+
+ITERATION_STATUS = [
+    'NOT_STARTED',
+    'RUNNING',
+
+    'OK',
+    'NONCRITICAL',
+    'PARTIAL',
+    'FAILED',
+    'ABORTED',
+    'SKIPPED',
+]
+
+##########################
+### CONFIG POINT TYPES ###
+##########################
+
+
+class RebootPolicy(object):
+    """
+    Represents the reboot policy for the execution -- at what points the device
+    should be rebooted. This, in turn, is controlled by the policy value that is
+    passed in on construction and would typically be read from the user's settings.
+    Valid policy values are:
+
+    :never: The device will never be rebooted.
+    :as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
+    :initial: The device will be rebooted when the execution first starts, just before
+              executing the first workload spec.
+    :each_spec: The device will be rebooted before running a new workload spec.
+    :each_iteration: The device will be rebooted before each new iteration.
+
+    """
+
+    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']
+
+    def __init__(self, policy):
+        policy = policy.strip().lower().replace(' ', '_')
+        if policy not in self.valid_policies:
+            message = 'Invalid reboot policy {}; must be one of {}'.format(policy, ', '.join(self.valid_policies))
+            raise ConfigError(message)
+        self.policy = policy
+
+    @property
+    def can_reboot(self):
+        return self.policy != 'never'
+
+    @property
+    def perform_initial_boot(self):
+        return self.policy not in ['never', 'as_needed']
+
+    @property
+    def reboot_on_each_spec(self):
+        return self.policy in ['each_spec', 'each_iteration']
+
+    @property
+    def reboot_on_each_iteration(self):
+        return self.policy == 'each_iteration'
+
+    def __str__(self):
+        return self.policy
+
+    __repr__ = __str__
+
+    def __cmp__(self, other):
+        if isinstance(other, RebootPolicy):
+            return cmp(self.policy, other.policy)
+        else:
+            return cmp(self.policy, other)
+
+    def to_pod(self):
+        return self.policy
+
+    @staticmethod
+    def from_pod(pod):
+        return RebootPolicy(pod)
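+
+
+# Illustrative usage sketch (not part of the patch): RebootPolicy normalizes
+# its input before validating it, so values read from user settings may use
+# spaces and mixed case:
+#
+#     policy = RebootPolicy('Each Spec')    # stored as 'each_spec'
+#     assert policy.can_reboot
+#     assert policy.reboot_on_each_spec
+#     assert not policy.reboot_on_each_iteration
+#     RebootPolicy('sometimes')             # raises ConfigError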
+
+
+class status_list(list):
+
+    def append(self, item):
+        list.append(self, str(item).upper())
+
+
+class LoggingConfig(dict):
+
+    defaults = {
+        'file_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
+        'verbose_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
+        'regular_format': '%(levelname)-8s %(message)s',
+        'color': True,
+    }
+
+    def __init__(self, config=None):
+        dict.__init__(self)
+        if isinstance(config, dict):
+            config = {identifier(k.lower()): v for k, v in config.iteritems()}
+            self['regular_format'] = config.pop('regular_format', self.defaults['regular_format'])
+            self['verbose_format'] = config.pop('verbose_format', self.defaults['verbose_format'])
+            self['file_format'] = config.pop('file_format', self.defaults['file_format'])
+            self['color'] = config.pop('colour_enabled', self.defaults['color'])  # legacy
+            self['color'] = config.pop('color', self['color'])
+            if config:
+                message = 'Unexpected logging configuration parameters: {}'
+                raise ValueError(message.format(', '.join(config.keys())))
+        elif config is None:
+            for k, v in self.defaults.iteritems():
+                self[k] = v
+        else:
+            raise ValueError(config)
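+
+
+# Illustrative sketch (not part of the patch): LoggingConfig accepts None
+# (all defaults) or a dict whose keys are lower-cased and converted to
+# identifiers, with the legacy 'colour_enabled' key still honoured:
+#
+#     cfg = LoggingConfig({'Colour_Enabled': False})
+#     assert cfg['color'] is False
+#     assert cfg['file_format'] == LoggingConfig.defaults['file_format']
+#     LoggingConfig(42)    # raises ValueError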
+
+
+def get_type_name(kind):
+    typename = str(kind)
+    if '\'' in typename:
+        typename = typename.split('\'')[1]
+    elif typename.startswith('<function'):
+        typename = typename.split()[1]
+    return typename
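+
+
+# For example (illustrative, not part of the patch): get_type_name() pulls a
+# short name out of a type's or a function's repr:
+#
+#     get_type_name(int)        # 'int',     from "<type 'int'>"
+#     get_type_name(integer)    # 'integer', from "<function integer at 0x...>"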
 
 
 class ConfigurationPoint(object):
     """
-    This defines a gneric configuration point for workload automation. This is
+    This defines a generic configuration point for workload automation. This is
     used to handle global settings, plugin parameters, etc.
 
     """
 
-    # Mapping for kind conversion; see docs for convert_types below
-    kind_map = {
-        int: integer,
-        bool: boolean,
-    }
-
     def __init__(self, name,
                  kind=None,
                  mandatory=None,
@@ -34,12 +169,12 @@ class ConfigurationPoint(object):
                  constraint=None,
                  merge=False,
                  aliases=None,
-                 convert_types=True):
+                 global_alias=None):
         """
         Create a new Parameter object.
 
         :param name: The name of the parameter. This will become an instance
-                     member of the extension object to which the parameter is
+                     member of the plugin object to which the parameter is
                      applied, so it must be a valid python  identifier. This
                      is the only mandatory parameter.
         :param kind: The type of parameter this is. This must be a callable
@@ -49,11 +184,11 @@ class ConfigurationPoint(object):
                      ``int``, ``bool``, etc. -- can be used here. This
                      defaults to ``str`` if not specified.
         :param mandatory: If set to ``True``, then a non-``None`` value for
-                          this parameter *must* be provided on extension
+                          this parameter *must* be provided on plugin
                           object construction, otherwise ``ConfigError``
                           will be raised.
         :param default: The default value for this parameter. If no value
-                        is specified on extension construction, this value
+                        is specified on plugin construction, this value
                         will be used instead. (Note: if this is specified
                         and is not ``None``, then ``mandatory`` parameter
                         will be ignored).
@@ -78,26 +213,24 @@ class ConfigurationPoint(object):
                       the new value. If this is set to ``True`` then the two
                       values will be merged instead. The rules by which the
                       values are merged will be determined by the types of
-                      the existing and new values -- see 
+                      the existing and new values -- see
                       ``merge_config_values`` documentation for details.
         :param aliases: Alternative names for the same configuration point.
                         These are largely for backwards compatibility.
-        :param convert_types: If ``True`` (the default), will automatically
-                              convert ``kind`` values from native Python
-                              types to WA equivalents. This allows more
-                              ituitive interprestation of parameter values,
-                              e.g. the string ``"false"`` being interpreted
-                              as ``False`` when specifed as the value for
-                              a boolean Parameter.
-
+        :param global_alias: An alias for this parameter that can be specified at
+                            the global level. A global_alias can map onto many
+                            ConfigurationPoints.
         """
         self.name = identifier(name)
+        if kind in KIND_MAP:
+            kind = KIND_MAP[kind]
         if kind is not None and not callable(kind):
             raise ValueError('Kind must be callable.')
-        if convert_types and kind in self.kind_map:
-            kind = self.kind_map[kind]
         self.kind = kind
         self.mandatory = mandatory
+        if not is_pod(default):
+            msg = "The default for '{}' must be a Plain Old Data type, but it is of type '{}' instead."
+            raise TypeError(msg.format(self.name, type(default)))
         self.default = default
         self.override = override
         self.allowed_values = allowed_values
@@ -109,531 +242,796 @@ class ConfigurationPoint(object):
         self.constraint = constraint
         self.merge = merge
         self.aliases = aliases or []
+        self.global_alias = global_alias
+
+        if self.default is not None:
+            try:
+                self.validate_value("init", self.default)
+            except ConfigError:
+                raise ValueError('Default value "{}" is not valid'.format(self.default))
 
     def match(self, name):
-        if name == self.name:
+        if name == self.name or name in self.aliases:
             return True
-        elif name in self.aliases:
+        elif name == self.global_alias:
             return True
         return False
 
-    def set_value(self, obj, value=None):
+    def set_value(self, obj, value=None, check_mandatory=True):
         if value is None:
             if self.default is not None:
                 value = self.default
-            elif self.mandatory:
-                msg = 'No values specified for mandatory parameter {} in {}'
+            elif check_mandatory and self.mandatory:
+                msg = 'No value specified for mandatory parameter "{}" in {}'
                 raise ConfigError(msg.format(self.name, obj.name))
         else:
             try:
                 value = self.kind(value)
             except (ValueError, TypeError):
-                typename = self.get_type_name()
+                typename = get_type_name(self.kind)
                 msg = 'Bad value "{}" for {}; must be {} {}'
                 article = get_article(typename)
                 raise ConfigError(msg.format(value, self.name, article, typename))
+        if value is not None:
+            self.validate_value(obj.name, value)
         if self.merge and hasattr(obj, self.name):
             value = merge_config_values(getattr(obj, self.name), value)
         setattr(obj, self.name, value)
 
     def validate(self, obj):
         value = getattr(obj, self.name, None)
-        self.validate_value(value)
-
-    def validate_value(self,obj, value):
         if value is not None:
-            if self.allowed_values:
-                self._validate_allowed_values(obj, value)
-            if self.constraint:
-                self._validate_constraint(obj, value)
+            self.validate_value(obj.name, value)
         else:
             if self.mandatory:
-                msg = 'No value specified for mandatory parameter {} in {}.'
+                msg = 'No value specified for mandatory parameter "{}" in {}.'
                 raise ConfigError(msg.format(self.name, obj.name))
 
-    def get_type_name(self):
-        typename = str(self.kind)
-        if '\'' in typename:
-            typename = typename.split('\'')[1]
-        elif typename.startswith('<function'):
-            typename = typename.split()[1]
-        return typename
+    def validate_value(self, name, value):
+        if self.allowed_values:
+            self.validate_allowed_values(name, value)
+        if self.constraint:
+            self.validate_constraint(name, value)
 
-    def _validate_allowed_values(self, obj, value):
+    def validate_allowed_values(self, name, value):
         if 'list' in str(self.kind):
             for v in value:
                 if v not in self.allowed_values:
                     msg = 'Invalid value {} for {} in {}; must be in {}'
-                    raise ConfigError(msg.format(v, self.name, obj.name, self.allowed_values))
+                    raise ConfigError(msg.format(v, self.name, name, self.allowed_values))
         else:
             if value not in self.allowed_values:
                 msg = 'Invalid value {} for {} in {}; must be in {}'
-                raise ConfigError(msg.format(value, self.name, obj.name, self.allowed_values))
+                raise ConfigError(msg.format(value, self.name, name, self.allowed_values))
 
-    def _validate_constraint(self, obj, value):
-        msg_vals = {'value': value, 'param': self.name, 'extension': obj.name}
+    def validate_constraint(self, name, value):
+        msg_vals = {'value': value, 'param': self.name, 'plugin': name}
         if isinstance(self.constraint, tuple) and len(self.constraint) == 2:
             constraint, msg = self.constraint  # pylint: disable=unpacking-non-sequence
         elif callable(self.constraint):
             constraint = self.constraint
-            msg = '"{value}" failed constraint validation for {param} in {extension}.'
+            msg = '"{value}" failed constraint validation for "{param}" in "{plugin}".'
         else:
-            raise ValueError('Invalid constraint for {}: must be callable or a 2-tuple'.format(self.name))
+            raise ValueError('Invalid constraint for "{}": must be callable or a 2-tuple'.format(self.name))
         if not constraint(value):
             raise ConfigError(value, msg.format(**msg_vals))
 
     def __repr__(self):
         d = copy(self.__dict__)
         del d['description']
-        return 'ConfPoint({})'.format(d)
+        return 'ConfigurationPoint({})'.format(d)
 
     __str__ = __repr__
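+
+
+# Illustrative usage sketch (not part of the patch; the holder class and
+# parameter names below are hypothetical). A ConfigurationPoint attaches a
+# validated, kind-converted value to any object with a ``name`` attribute:
+#
+#     class _Holder(object):
+#         name = 'example'
+#
+#     cp = ConfigurationPoint('retries', kind=int, default=3,
+#                             allowed_values=[1, 2, 3])
+#     obj = _Holder()
+#     cp.set_value(obj)         # no value given -> default (3)
+#     cp.set_value(obj, '2')    # kind conversion: '2' -> 2
+#     cp.set_value(obj, 7)      # raises ConfigError (not in allowed_values)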
 
 
-class ConfigurationPointCollection(object):
+class RuntimeParameter(object):
 
-    def __init__(self):
-        self._configs = []
-        self._config_map = {}
+    def __init__(self, name,
+                 kind=None,
+                 description=None,
+                 merge=False):
 
-    def get(self, name, default=None):
-        return self._config_map.get(name, default)
+        self.name = re.compile(name)
+        if kind is not None:
+            if kind in KIND_MAP:
+                kind = KIND_MAP[kind]
+            if not callable(kind):
+                raise ValueError('Kind must be callable.')
+        else:
+            kind = str
+        self.kind = kind
+        self.description = description
+        self.merge = merge
 
-    def add(self, point):
-        if not isinstance(point, ConfigurationPoint):
-            raise ValueError('Mustbe a ConfigurationPoint, got {}'.format(point.__class__))
-        existing = self.get(point.name)
-        if existing:
-            if point.override:
-                new_point = copy(existing)
-                for a, v in point.__dict__.iteritems():
-                    if v is not None:
-                        setattr(new_point, a, v)
-                self.remove(existing)
-                point = new_point
-            else:
-                raise ValueError('Duplicate ConfigurationPoint "{}"'.format(point.name))
-        self._add(point)
+    def validate_kind(self, value, name):
+        try:
+            value = self.kind(value)
+        except (ValueError, TypeError):
+            typename = get_type_name(self.kind)
+            msg = 'Bad value "{}" for {}; must be {} {}'
+            article = get_article(typename)
+            raise ConfigError(msg.format(value, name, article, typename))
 
-    def remove(self, point):
-        self._configs.remove(point)
-        del self._config_map[point.name]
-        for alias in point.aliases:
-            del self._config_map[alias]
+    def match(self, name):
+        if self.name.match(name):
+            return True
+        return False
 
-    append = add
+    def update_value(self, name, new_value, source, dest):
+        self.validate_kind(new_value, name)
 
-    def _add(self, point):
-        self._configs.append(point)
-        self._config_map[point.name] = point
-        for alias in point.aliases:
-            if alias in self._config_map:
-                message = 'Clashing alias "{}" between "{}" and "{}"'
-                raise ValueError(message.format(alias, point.name,
-                                                self._config_map[alias].name))
+        if name in dest:
+            old_value, sources = dest[name]
+        else:
+            old_value = None
+            sources = {}
+        sources[source] = new_value
 
-    def __str__(self):
-        str(self._configs)
+        if self.merge:
+            new_value = merge_config_values(old_value, new_value)
 
-    __repr__ = __str__
+        dest[name] = (new_value, sources)
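+
+
+# Illustrative sketch (not part of the patch): a RuntimeParameter's name is a
+# regex, so a single parameter can match a whole family of settings, and
+# update_value() records the value per source in ``dest``:
+#
+#     rtp = RuntimeParameter('(.+)_governor')
+#     assert rtp.match('big_governor')
+#
+#     dest = {}
+#     rtp.update_value('big_governor', 'performance', 'agenda', dest)
+#     # dest == {'big_governor': ('performance', {'agenda': 'performance'})}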
 
-    def __iadd__(self, other):
-        for p in other:
-            self.add(p)
-        return self
 
-    def __iter__(self):
-        return iter(self._configs)
+class RuntimeParameterManager(object):
 
-    def __contains__(self, p):
-        if isinstance(p, basestring):
-            return p in self._config_map
-        return p.name in self._config_map
+    runtime_parameters = []
 
-    def __getitem__(self, i):
-        if isinstance(i, int):
-            return self._configs[i]
-        return self._config_map[i]
+    def __init__(self, target_manager):
+        self.state = {}
+        self.target_manager = target_manager
 
-    def __len__(self):
-        return len(self._configs)
+    def get_initial_state(self):
+        """
+        Should be used to load the starting state from the device. This state
+        should be updated whenever changes are successfully applied to the device.
+        """
+        pass
 
-    
-class LoggingConfig(dict):
+    def match(self, name):
+        for rtp in self.runtime_parameters:
+            if rtp.match(name):
+                return True
+        return False
 
-    defaults = {
-        'file_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
-        'verbose_format': '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
-        'regular_format': '%(levelname)-8s %(message)s',
-        'color': True,
+    def update_value(self, name, value, source, dest):
+        for rtp in self.runtime_parameters:
+            if rtp.match(name):
+                rtp.update_value(name, value, source, dest)
+                break
+        else:
+            msg = 'Unknown runtime parameter "{}"'
+            raise ConfigError(msg.format(name))
+
+    def static_validation(self, params):
+        """
+        Validate values that do not require an active device connection.
+        This method should also pop all runtime parameters meant for this manager
+        from params, even if they are not being statically validated.
+        """
+        pass
+
+    def dynamic_validation(self, params):
+        """
+        Validate values that require an active device connection.
+        """
+        pass
+
+    def commit(self):
+        """
+        All values have been validated; this method will now actually set them
+        on the device.
+        """
+        pass
+
+################################
+### RuntimeParameterManagers ###
+################################
+
+
+class CpuFreqParameters(RuntimeParameterManager):
+
+    runtime_parameters = {
+        "cores": RuntimeParameter("(.+)_cores"),
+        "min_frequency": RuntimeParameter("(.+)_min_frequency", kind=int),
+        "max_frequency": RuntimeParameter("(.+)_max_frequency", kind=int),
+        "frequency": RuntimeParameter("(.+)_frequency", kind=int),
+        "governor": RuntimeParameter("(.+)_governor"),
+        "governor_tunables": RuntimeParameter("(.+)_governor_tunables"),
     }
 
-    def __init__(self, config=None):
-        if isinstance(config, dict):
-            config = {identifier(k.lower()): v for k, v in config.iteritems()}
-            self['regular_format'] = config.pop('regular_format', self.defaults['regular_format'])
-            self['verbose_format'] = config.pop('verbose_format', self.defaults['verbose_format'])
-            self['file_format'] = config.pop('file_format', self.defaults['file_format'])
-            self['color'] = config.pop('colour_enabled', self.defaults['color'])  # legacy
-            self['color'] = config.pop('color', self.defaults['color'])
-            if config:
-                message = 'Unexpected logging configuation parameters: {}'
-                raise ValueError(message.format(bad_vals=', '.join(config.keys())))
-        elif config is None:
-            for k, v in self.defaults.iteritems():
-                self[k] = v
-        else:
-            raise ValueError(config)
+    def __init__(self, target):
+        super(CpuFreqParameters, self).__init__(target)
+        self.core_names = set(target.core_names)
 
+    def match(self, name):
+        for param in self.runtime_parameters.itervalues():
+            if param.match(name):
+                return True
+        return False
 
-__WA_CONFIGURATION = [
-    ConfigurationPoint(
-        'user_directory',
-        description="""
-        Path to the user directory. This is the location WA will look for
-        user configuration, additional plugins and plugin dependencies.
-        """,
-        kind=str,
-        default=os.path.join(os.path.expanduser('~'), '.workload_automation'),
-    ),
-    ConfigurationPoint(
-        'plugin_packages',
-        kind=list_of_strings,
-        default=[
-            'wa.commands',
-            'wa.workloads',
-#            'wa.instruments',
-#            'wa.processors',
-#            'wa.targets',
-            'wa.framework.actor',
-            'wa.framework.target',
-            'wa.framework.resource',
-            'wa.framework.execution',
-        ],
-        description="""
-        List of packages that will be scanned for WA plugins.
-        """,
-    ),
-    ConfigurationPoint(
-        'plugin_paths',
-        kind=list_of_strings,
-        default=[
-            'workloads',
-            'instruments',
-            'targets',
-            'processors',
-
-            # Legacy
-            'devices',
-            'result_processors',
-        ],
-        description="""
-        List of paths that will be scanned for WA plugins.
-        """,
-    ),
-    ConfigurationPoint(
-        'plugin_ignore_paths',
-        kind=list_of_strings,
-        default=[],
-        description="""
-        List of (sub)paths that will be ignored when scanning 
-        ``plugin_paths`` for WA plugins.
-        """,
-    ),
-    ConfigurationPoint(
-        'filer_mount_point',
-        description="""
-        The local mount point for the filer hosting WA assets.
-        """,
-    ),
-    ConfigurationPoint(
-        'logging',
-        kind=LoggingConfig,
-        description="""
-        WA logging configuration. This should be a dict with a subset
-        of the following keys::
-
-        :normal_format: Logging format used for console output
-        :verbose_format: Logging format used for verbose console output 
-        :file_format: Logging format used for run.log
-        :color: If ``True`` (the default), console logging output will
-                contain bash color escape codes. Set this to ``False`` if
-                console output will be piped somewhere that does not know
-                how to handle those.
-        """,
-    ),
-    ConfigurationPoint(
-        'verbosity',
-        kind=int,
-        default=0,
-        description="""
-        Verbosity of console output.
-        """,
-    ),
-]
-
-WA_CONFIGURATION = {cp.name: cp for cp in __WA_CONFIGURATION}
-
-ENVIRONMENT_VARIABLES = {
-    'WA_USER_DIRECTORY': WA_CONFIGURATION['user_directory'],
-    'WA_PLUGIN_PATHS': WA_CONFIGURATION['plugin_paths'],
-    'WA_EXTENSION_PATHS': WA_CONFIGURATION['plugin_paths'],  # extension_paths (legacy)
-}
-
-
-class WAConfiguration(object):
-    """
-    This is configuration for Workload Automation framework as a whole. This
-    does not track configuration for WA runs. Rather, this tracks "meta" 
-    configuration, such as various locations WA looks for things, logging
-    configuration etc.
-
-    """
-
-    basename = 'config'
-
-    def __init__(self):
-        self.user_directory = ''
-        self.dependencies_directory = 'dependencies'
-        self.plugin_packages = []
-        self.plugin_paths = []
-        self.plugin_ignore_paths = []
-        self.logging = {}
-        self._logger = logging.getLogger('settings')
-        for confpoint in WA_CONFIGURATION.itervalues():
-            confpoint.set_value(self)
-
-    def load_environment(self):
-        for name, confpoint in ENVIRONMENT_VARIABLES.iteritems():
-            value = os.getenv(name)
-            if value:
-                confpoint.set_value(self, value)
-        self._expand_paths()
-
-    def load_config_file(self, path):
-        self.load(read_pod(path))
-
-    def load_user_config(self):
-        globpath = os.path.join(self.user_directory, '{}.*'.format(self.basename))
-        for path in glob(globpath):
-            ext = os.path.splitext(path)[1].lower()
-            if ext in ['.pyc', '.pyo']:
+    def update_value(self, name, value, source, dest):
+        for param in self.runtime_parameters.itervalues():
+            core_name_match = param.name.match(name)
+            if not core_name_match:
                 continue
-            self.load_config_file(path)
 
-    def load(self, config):
-        for name, value in config.iteritems():
-            if name in WA_CONFIGURATION:
-                confpoint = WA_CONFIGURATION[name]
-                confpoint.set_value(self, value)
-        self._expand_paths()
+            core_name = core_name_match.groups()[0]
+            if core_name not in self.core_names:
+                msg = '"{}" in {} is not a valid core name, must be in: {}'
+                raise ConfigError(msg.format(core_name, name, ", ".join(self.core_names)))
 
-    def set(self, name, value):
-        if name not in WA_CONFIGURATION:
-            raise ConfigError('Unknown WA configuration "{}"'.format(name))
-        WA_CONFIGURATION[name].set_value(value)
+            param.update_value(name, value, source, dest)
+            break
+        else:
+            raise RuntimeError('"{}" does not belong to CpuFreqParameters'.format(name))
 
-    def initialize_user_directory(self, overwrite=False):
-        """
-        Initialize a fresh user environment creating the workload automation.
+    def _get_merged_value(self, core, param_name):
+        return self.runtime_parameters[param_name].merged_values["{}_{}".format(core, param_name)]
 
-        """
-        if os.path.exists(self.user_directory):
-            if not overwrite:
-                raise ConfigError('Environment {} already exists.'.format(self.user_directory))
-            shutil.rmtree(self.user_directory)
+    def _cross_validate(self, core):
+        min_freq = self._get_merged_value(core, "min_frequency")
+        max_frequency = self._get_merged_value(core, "max_frequency")
+        if max_frequency < min_freq:
+            msg = "{core}_max_frequency must be larger than {core}_min_frequency"
+            raise ConfigError(msg.format(core=core))
+        frequency = self._get_merged_value(core, "frequency")
+        if not min_freq <= frequency <= max_frequency:
+            msg = "{core}_frequency must be between {core}_min_frequency and {core}_max_frequency"
+            raise ConfigError(msg.format(core=core))
+        #TODO: more checks
 
-        self._expand_paths()
-        os.makedirs(self.dependencies_directory)
-        for path in self.plugin_paths:
-            os.makedirs(path)
+    def commit_to_device(self, target):
+        pass
+        # TODO: Write values to the device in the correct order etc.
 
-        with open(os.path.join(self.user_directory, 'config.yaml'), 'w') as wfh:
-            yaml.dump(self.to_pod())
+#####################
+### Configuration ###
+#####################
 
-        if os.getenv('USER') == 'root':
-            # If running with sudo on POSIX, change the ownership to the real user.
-            real_user = os.getenv('SUDO_USER')
-            if real_user:
-                import pwd  # done here as module won't import on win32
-                user_entry = pwd.getpwnam(real_user)
-                uid, gid = user_entry.pw_uid, user_entry.pw_gid
-                os.chown(self.user_directory, uid, gid)
-                # why, oh why isn't there a recusive=True option for os.chown?
-                for root, dirs, files in os.walk(self.user_directory):
-                    for d in dirs:
-                        os.chown(os.path.join(root, d), uid, gid)
-                    for f in files:
-                        os.chown(os.path.join(root, f), uid, gid)
 
-    @staticmethod
-    def from_pod(pod):
-        instance = WAConfiguration()
-        instance.load(pod)
+def _to_pod(cfg_point, value):
+    if is_pod(value):
+        return value
+    if hasattr(cfg_point.kind, 'to_pod'):
+        return value.to_pod()
+    msg = '{} value "{}" is not serializable'
+    raise ValueError(msg.format(cfg_point.name, value))
+
+
+class Configuration(object):
+
+    config_points = []
+    name = ''
+
+    # The line below must be repeated in all subclasses
+    configuration = {cp.name: cp for cp in config_points}
+
+    @classmethod
+    def from_pod(cls, pod):
+        instance = cls()
+        for cfg_point in cls.config_points:
+            if cfg_point.name in pod:
+                value = pod.pop(cfg_point.name)
+                if hasattr(cfg_point.kind, 'from_pod'):
+                    value = cfg_point.kind.from_pod(value)
+                cfg_point.set_value(instance, value)
+        if pod:
+            msg = 'Invalid entry(ies) for "{}": "{}"'
+            raise ValueError(msg.format(cls.name, '", "'.join(pod.keys())))
         return instance
 
+    def __init__(self):
+        for confpoint in self.config_points:
+            confpoint.set_value(self, check_mandatory=False)
+
+    def set(self, name, value, check_mandatory=True):
+        if name not in self.configuration:
+            raise ConfigError('Unknown {} configuration "{}"'.format(self.name,
+                                                                     name))
+        self.configuration[name].set_value(self, value,
+                                           check_mandatory=check_mandatory)
+
+    def update_config(self, values, check_mandatory=True):
+        for k, v in values.iteritems():
+            self.set(k, v, check_mandatory=check_mandatory)
+
+    def validate(self):
+        for cfg_point in self.config_points:
+            cfg_point.validate(self)
+
     def to_pod(self):
-        return dict(
-            user_directory=self.user_directory,
-            plugin_packages=self.plugin_packages,
-            plugin_paths=self.plugin_paths,
-            plugin_ignore_paths=self.plugin_ignore_paths,
-            logging=self.logging,
-        )
-
-    def _expand_paths(self):
-        self.dependencies_directory = os.path.join(self.user_directory, 
-                                                   self.dependencies_directory)
-        expanded = []
-        for path in self.plugin_paths:
-            path = os.path.expanduser(path)
-            path = os.path.expandvars(path)
-            expanded.append(os.path.join(self.user_directory, path))
-        self.plugin_paths = expanded
-        expanded = []
-        for path in self.plugin_ignore_paths:
-            path = os.path.expanduser(path)
-            path = os.path.expandvars(path)
-            exanded.append(os.path.join(self.user_directory, path))
-        self.pluing_ignore_paths = expanded
+        pod = {}
+        for cfg_point in self.config_points:
+            value = getattr(self, cfg_point.name, None)
+            pod[cfg_point.name] = _to_pod(cfg_point, value)
+        return pod
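+
+
+# Illustrative sketch (not part of the patch; the subclass below is
+# hypothetical). Subclasses declare their config_points and must re-bind
+# ``configuration`` so that lookups in set() can see them:
+#
+#     class ExampleConfiguration(Configuration):
+#         name = 'Example Configuration'
+#         config_points = [
+#             ConfigurationPoint('greeting', kind=str, default='hello'),
+#         ]
+#         configuration = {cp.name: cp for cp in config_points}
+#
+#     cfg = ExampleConfiguration()       # defaults are applied on creation
+#     cfg.set('greeting', 'hi')
+#     assert cfg.to_pod() == {'greeting': 'hi'}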
 
 
-class PluginConfiguration(object):
-    """ Maintains a mapping of plugin_name --> plugin_config. """
+# This is the configuration for the core WA framework.
+class MetaConfiguration(Configuration):
 
-    def __init__(self, loader=pluginloader):
-        self.loader = loader
-        self.config = {}
+    name = "Meta Configuration"
 
-    def update(self, name, config):
-        if not hasattr(config, 'get'):
-            raise ValueError('config must be a dict-like object got: {}'.format(config))
-        name, alias_config = self.loader.resolve_alias(name)
-        existing_config = self.config.get(name)
-        if existing_config is None:
-            existing_config = alias_config
+    plugin_packages = [
+        'wa.commands',
+        'wa.workloads',
+        #'wa.instrumentation',
+        #'wa.result_processors',
+        #'wa.managers',
+        'wa.framework.target.descriptor',
+        'wa.framework.resource_getters',
+    ]
 
-        new_config = config or {}
-        plugin_cls = self.loader.get_plugin_class(name)
+    config_points = [
+        ConfigurationPoint(
+            'user_directory',
+            description="""
+            Path to the user directory. This is the location WA will look for
+            user configuration, additional plugins and plugin dependencies.
+            """,
+            kind=str,
+            default=os.path.join(os.path.expanduser('~'), '.workload_automation'),
+        ),
+        ConfigurationPoint(
+            'assets_repository',
+            description="""
+            The local mount point for the filer hosting WA assets.
+            """,
+        ),
+        ConfigurationPoint(
+            'logging',
+            kind=LoggingConfig,
+            default=LoggingConfig.defaults,
+            description="""
+            WA logging configuration. This should be a dict with a subset
+            of the following keys::
+
+            :normal_format: Logging format used for console output
+            :verbose_format: Logging format used for verbose console output
+            :file_format: Logging format used for run.log
+            :color: If ``True`` (the default), console logging output will
+                    contain bash color escape codes. Set this to ``False`` if
+                    console output will be piped somewhere that does not know
+                    how to handle those.
+            """,
+        ),
+        ConfigurationPoint(
+            'verbosity',
+            kind=int,
+            default=0,
+            description="""
+            Verbosity of console output.
+            """,
+        ),
+        ConfigurationPoint(  # TODO: needs some format for dates etc.; comes from config
+            'default_output_directory',
+            default="wa_output",
+            description="""
+            The default output directory that will be created if not
+            specified when invoking a run.
+            """,
+        ),
+    ]
+    configuration = {cp.name: cp for cp in config_points}
+
+    @property
+    def dependencies_directory(self):
+        return os.path.join(self.user_directory, 'dependencies')
+
+    @property
+    def plugins_directory(self):
+        return os.path.join(self.user_directory, 'plugins')
+
+    @property
+    def user_config_file(self):
+        return os.path.join(self.user_directory, 'config.yaml')
+
+    def __init__(self, environ):
+        super(MetaConfiguration, self).__init__()
+        user_directory = environ.pop('WA_USER_DIRECTORY', '')
+        if user_directory:
+            self.set('user_directory', user_directory)
 
 
+# This is generic top-level configuration for WA runs.
+class RunConfiguration(Configuration):
 
-def merge_config_values(base, other):
-    """
-    This is used to merge two objects, typically when setting the value of a
-    ``ConfigurationPoint``. First, both objects are categorized into
+    name = "Run Configuration"
 
-        c: A scalar value. Basically, most objects. These values
-           are treated as atomic, and not mergeable.
-        s: A sequence. Anything iterable that is not a dict or
-           a string (strings are considered scalars).
-        m: A key-value mapping. ``dict`` and its derivatives.
-        n: ``None``.
-        o: A mergeable object; this is an object that implements both
-          ``merge_with`` and ``merge_into`` methods.
+    # Metadata is kept separate because it is not loaded into the auto-generated config file.
+    meta_data = [
+        ConfigurationPoint('run_name', kind=str,
+                           description='''
+                           A string that labels the WA run that is being performed. This would typically
+                           be set in the ``config`` section of an agenda (see
+                           :ref:`configuration in an agenda <configuration_in_agenda>`) rather than in the config file.
 
-    The merge rules based on the two categories are then as follows:
+                           '''),
+        ConfigurationPoint('project', kind=str,
+                           description='''
+                           A string naming the project for which data is being collected. This may be
+                           useful, e.g. when uploading data to a shared database that is populated from
+                           multiple projects.
+                           '''),
+        ConfigurationPoint('project_stage', kind=dict,
+                           description='''
+                           A dict or a string that allows adding an additional identifier. This may be
+                           useful for long-running projects.
+                           '''),
+    ]
+    config_points = [
+        ConfigurationPoint('execution_order', kind=str, default='by_iteration',
+                           allowed_values=['by_iteration', 'by_spec', 'by_section', 'random'],
+                           description='''
+                           Defines the order in which the agenda spec will be executed. At the moment,
+                           the following execution orders are supported:
 
-        (c1, c2) --> c2
-        (s1, s2) --> s1 . s2
-        (m1, m2) --> m1 . m2
-        (c, s) --> [c] . s
-        (s, c) --> s . [c]
-        (s, m) --> s . [m]
-        (m, s) --> [m] . s
-        (m, c) --> ERROR
-        (c, m) --> ERROR
-        (o, X) --> o.merge_with(X)
-        (X, o) --> o.merge_into(X)
-        (X, n) --> X
-        (n, X) --> X
+                           ``"by_iteration"``
+                             The first iteration of each workload spec is executed one after the other,
+                             so all workloads are executed before proceeding on to the second iteration.
+                             E.g. A1 B1 C1 A2 C2 A3. This is the default if no order is explicitly specified.
 
-    where:
+                             In case of multiple sections, this will spread them out, such that specs
+                             from the same section are further apart. E.g. given sections X and Y, global
+                             specs A and B, and two iterations, this will run ::
 
-        '.'  means concatenation (for maps, contcationation of (k, v) streams
-             then converted back into a map). If the types of the two objects
-             differ, the type of ``other`` is used for the result.
-        'X'  means "any category"
-        '[]' used to indicate a literal sequence (not necessarily a ``list``).
-             when this is concatenated with an actual sequence, that sequencies
-             type is used.
+                                     X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
 
-    notes:
+                           ``"by_section"``
+                             Same as ``"by_iteration"``, however this will group specs from the same
+                             section together, so given sections X and Y, global specs A and B, and two iterations,
+                             this will run ::
 
-        - When a mapping is combined with a sequence, that mapping is
-          treated as a scalar value.
-        - When combining two mergeable objects, they're combined using
-          ``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)).
-        - Combining anything with ``None`` yields that value, irrespective
-          of the order. So a ``None`` value is eqivalent to the corresponding
-          item being omitted.
-        - When both values are scalars, merging is equivalent to overwriting.
-        - There is no recursion (e.g. if map values are lists, they will not
-          be merged; ``other`` will overwrite ``base`` values). If complicated
-          merging semantics (such as recursion) are required, they should be
-          implemented within custom mergeable types (i.e. those that implement
-          ``merge_with`` and ``merge_into``).
+                                     X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
 
-    While this can be used as a generic "combine any two arbitrary objects" 
-    function, the semantics have been selected specifically for merging
-    configuration point values.
+                           ``"by_spec"``
+                             All iterations of the first spec are executed before moving on to the next
+                             spec. E.g. A1 A2 A3 B1 C1 C2. This may also be specified as ``"classic"``,
+                             as this was the way workloads were executed in earlier versions of WA.
 
-    """
-    cat_base = categorize(base)
-    cat_other = categorize(other)
+                           ``"random"``
+                             Execution order is entirely random.
+                           '''),
+        ConfigurationPoint('reboot_policy', kind=RebootPolicy, default='as_needed',
+                           allowed_values=RebootPolicy.valid_policies,
+                           description='''
+                           This defines when during execution of a run the Device will be rebooted. The
+                           possible values are:
 
-    if cat_base == 'n':
-        return other
-    elif cat_other == 'n':
-        return base
+                           ``"never"``
+                              The device will never be rebooted.
+                           ``"initial"``
+                              The device will be rebooted when the execution first starts, just before
+                              executing the first workload spec.
+                           ``"each_spec"``
+                              The device will be rebooted before running a new workload spec.
+                              Note: this acts the same as each_iteration when execution order is set to by_iteration
+                           ``"each_iteration"``
+                              The device will be rebooted before each new iteration.
+                           '''),
+        ConfigurationPoint('device', kind=str, mandatory=True,
+                           description='''
+                           This setting defines what specific Device subclass will be used to
+                           interact with the connected device. Obviously, this must match your setup.
+                           '''),
+        ConfigurationPoint('retry_on_status', kind=status_list,
+                           default=['FAILED', 'PARTIAL'],
+                           allowed_values=ITERATION_STATUS,
+                           description='''
+                           This is a list of statuses on which a job will be considered to have failed and
+                           will be automatically retried up to ``max_retries`` times. This defaults to
+                           ``["FAILED", "PARTIAL"]`` if not set. Possible values are:
 
-    if cat_base == 'o':
-        return base.merge_with(other)
-    elif cat_other == 'o':
-        return other.merge_into(base)
+                           ``"OK"``
+                             This iteration has completed and no errors have been detected.
 
-    if cat_base == 'm':
-        if cat_other == 's':
-            return merge_sequencies([base], other)
-        elif cat_other == 'm':
-            return merge_maps(base, other)
-        else:
-            message = 'merge error ({}, {}): "{}" and "{}"'
-            raise ValueError(message.format(cat_base, cat_other, base, other))
-    elif cat_base == 's':
-        if cat_other == 's':
-            return merge_sequencies(base, other)
-        else:
-            return merge_sequencies(base, [other])
-    else:  # cat_base == 'c'
-        if cat_other == 's':
-            return merge_sequencies([base], other)
-        elif cat_other == 'm':
-            message = 'merge error ({}, {}): "{}" and "{}"'
-            raise ValueError(message.format(cat_base, cat_other, base, other))
-        else:
-            return other
+                           ``"PARTIAL"``
+                           One or more instruments have failed (the iteration may still be running).
+
+                           ``"FAILED"``
+                           The workload itself has failed.
+
+                           ``"ABORTED"``
+                           The user interupted the workload
+                           '''),
+        ConfigurationPoint('max_retries', kind=int, default=3,
+                           description='''
+                           The maximum number of times failed jobs will be retried before giving up. If
+                           not set, this will default to ``3``.
+
+                           .. note:: this number does not include the original attempt
+                           '''),
+    ]
+    configuration = {cp.name: cp for cp in config_points + meta_data}
+
+    def __init__(self):
+        super(RunConfiguration, self).__init__()
+        for confpoint in self.meta_data:
+            confpoint.set_value(self, check_mandatory=False)
+        self.device_config = None
+
+    def merge_device_config(self, plugin_cache):
+        """
+        Merges global device config and validates that it is correct for the
+        selected device.
+        """
+        # pylint: disable=no-member
+        if self.device is None:
+            msg = 'Attempting to merge device config with unspecified device'
+            raise RuntimeError(msg)
+        self.device_config = plugin_cache.get_plugin_config(self.device,
+                                                            generic_name="device_config")
+
+    def to_pod(self):
+        pod = super(RunConfiguration, self).to_pod()
+        pod['device_config'] = dict(self.device_config or {})
+        return pod
+
+    @classmethod
+    def from_pod(cls, pod):
+        meta_pod = {}
+        for cfg_point in cls.meta_data:
+            meta_pod[cfg_point.name] = pod.pop(cfg_point.name, None)
+
+        instance = super(RunConfiguration, cls).from_pod(pod)
+        for cfg_point in cls.meta_data:
+            cfg_point.set_value(instance, meta_pod[cfg_point.name])
+
+        return instance
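+
+
+# Illustrative sketch (not part of the patch; 'generic_android' is a
+# hypothetical device name). Both regular config points and metadata can be
+# set by name, and to_pod() always carries a 'device_config' entry:
+#
+#     rc = RunConfiguration()
+#     rc.set('device', 'generic_android')
+#     rc.set('run_name', 'example-run')    # metadata is settable too
+#     pod = rc.to_pod()
+#     assert pod['device'] == 'generic_android'
+#     assert pod['device_config'] == {}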
 
 
-def merge_sequencies(s1, s2):
-    return type(s2)(chain(s1, s2))
+class JobSpec(Configuration):
+
+    name = "Job Spec"
+
+    config_points = [
+        ConfigurationPoint('iterations', kind=int, default=1,
+                           description='''
+                           How many times to repeat this workload spec
+                           '''),
+        ConfigurationPoint('workload_name', kind=str, mandatory=True,
+                           aliases=["name"],
+                           description='''
+                           The name of the workload to run.
+                           '''),
+        ConfigurationPoint('workload_parameters', kind=obj_dict,
+                           aliases=["params", "workload_params"],
+                           description='''
+                           Parameters to be passed to the workload.
+                           '''),
+        ConfigurationPoint('runtime_parameters', kind=obj_dict,
+                           aliases=["runtime_params"],
+                           description='''
+                           Runtime parameters to be set prior to running
+                           the workload.
+                           '''),
+        ConfigurationPoint('boot_parameters', kind=obj_dict,
+                           aliases=["boot_params"],
+                           description='''
+                           Parameters to be used when rebooting the target
+                           prior to running the workload.
+                           '''),
+        ConfigurationPoint('label', kind=str,
+                           description='''
+                           Similar to IDs, but without the uniqueness restriction.
+                           If specified, labels will be used by some result
+                           processors instead of (or in addition to) the workload
+                           name. For example, the csv result processor will put
+                           the label in the "workload" column of the CSV file.
+                           '''),
+        ConfigurationPoint('instrumentation', kind=toggle_set, merge=True,
+                           aliases=["instruments"],
+                           description='''
+                           The instruments to enable (or disable, by prefixing
+                           them with a ``~``) during this workload spec.
+                           '''),
+        ConfigurationPoint('flash', kind=dict, merge=True,
+                           description='''
+
+                           '''),
+        ConfigurationPoint('classifiers', kind=dict, merge=True,
+                           description='''
+                           Classifiers allow you to tag metrics from this workload
+                           spec to help in post-processing them. These are often
+                           used to help identify what runtime_parameters were used
+                           for results when post processing.
+                           '''),
+    ]
+    configuration = {cp.name: cp for cp in config_points}
+
+    @classmethod
+    def from_pod(cls, pod):
+        job_id = pod.pop('id')
+        instance = super(JobSpec, cls).from_pod(pod)
+        instance.id = job_id
+        return instance
+
+    @property
+    def section_id(self):
+        if self.id is not None:
+            return self.id.rsplit('-', 1)[0]
+
+    @property
+    def workload_id(self):
+        if self.id is not None:
+            return self.id.rsplit('-', 1)[-1]
+
+    def __init__(self):
+        super(JobSpec, self).__init__()
+        self.to_merge = defaultdict(OrderedDict)
+        self._sources = []
+        self.id = None
+
+    def to_pod(self):
+        pod = super(JobSpec, self).to_pod()
+        pod['id'] = self.id
+        return pod
+
+    def update_config(self, source, check_mandatory=True):
+        self._sources.append(source)
+        values = source.config
+        for k, v in values.iteritems():
+            if k == "id":
+                continue
+            elif k.endswith('_parameters'):
+                if v:
+                    self.to_merge[k][source] = copy(v)
+            else:
+                try:
+                    self.set(k, v, check_mandatory=check_mandatory)
+                except ConfigError as e:
+                    msg = 'Error in {}:\n\t{}'
+                    raise ConfigError(msg.format(source.name, e.message))
+
+    def merge_workload_parameters(self, plugin_cache):
+        # merge global generic and specific config
+        workload_params = plugin_cache.get_plugin_config(self.workload_name,
+                                                         generic_name="workload_parameters")
+
+        cfg_points = plugin_cache.get_plugin_parameters(self.workload_name)
+        for source in self._sources:
+            config = self.to_merge["workload_parameters"].get(source)
+            if config is None:
+                continue
+
+            for name, cfg_point in cfg_points.iteritems():
+                if name in config:
+                    value = config.pop(name)
+                    cfg_point.set_value(workload_params, value,
+                                        check_mandatory=False)
+            if config:
+                msg = 'Conflicting entry(ies) for "{}" in {}: "{}"'
+                raise ConfigError(msg.format(self.workload_name, source.name,
+                                             '", "'.join(config)))
+
+        self.workload_parameters = workload_params
+
+    def merge_runtime_parameters(self, plugin_cache, target_manager):
+
+        # Order global runtime parameters
+        runtime_parameters = OrderedDict()
+        try:
+            global_runtime_params = plugin_cache.get_plugin_config("runtime_parameters")
+        except NotFoundError:
+            global_runtime_params = {}
+        for source in plugin_cache.sources:
+            if source in global_runtime_params:
+                runtime_parameters[source] = global_runtime_params[source]
+
+        # Add runtime parameters from JobSpec
+        for source, values in self.to_merge['runtime_parameters'].iteritems():
+            runtime_parameters[source] = values
+
+        # Merge
+        self.runtime_parameters = target_manager.merge_runtime_parameters(runtime_parameters)
+
+    def finalize(self):
+        self.id = "-".join([source.config['id'] for source in self._sources[1:]])  # ignore first id, "global"
 
 
-def merge_maps(m1, m2):
-    return type(m2)(chain(m1.iteritems(), m2.iteritems()))
+# This is used to construct the list of Jobs WA will run
+class JobGenerator(object):
+
+    name = "Jobs Configuration"
+
+    @property
+    def enabled_instruments(self):
+        self._read_enabled_instruments = True
+        return self._enabled_instruments
+
+    def __init__(self, plugin_cache):
+        self.plugin_cache = plugin_cache
+        self.ids_to_run = []
+        self.sections = []
+        self.workloads = []
+        self._enabled_instruments = set()
+        self._read_enabled_instruments = False
+        self.disabled_instruments = []
+
+        self.job_spec_template = obj_dict(not_in_dict=['name'])
+        self.job_spec_template.name = "globally specified job spec configuration"
+        self.job_spec_template.id = "global"
+        # Load defaults
+        for cfg_point in JobSpec.configuration.itervalues():
+            cfg_point.set_value(self.job_spec_template, check_mandatory=False)
+
+        self.root_node = SectionNode(self.job_spec_template)
+
+    def set_global_value(self, name, value):
+        JobSpec.configuration[name].set_value(self.job_spec_template, value,
+                                              check_mandatory=False)
+        if name == "instrumentation":
+            self.update_enabled_instruments(value)
+
+    def add_section(self, section, workloads):
+        new_node = self.root_node.add_section(section)
+        for workload in workloads:
+            new_node.add_workload(workload)
+
+    def add_workload(self, workload):
+        self.root_node.add_workload(workload)
+
+    def disable_instruments(self, instruments):
+        #TODO: Validate
+        self.disabled_instruments = ["~{}".format(i) for i in instruments]
+
+    def update_enabled_instruments(self, value):
+        if self._read_enabled_instruments:
+            msg = "'enabled_instruments' cannot be updated after it has been accessed"
+            raise RuntimeError(msg)
+        self._enabled_instruments.update(value)
+
+    def only_run_ids(self, ids):
+        if isinstance(ids, str):
+            ids = [ids]
+        self.ids_to_run = ids
+
+    def generate_job_specs(self, target_manager):
+        specs = []
+        for leaf in self.root_node.leaves():
+            workload_entries = leaf.workload_entries
+            sections = [leaf]
+            for ancestor in leaf.ancestors():
+                workload_entries = ancestor.workload_entries + workload_entries
+                sections.insert(0, ancestor)
+
+            for workload_entry in workload_entries:
+                job_spec = create_job_spec(workload_entry, sections, 
+                                           target_manager, self.plugin_cache,
+                                           self.disabled_instruments)
+                if self.ids_to_run:
+                    for job_id in self.ids_to_run:
+                        if job_id in job_spec.id:
+                            break
+                    else:
+                        continue
+                self.update_enabled_instruments(job_spec.instrumentation.values())
+                specs.append(job_spec)
+        return specs
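+
+
+# Illustrative driving sequence (not part of the patch; the section and
+# workload entries are assumed to come from the agenda parser, and
+# target_manager from the device side):
+#
+#     generator = JobGenerator(plugin_cache)
+#     generator.add_workload(workload_entry)      # top-level workload
+#     generator.add_section(section, workloads)   # plus a section of them
+#     generator.only_run_ids(['s1-wk1'])          # optional filtering
+#     specs = generator.generate_job_specs(target_manager)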
 
 
-def categorize(v):
-    if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'):
-        return 'o'
-    elif hasattr(v, 'iteritems'):
-        return 'm'
-    elif isiterable(v):
-        return 's'
-    elif v is None:
-        return 'n'
-    else:
-        return 'c'
+def create_job_spec(workload_entry, sections, target_manager, plugin_cache,
+                    disabled_instruments):
+    job_spec = JobSpec()
+
+    # PHASE 2.1: Merge general job spec configuration
+    for section in sections:
+        job_spec.update_config(section, check_mandatory=False)
+    job_spec.update_config(workload_entry, check_mandatory=False)
+
+    # PHASE 2.2: Merge global, section and workload entry "workload_parameters"
+    job_spec.merge_workload_parameters(plugin_cache)
+
+    # TODO: PHASE 2.3: Validate device runtime/boot parameters
+    job_spec.merge_runtime_parameters(plugin_cache, target_manager)
+    target_manager.validate_runtime_parameters(job_spec.runtime_parameters)
+
+    # PHASE 2.4: Disable globally disabled instrumentation
+    job_spec.set("instrumentation", disabled_instruments)
+    job_spec.finalize()
+
+    return job_spec
 
 
-settings = WAConfiguration()
+settings = MetaConfiguration(os.environ)
diff --git a/wa/framework/configuration/default.py b/wa/framework/configuration/default.py
new file mode 100644
index 00000000..5145a6b4
--- /dev/null
+++ b/wa/framework/configuration/default.py
@@ -0,0 +1,42 @@
+from wa.framework.configuration.core import MetaConfiguration, RunConfiguration
+from wa.framework.configuration.plugin_cache import PluginCache
+from wa.utils.serializer import yaml
+from wa.utils.doc import strip_inlined_text
+
+DEFAULT_INSTRUMENTS = ['execution_time',
+                       'interrupts',
+                       'cpufreq',
+                       'status',
+                       'standard',
+                       'csv']
+
+
+def _format_yaml_comment(param, short_description=False):
+    comment = param.description
+    comment = strip_inlined_text(comment)
+    if short_description:
+        comment = comment.split('\n\n')[0]
+    comment = comment.replace('\n', '\n# ')
+    comment = "# {}\n".format(comment)
+    return comment
+
+
+def _format_instruments(output):
+    plugin_cache = PluginCache()
+    output.write("instrumentation:\n")
+    for plugin in DEFAULT_INSTRUMENTS:
+        plugin_cls = plugin_cache.loader.get_plugin_class(plugin)
+        output.writelines(_format_yaml_comment(plugin_cls, short_description=True))
+        output.write(" - {}\n".format(plugin))
+        output.write("\n")
+
+
+def generate_default_config(path):
+    with open(path, 'w') as output:
+        for param in MetaConfiguration.config_points + RunConfiguration.config_points:
+            entry = {param.name: param.default}
+            comment = _format_yaml_comment(param)
+            output.writelines(comment)
+            yaml.dump(entry, output, default_flow_style=False)
+            output.write("\n")
+        _format_instruments(output)
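+
+
+# A minimal usage sketch (the path below is hypothetical; the actual location
+# is decided by the host-side initialisation code):
+#
+#   generate_default_config('/home/user/.workload_automation/config.yaml')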
diff --git a/wa/framework/configuration/execution.py b/wa/framework/configuration/execution.py
index 908d7583..442adf21 100644
--- a/wa/framework/configuration/execution.py
+++ b/wa/framework/configuration/execution.py
@@ -1,67 +1,222 @@
-from copy import copy
-from collections import OrderedDict
+import random
+from itertools import izip_longest, groupby, chain
 
 from wa.framework import pluginloader
-from wa.framework.exception import ConfigError
-from wa.framework.configuration.core import ConfigurationPoint
-from wa.framework.utils.types import TreeNode, list_of, identifier
+from wa.framework.configuration.core import (MetaConfiguration, RunConfiguration,
+                                             JobGenerator, settings)
+from wa.framework.configuration.parsers import ConfigParser
+from wa.framework.configuration.plugin_cache import PluginCache
 
 
-class ExecConfig(object):
+class CombinedConfig(object):
 
-    static_config_points = [
-            ConfigurationPoint(
-                'components',
-                kind=list_of(identifier),
-                description="""
-                Components to be activated.
-                """,
-            ),
-            ConfigurationPoint(
-                'runtime_parameters',
-                kind=list_of(identifier),
-                aliases=['runtime_params'],
-                description="""
-                Components to be activated.
-                """,
-            ),
-            ConfigurationPoint(
-                'classifiers',
-                kind=list_of(str),
-                description="""
-                Classifiers to be used. Classifiers are arbitrary key-value
-                pairs associated with with config. They may be used during output
-                proicessing and should be used to provide additional context for
-                collected results.
-                """,
-            ),
-    ]
+    @staticmethod
+    def from_pod(pod):
+        instance = CombinedConfig()
+        instance.settings = MetaConfiguration.from_pod(pod.get('settings', {}))
+        instance.run_config = RunConfiguration.from_pod(pod.get('run_config', {}))
+        return instance
 
-    config_points = None
+    def __init__(self, settings=None, run_config=None):
+        self.settings = settings
+        self.run_config = run_config
 
-    @classmethod
-    def _load(cls, load_global=False, loader=pluginloader):
-        if cls.config_points is None:
-            cls.config_points = {c.name: c for c in cls.static_config_points}
-            for plugin in loader.list_plugins():
-                cp = ConfigurationPoint(
-                    plugin.name,
-                    kind=OrderedDict,
-                    description="""
-                    Configuration for {} plugin.
-                    """.format(plugin.name)
-                )
-                cls._add_config_point(plugin.name, cp)
-                for alias in plugin.aliases:
-                    cls._add_config_point(alias.name, cp)
-
-    @classmethod
-    def _add_config_point(cls, name, cp):
-        if name in cls.config_points:
-            message = 'Cofig point for "{}" already exists ("{}")'
-            raise ValueError(message.format(name, cls.config_points[name].name))
+    def to_pod(self):
+        return {'settings': self.settings.to_pod(),
+                'run_config': self.run_config.to_pod()}
 
 
+class JobStatus:
+    PENDING = 0
+    RUNNING = 1
+    OK = 2
+    FAILED = 3
+    PARTIAL = 4
+    ABORTED = 5
+    PASSED = 6
 
-class GlobalExecConfig(ExecConfig):
 
+class Job(object):
+
+    def __init__(self, spec, iteration, context):
+        self.spec = spec
+        self.iteration = iteration
+        self.context = context
+        self.status = 'new'
+        self.workload = None
+        self.output = None
+
+    def load(self, target, loader=pluginloader):
+        self.workload = loader.get_workload(self.spec.workload_name,
+                                            target,
+                                            **self.spec.workload_parameters)
+        self.workload.init_resources(self.context)
+        self.workload.validate()
+
+
+class ConfigManager(object):
+    """
+    Represents run-time state of WA. Mostly used as a container for loaded 
+    configuration and discovered plugins.
+
+    This exists outside of any command or run and is associated with the running 
+    instance of wA itself.
+    """
+
+    @property
+    def enabled_instruments(self):
+        return self.jobs_config.enabled_instruments
+
+    @property
+    def job_specs(self):
+        if not self._jobs_generated:
+            msg = 'Attempting to access job specs before '\
+                  'jobs have been generated'
+            raise RuntimeError(msg)
+        return [j.spec for j in self._jobs]
+
+    @property
+    def jobs(self):
+        if not self._jobs_generated:
+            msg = 'Attempting to access jobs before '\
+                  'they have been generated'
+            raise RuntimeError(msg)
+        return self._jobs
+
+    def __init__(self, settings=settings):
+        self.settings = settings
+        self.run_config = RunConfiguration()
+        self.plugin_cache = PluginCache()
+        self.jobs_config = JobGenerator(self.plugin_cache)
+        self.loaded_config_sources = []
+        self._config_parser = ConfigParser()
+        self._jobs = []
+        self._jobs_generated = False
+        self.agenda = None
+
+    def load_config_file(self, filepath):
+        self._config_parser.load_from_path(self, filepath)
+        self.loaded_config_sources.append(filepath)
+
+    def load_config(self, values, source, wrap_exceptions=True):
+        self._config_parser.load(self, values, source,
+                                 wrap_exceptions=wrap_exceptions)
+        self.loaded_config_sources.append(source)
+
+    def get_plugin(self, name=None, kind=None, *args, **kwargs):
+        return self.plugin_cache.get_plugin(name, kind, *args, **kwargs)
+
+    def get_instruments(self, target):
+        instruments = []
+        for name in self.enabled_instruments:
+            instruments.append(self.get_plugin(name, kind='instrument', 
+                                               target=target))
+        return instruments
+
+    def finalize(self):
+        if not self.agenda:
+            msg = 'Attempting to finalize config before agenda has been set'
+            raise RuntimeError(msg)
+        self.run_config.merge_device_config(self.plugin_cache)
+        return CombinedConfig(self.settings, self.run_config)
+
+    def generate_jobs(self, context):
+        job_specs = self.jobs_config.generate_job_specs(context.tm)
+        exec_order = self.run_config.execution_order
+        for spec, i in permute_iterations(job_specs, exec_order):
+            job = Job(spec, i, context)
+            job.load(context.tm.target)
+            self._jobs.append(job)
+        self._jobs_generated = True
+
+
+def permute_by_job(specs):
+    """
+    This is that "classic" implementation that executes all iterations of a
+    workload spec before proceeding onto the next spec.
+
+    """
+    for spec in specs:
+        for i in range(1, spec.iterations + 1):
+            yield (spec, i)
+ 
+
+def permute_by_iteration(specs):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the
+    next iteration, i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2,
+    C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all
+    sections for the first global spec first, followed by all sections for the
+    second spec, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations,
+    this will run
+
+    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
+
+    """
+    groups = [list(g) for k, g in groupby(specs, lambda s: s.workload_id)]
+
+    all_tuples = []
+    for spec in chain(*groups):
+        all_tuples.append([(spec, i + 1) 
+                           for i in xrange(spec.iterations)])
+    for t in chain(*map(list, izip_longest(*all_tuples))):
+        if t is not None:
+            yield t
+
+
+def permute_by_section(specs):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the
+    next iteration, i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2,
+    C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all specs
+    for the first section followed by all specs for the second section, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations,
+    this will run
+
+    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
+
+    """
+    groups = [list(g) for k, g in groupby(specs, lambda s: s.section_id)]
+
+    all_tuples = []
+    for spec in chain(*groups):
+        all_tuples.append([(spec, i + 1) 
+                           for i in xrange(spec.iterations)])
+    for t in chain(*map(list, izip_longest(*all_tuples))):
+        if t is not None:
+            yield t
+ 
+
+def permute_randomly(specs):
+    """
+    This will generate a random permutation of specs/iteration tuples.
+
+    """
+    result = []
+    for spec in specs:
+        for i in xrange(1, spec.iterations + 1):
+            result.append((spec, i))
+    random.shuffle(result)
+    for t in result:
+        yield t
+
+
+permute_map = {
+    'by_iteration': permute_by_iteration,
+    'by_job': permute_by_job,
+    'by_section': permute_by_section,
+    'random': permute_randomly,
+}
+
+
+def permute_iterations(specs, exec_order):
+    if exec_order not in permute_map:
+        msg = 'Unknown execution order "{}"; must be in: {}'
+        raise ValueError(msg.format(exec_order, permute_map.keys()))
+    return permute_map[exec_order](specs)
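+
+
+# Illustrative dispatch (the spec list and the run() helper are hypothetical):
+#
+#   for spec, iteration in permute_iterations(job_specs, 'by_section'):
+#       run(spec, iteration)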
diff --git a/wa/framework/configuration/parsers.py b/wa/framework/configuration/parsers.py
new file mode 100644
index 00000000..df6d019e
--- /dev/null
+++ b/wa/framework/configuration/parsers.py
@@ -0,0 +1,308 @@
+#    Copyright 2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+
+from wlauto.exceptions import ConfigError
+from wlauto.utils.serializer import read_pod, SerializerSyntaxError
+from wlauto.utils.types import toggle_set, counter
+from wlauto.core.configuration.configuration import JobSpec
+
+# Error message used by get_aliased_param() below when more than one alias
+# for the same configuration point appears in a single entry.
+DUPLICATE_ENTRY_ERROR = 'Only one of {} may be specified in a single entry'
+
+
+###############
+### Parsers ###
+###############
+
+class ConfigParser(object):
+
+    def load_from_path(self, state, filepath):
+        self.load(state, _load_file(filepath, "Config"), filepath)
+
+    def load(self, state, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
+        try:
+            if 'run_name' in raw:
+                msg = '"run_name" can only be specified in the config '\
+                      'section of an agenda'
+                raise ConfigError(msg)
+
+            if 'id' in raw:
+                raise ConfigError('"id" cannot be set globally')
+
+            merge_result_processors_instruments(raw)
+
+            # Get WA core configuration
+            for cfg_point in state.settings.configuration.itervalues():
+                value = get_aliased_param(cfg_point, raw)
+                if value is not None:
+                    state.settings.set(cfg_point.name, value)
+
+            # Get run specific configuration
+            for cfg_point in state.run_config.configuration.itervalues():
+                value = get_aliased_param(cfg_point, raw)
+                if value is not None:
+                    state.run_config.set(cfg_point.name, value)
+
+            # Get global job spec configuration
+            for cfg_point in JobSpec.configuration.itervalues():
+                value = get_aliased_param(cfg_point, raw)
+                if value is not None:
+                    state.jobs_config.set_global_value(cfg_point.name, value)
+
+            for name, values in raw.iteritems():
+                # Assume that all leftover config is for a plug-in or a global
+                # alias; it is up to PluginCache to validate this assumption.
+                state.plugin_cache.add_configs(name, values, source)
+
+        except ConfigError as e:
+            if wrap_exceptions:
+                raise ConfigError('Error in "{}":\n{}'.format(source, str(e)))
+            else:
+                raise e
+
+
+class AgendaParser(object):
+
+    def load_from_path(self, state, filepath):
+        raw = _load_file(filepath, 'Agenda')
+        self.load(state, raw, filepath)
+
+    def load(self, state, raw, source):
+        try:
+            if not isinstance(raw, dict):
+                raise ConfigError('Invalid agenda, top level entry must be a dict')
+
+            self._populate_and_validate_config(state, raw, source)
+            sections = self._pop_sections(raw)
+            global_workloads = self._pop_workloads(raw)
+
+            if raw:
+                msg = 'Invalid top level agenda entry(ies): "{}"'
+                raise ConfigError(msg.format('", "'.join(raw.keys())))
+
+            sect_ids, wkl_ids = self._collect_ids(sections, global_workloads)
+            self._process_global_workloads(state, global_workloads, wkl_ids)
+            self._process_sections(state, sections, sect_ids, wkl_ids)
+
+            state.agenda = source
+
+        except (ConfigError, SerializerSyntaxError) as e:
+            raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
+
+    def _populate_and_validate_config(self, state, raw, source):
+        for name in ['config', 'global']:
+            entry = raw.pop(name, None)
+            if entry is None:
+                continue
+
+            if not isinstance(entry, dict):
+                msg = 'Invalid entry "{}" - must be a dict'
+                raise ConfigError(msg.format(name))
+
+            if 'run_name' in entry:
+                state.run_config.set('run_name', entry.pop('run_name'))
+
+            state.load_config(entry, source, wrap_exceptions=False)
+
+    def _pop_sections(self, raw):
+        sections = raw.pop("sections", [])
+        if not isinstance(sections, list):
+            raise ConfigError('Invalid entry "sections" - must be a list')
+        return sections
+
+    def _pop_workloads(self, raw):
+        workloads = raw.pop("workloads", [])
+        if not isinstance(workloads, list):
+            raise ConfigError('Invalid entry "workloads" - must be a list')
+        return workloads
+
+    def _collect_ids(self, sections, global_workloads):
+        seen_section_ids = set()
+        seen_workload_ids = set()
+
+        for workload in global_workloads:
+            workload = _get_workload_entry(workload)
+            _collect_valid_id(workload.get("id"), seen_workload_ids, "workload")
+
+        for section in sections:
+            _collect_valid_id(section.get("id"), seen_section_ids, "section")
+            for workload in section["workloads"] if "workloads" in section else []:
+                workload = _get_workload_entry(workload)
+                _collect_valid_id(workload.get("id"), seen_workload_ids, 
+                                  "workload")
+
+        return seen_section_ids, seen_workload_ids
+
+    def _process_global_workloads(self, state, global_workloads, seen_wkl_ids):
+        for workload_entry in global_workloads:
+            workload = _process_workload_entry(workload_entry, seen_wkl_ids,
+                                               state.jobs_config)
+            state.jobs_config.add_workload(workload)
+
+    def _process_sections(self, state, sections, seen_sect_ids, seen_wkl_ids):
+        for section in sections:
+            workloads = []
+            for workload_entry in section.pop("workloads", []):
+                workload = _process_workload_entry(workload_entry, seen_wkl_ids,
+                                                   state.jobs_config)
+                workloads.append(workload)
+
+            section = _construct_valid_entry(section, seen_sect_ids, 
+                                             "s", state.jobs_config)
+            state.jobs_config.add_section(section, workloads)
+
+
+########################
+### Helper functions ###
+########################
+
+def get_aliased_param(cfg_point, d, default=None, pop=True):
+    """
+    Given a ConfigurationPoint and a dict, this function will search the dict for
+    the ConfigurationPoint's name/aliases. If more than one is found it will raise
+    a ConfigError. If one (and only one) is found then it will return the value
+    for the ConfigurationPoint. If the name or aliases are present in the dict it will
+    return the "default" parameter of this function.
+    """
+    aliases = [cfg_point.name] + cfg_point.aliases
+    alias_map = [a for a in aliases if a in d]
+    if len(alias_map) > 1:
+        raise ConfigError(DUPLICATE_ENTRY_ERROR.format(aliases))
+    elif alias_map:
+        if pop:
+            return d.pop(alias_map[0])
+        else:
+            return d[alias_map[0]]
+    else:
+        return default
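+
+# Illustrative example (names hypothetical): for a ConfigurationPoint named
+# "device" with alias "device_name", and d = {'device_name': 'hikey'},
+# get_aliased_param(cfg_point, d) returns 'hikey' and pops the entry from d;
+# had both "device" and "device_name" been present, a ConfigError would be
+# raised.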
+
+
+def _load_file(filepath, error_name):
+    if not os.path.isfile(filepath):
+        raise ValueError("{} does not exist".format(filepath))
+    try:
+        raw = read_pod(filepath)
+    except SerializerSyntaxError as e:
+        raise ConfigError('Error parsing {} {}: {}'.format(error_name, filepath, e))
+    if not isinstance(raw, dict):
+        message = '{} does not contain a valid {} structure; top level must be a dict.'
+        raise ConfigError(message.format(filepath, error_name))
+    return raw
+
+
+def merge_result_processors_instruments(raw):
+    instr_config = JobSpec.configuration['instrumentation']
+    instruments = toggle_set(get_aliased_param(instr_config, raw, default=[]))
+    result_processors = toggle_set(raw.pop('result_processors', []))
+    if instruments and result_processors:
+        conflicts = instruments.conflicts_with(result_processors)
+        if conflicts:
+            msg = '"instrumentation" and "result_processors" have '\
+                  'conflicting entries: {}'
+            entires = ', '.join('"{}"'.format(c.strip("~")) for c in conflicts)
+            raise ConfigError(msg.format(entires))
+    raw['instrumentation'] = instruments.merge_with(result_processors)
+
+
+def _pop_aliased(d, names, entry_id):
+    name_count = sum(1 for n in names if n in d)
+    if name_count > 1:
+        names_list = ', '.join(names)
+        msg = 'Invalid workload entry "{}": at most one of ({}) must be specified.'
+        raise ConfigError(msg.format(entry_id, names_list))
+    for name in names:
+        if name in d:
+            return d.pop(name)
+    return None
+
+
+def _construct_valid_entry(raw, seen_ids, prefix, jobs_config):
+    workload_entry = {}
+
+    # Generate an automatic ID if the entry doesn't already have one
+    if 'id' not in raw:
+        while True:
+            new_id = '{}{}'.format(prefix, counter(name=prefix))
+            if new_id not in seen_ids:
+                break
+        workload_entry['id'] = new_id
+        seen_ids.add(new_id)
+    else:
+        workload_entry['id'] = raw.pop('id')
+
+    # Process instrumentation
+    merge_result_processors_instruments(raw)
+
+    # Validate all remaining entries against JobSpec configuration points
+    for name, cfg_point in JobSpec.configuration.iteritems():
+        value = get_aliased_param(cfg_point, raw)
+        if value is not None:
+            value = cfg_point.kind(value)
+            cfg_point.validate_value(name, value)
+            workload_entry[name] = value
+
+    wk_id = workload_entry['id']
+    param_names = ['workload_params', 'workload_parameters']
+    if prefix == 'wk':
+        param_names += ['params', 'parameters']
+    workload_entry["workload_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    param_names = ['runtime_parameters', 'runtime_params']
+    if prefix == 's':
+        param_names += ['params', 'parameters']
+    workload_entry["runtime_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    param_names = ['boot_parameters', 'boot_params']
+    workload_entry["boot_parameters"] = _pop_aliased(raw, param_names, wk_id)
+
+    if "instrumentation" in workload_entry:
+        jobs_config.update_enabled_instruments(workload_entry["instrumentation"])
+
+    # Raise an error if any unrecognised entries remain
+    if raw:
+        msg = 'Invalid entry(ies) in "{}": "{}"'
+        raise ConfigError(msg.format(workload_entry['id'], ', '.join(raw.keys())))
+
+    return workload_entry
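+
+# For illustration (values hypothetical): a raw entry such as
+# {'name': 'dhrystone', 'params': {'threads': 2}} processed with prefix "wk"
+# comes out with an auto-generated ID from the "wk" counter, e.g.
+# {'id': 'wk1', 'workload_parameters': {'threads': 2}, ...}.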
+
+
+def _collect_valid_id(entry_id, seen_ids, entry_type):
+    if entry_id is None:
+        return
+    if entry_id in seen_ids:
+        raise ConfigError('Duplicate {} ID "{}".'.format(entry_type, entry_id))
+    # "-" is reserved for joining section and workload IDs
+    if "-" in entry_id:
+        msg = 'Invalid {} ID "{}"; IDs cannot contain a "-"'
+        raise ConfigError(msg.format(entry_type, entry_id))
+    if entry_id == "global":
+        msg = 'Invalid {} ID "global"; is a reserved ID'
+        raise ConfigError(msg.format(entry_type))
+    seen_ids.add(entry_id)
+
+
+def _get_workload_entry(workload):
+    if isinstance(workload, basestring):
+        workload = {'name': workload}
+    elif not isinstance(workload, dict):
+        raise ConfigError('Invalid workload entry: "{}"'.format(workload))
+    return workload
+
+
+def _process_workload_entry(workload, seen_workload_ids, jobs_config):
+    workload = _get_workload_entry(workload)
+    workload = _construct_valid_entry(workload, seen_workload_ids, 
+                                      "wk", jobs_config)
+    return workload
+
diff --git a/wa/framework/configuration/plugin_cache.py b/wa/framework/configuration/plugin_cache.py
new file mode 100644
index 00000000..bfabb97c
--- /dev/null
+++ b/wa/framework/configuration/plugin_cache.py
@@ -0,0 +1,227 @@
+#    Copyright 2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from copy import copy
+from collections import defaultdict
+from itertools import chain
+
+from devlib.utils.misc import memoized
+
+from wa.framework import pluginloader
+from wa.framework.exception import ConfigError
+from wa.framework.target.descriptor import get_target_descriptions
+from wa.utils.types import obj_dict
+
+GENERIC_CONFIGS = ["device_config", "workload_parameters",
+                   "boot_parameters", "runtime_parameters"]
+
+
+class PluginCache(object):
+    """
+    The plugin cache is used to store configuration that cannot be processed at
+    this stage, whether that is because it is not yet known whether it is
+    needed (in the case of disabled plug-ins) or because it is not known what
+    it belongs to (in the case of "device_config" etc.). It also keeps track
+    of where configuration came from, and the priority order of said sources.
+    """
+
+    def __init__(self, loader=pluginloader):
+        self.loader = loader
+        self.sources = []
+        self.plugin_configs = defaultdict(lambda: defaultdict(dict))
+        self.global_alias_values = defaultdict(dict)
+        self.targets = {td.name: td for td in get_target_descriptions()}
+
+        # Generate a mapping of what global aliases belong to
+        self._global_alias_map = defaultdict(dict)
+        self._list_of_global_aliases = set()
+        for plugin in self.loader.list_plugins():
+            for param in plugin.parameters:
+                if param.global_alias:
+                    self._global_alias_map[plugin.name][param.global_alias] = param
+                    self._list_of_global_aliases.add(param.global_alias)
+
+    def add_source(self, source):
+        if source in self.sources:
+            msg = 'Source "{}" has already been added.'
+            raise RuntimeError(msg.format(source))
+        self.sources.append(source)
+
+    def add_global_alias(self, alias, value, source):
+        if source not in self.sources:
+            msg = "Source '{}' has not been added to the plugin cache."
+            raise RuntimeError(msg.format(source))
+
+        if not self.is_global_alias(alias):
+            msg = "'{}' is not a valid global alias"
+            raise RuntimeError(msg.format(alias))
+
+        self.global_alias_values[alias][source] = value
+
+    def add_configs(self, plugin_name, values, source):
+        if self.is_global_alias(plugin_name):
+            self.add_global_alias(plugin_name, values, source)
+            return
+        for name, value in values.iteritems():
+            self.add_config(plugin_name, name, value, source)
+
+    def add_config(self, plugin_name, name, value, source):
+        if source not in self.sources:
+            msg = "Source '{}' has not been added to the plugin cache."
+            raise RuntimeError(msg.format(source))
+
+        if (not self.loader.has_plugin(plugin_name) and 
+                plugin_name not in GENERIC_CONFIGS):
+            msg = 'configuration provided for unknown plugin "{}"'
+            raise ConfigError(msg.format(plugin_name))
+
+        if (plugin_name not in GENERIC_CONFIGS and
+                name not in self.get_plugin_parameters(plugin_name)):
+            msg = "'{}' is not a valid parameter for '{}'"
+            raise ConfigError(msg.format(name, plugin_name))
+
+        self.plugin_configs[plugin_name][source][name] = value
+
+    def is_global_alias(self, name):
+        return name in self._list_of_global_aliases
+
+    def get_plugin_config(self, plugin_name, generic_name=None):
+        config = obj_dict(not_in_dict=['name'])
+        config.name = plugin_name
+
+        if plugin_name not in GENERIC_CONFIGS:
+            self._set_plugin_defaults(plugin_name, config)
+            self._set_from_global_aliases(plugin_name, config)
+
+        if generic_name is None:
+            # Perform a simple merge with the order of sources representing
+            # priority
+            plugin_config = self.plugin_configs[plugin_name]
+            cfg_points = self.get_plugin_parameters(plugin_name)
+            for source in self.sources:
+                if source not in plugin_config:
+                    continue
+                for name, value in plugin_config[source].iteritems():
+                    cfg_points[name].set_value(config, value=value)
+        else:
+            # A more complicated merge that involves priority of sources and
+            # specificity
+            self._merge_using_priority_specificity(plugin_name, generic_name, config)
+
+        return config
+
+    def get_plugin(self, name, kind=None, *args, **kwargs):
+        config = self.get_plugin_config(name)
+        kwargs = dict(config.items() + kwargs.items())
+        return self.loader.get_plugin(name, kind=kind, *args, **kwargs)
+
+    @memoized
+    def get_plugin_parameters(self, name):
+        if name in self.targets:
+            return self._get_target_params(name)
+        params = self.loader.get_plugin_class(name).parameters
+        return {param.name: param for param in params}
+
+    def _set_plugin_defaults(self, plugin_name, config):
+        cfg_points = self.get_plugin_parameters(plugin_name)
+        for cfg_point in cfg_points.itervalues():
+            cfg_point.set_value(config, check_mandatory=False)
+
+    def _set_from_global_aliases(self, plugin_name, config):
+        for alias, param in self._global_alias_map[plugin_name].iteritems():
+            if alias in self.global_alias_values:
+                for source in self.sources:
+                    if source not in self.global_alias_values[alias]:
+                        continue
+                    val = self.global_alias_values[alias][source]
+                    param.set_value(config, value=val)
+
+    def _get_target_params(self, name):
+        td = self.targets[name]
+        params = {p.name: p for p in chain(td.target_params, td.platform_params)}
+        #params['connection_settings'] = {p.name: p for p in td.conn_params}
+        return params
+
+    # pylint: disable=too-many-nested-blocks, too-many-branches
+    def _merge_using_priority_specificity(self, specific_name, 
+                                          generic_name, final_config):
+        """
+        WA configuration can come from various sources of increasing priority,
+        as well as being specified in a generic and specific manner (e.g
+        ``device_config`` and ``nexus10`` respectivly). WA has two rules for
+        the priority of configuration:
+
+            - Configuration from higher priority sources overrides
+              configuration from lower priority sources.
+            - More specific configuration overrides less specific configuration.
+
+        There is a situation where these two rules come into conflict. When a
+        generic configuration is given in config source of high priority and a
+        specific configuration is given in a config source of lower priority.
+        In this situation it is not possible to know the end users intention
+        and WA will error.
+
+        :param generic_name: The name of the generic configuration
+                             e.g ``device_config``
+        :param specific_name: The name of the specific configuration used
+                              e.g ``nexus10``
+        :param cfg_point: A dict of ``ConfigurationPoint``s to be used when
+                          merging configuration.  keys=config point name, 
+                          values=config point
+
+        :rtype: A fully merged and validated configuration in the form of a
+                obj_dict.
+        """
+        generic_config = copy(self.plugin_configs[generic_name])
+        specific_config = copy(self.plugin_configs[specific_name])
+        cfg_points = self.get_plugin_parameters(specific_name)
+        sources = self.sources
+        seen_specific_config = defaultdict(list)
+
+        # set_value uses the 'name' attribute of the passed object in its
+        # error messages; to ensure these messages make sense, the name has
+        # to be changed several times during the course of this function.
+        final_config.name = specific_name
+
+        # pylint: disable=too-many-nested-blocks
+        for source in sources:
+            try:
+                if source in generic_config:
+                    final_config.name = generic_name
+                    for name, cfg_point in cfg_points.iteritems():
+                        if name in generic_config[source]:
+                            if name in seen_specific_config:
+                                msg = ('"{generic_name}" configuration "{config_name}" has already been '
+                                       'specified more specifically for {specific_name} in:\n\t\t{sources}')
+                                msg = msg.format(generic_name=generic_name,
+                                                 config_name=name,
+                                                 specific_name=specific_name,
+                                                 sources=", ".join(seen_specific_config[name]))
+                                raise ConfigError(msg)
+                            value = generic_config[source][name]
+                            cfg_point.set_value(final_config, value, check_mandatory=False)
+
+                if source in specific_config:
+                    final_config.name = specific_name
+                    for name, cfg_point in cfg_points.iteritems():
+                        if name in specific_config[source]:
+                            seen_specific_config[name].append(str(source))
+                            value = specific_config[source][name]
+                            cfg_point.set_value(final_config, value, check_mandatory=False)
+
+            except ConfigError as e:
+                raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
+
+        # Validate final configuration
+        final_config.name = specific_name
+        for cfg_point in cfg_points.itervalues():
+            cfg_point.validate(final_config)
diff --git a/wa/framework/configuration/tree.py b/wa/framework/configuration/tree.py
new file mode 100644
index 00000000..1cec5d38
--- /dev/null
+++ b/wa/framework/configuration/tree.py
@@ -0,0 +1,89 @@
+#    Copyright 2016 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class JobSpecSource(object):
+
+    kind = ""
+
+    def __init__(self, config, parent=None):
+        self.config = config
+        self.parent = parent
+
+    @property
+    def id(self):
+        return self.config['id']
+
+    @property
+    def name(self):
+        raise NotImplementedError()
+
+
+class WorkloadEntry(JobSpecSource):
+    kind = "workload"
+
+    @property
+    def name(self):
+        if self.parent.id == "global":
+            return 'workload "{}"'.format(self.id)
+        else:
+            return 'workload "{}" from section "{}"'.format(self.id, self.parent.id)
+
+
+class SectionNode(JobSpecSource):
+
+    kind = "section"
+
+    @property
+    def name(self):
+        if self.id == "global":
+            return "globally specified configuration"
+        else:
+            return 'section "{}"'.format(self.id)
+
+    @property
+    def is_leaf(self):
+        return not bool(self.children)
+
+    def __init__(self, config, parent=None):
+        super(SectionNode, self).__init__(config, parent=parent)
+        self.workload_entries = []
+        self.children = []
+
+    def add_section(self, section):
+        new_node = SectionNode(section, parent=self)
+        self.children.append(new_node)
+        return new_node
+
+    def add_workload(self, workload_config):
+        self.workload_entries.append(WorkloadEntry(workload_config, self))
+
+    def descendants(self):
+        for child in self.children:
+            for n in child.descendants():
+                yield n
+            yield child
+
+    def ancestors(self):
+        if self.parent is not None:
+            yield self.parent
+            for ancestor in self.parent.ancestors():
+                yield ancestor
+
+    def leaves(self):
+        if self.is_leaf:
+            yield self
+        else:
+            for n in self.descendants():
+                if n.is_leaf:
+                    yield n
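+
+
+# Illustrative traversal (section IDs hypothetical): for a root with child
+# sections "s1" and "s2", where "s2" has a child "s3", leaves() yields s1 and
+# s3, and s3.ancestors() yields s2 then the root -- this is how JobGenerator
+# reconstructs the full configuration chain for each job spec.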
diff --git a/wa/framework/entrypoint.py b/wa/framework/entrypoint.py
index f6bf4f51..3e73b910 100644
--- a/wa/framework/entrypoint.py
+++ b/wa/framework/entrypoint.py
@@ -12,72 +12,100 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import os
+
+
 import sys
 import argparse
 import logging
+import os
 import subprocess
-
-from wa.framework import pluginloader, log
-from wa.framework.configuration import settings
-from wa.framework.exception import WAError
-from wa.utils.doc import format_body
-from wa.utils.misc import init_argument_parser
-
-
 import warnings
+
+from wa.framework import pluginloader
+from wa.framework.command import init_argument_parser
+from wa.framework.configuration import settings
+from wa.framework.configuration.execution import ConfigManager
+from wa.framework.host import init_user_directory
+from wa.framework.exception import WAError, DevlibError, ConfigError
+from wa.utils import log
+from wa.utils.doc import format_body
+from wa.utils.misc import get_traceback
+
 warnings.filterwarnings(action='ignore', category=UserWarning, module='zope')
 
 
-logger = logging.getLogger('wa')
-
-
-def init_settings():
-    settings.load_environment()
-    if not os.path.isdir(settings.user_directory):
-        settings.initialize_user_directory()
-    settings.load_user_config()
-
-
-def get_argument_parser():
-    description = ("Execute automated workloads on a remote device and process "
-                    "the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
-                    "help for individual subcommands.")
-    parser = argparse.ArgumentParser(description=format_body(description, 80),
-                                        prog='wa',
-                                        formatter_class=argparse.RawDescriptionHelpFormatter,
-                                        )
-    init_argument_parser(parser)
-    return parser
+logger = logging.getLogger('command_line')
 
 
 def load_commands(subparsers):
     commands = {}
     for command in pluginloader.list_commands():
-        commands[command.name] = pluginloader.get_command(command.name, subparsers=subparsers)
+        commands[command.name] = pluginloader.get_command(command.name, 
+                                                          subparsers=subparsers)
     return commands
 
 
 def main():
+    if not os.path.exists(settings.user_directory):
+        init_user_directory()
+
     try:
-        log.init()
-        init_settings()
-        parser = get_argument_parser()
-        commands = load_commands(parser.add_subparsers(dest='command'))  # each command will add its own subparser
+
+        description = ("Execute automated workloads on a remote device and process "
+                       "the resulting output.\n\nUse \"wa <subcommand> -h\" to see "
+                       "help for individual subcommands.")
+        parser = argparse.ArgumentParser(description=format_body(description, 80),
+                                         prog='wa',
+                                         formatter_class=argparse.RawDescriptionHelpFormatter,
+                                         )
+        init_argument_parser(parser)
+
+        # load_commands will trigger plugin enumeration, and we want logging
+        # to be enabled for that, which requires the verbosity setting; however,
+        # the full argument parse cannot be completed until the commands are
+        # loaded, so parse just the base args for now to get the verbosity.
+        args, _ = parser.parse_known_args()
+        settings.set("verbosity", args.verbose)
+        log.init(settings.verbosity)
+
+        # each command will add its own subparser
+        commands = load_commands(parser.add_subparsers(dest='command'))  
         args = parser.parse_args()
-        settings.set('verbosity', args.verbose)
-        if args.config:
-            settings.load_config_file(args.config)
-        log.set_level(settings.verbosity)
+
+        config = ConfigManager()
+        config.load_config_file(settings.user_config_file)
+        for config_file in args.config:
+            if not os.path.exists(config_file):
+                raise ConfigError("Config file {} not found".format(config_file))
+            config.load_config_file(config_file)
+
         command = commands[args.command]
-        sys.exit(command.execute(args))
+        sys.exit(command.execute(config, args))
+
     except KeyboardInterrupt:
         logging.info('Got CTRL-C. Aborting.')
+        sys.exit(3)
+    except (WAError, DevlibError) as e:
+        logging.critical(e)
         sys.exit(1)
+    except subprocess.CalledProcessError as e:
+        tb = get_traceback()
+        logging.critical(tb)
+        command = e.cmd
+        if e.args:
+            command = '{} {}'.format(command, ' '.join(e.args))
+        message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
+        logging.critical(message.format(command, e.returncode, e.output))
+        sys.exit(2)
+    except SyntaxError as e:
+        tb = get_traceback()
+        logging.critical(tb)
+        message = 'Syntax Error in {}, line {}, offset {}:'
+        logging.critical(message.format(e.filename, e.lineno, e.offset))
+        logging.critical('\t{}'.format(e.msg))
+        sys.exit(2)
     except Exception as e:  # pylint: disable=broad-except
-        log_error(e, logger, critical=True)
-        if isinstance(e, WAError):
-            sys.exit(2)
-        else:
-            sys.exit(3)
-
+        tb = get_traceback()
+        logging.critical(tb)
+        logging.critical('{}({})'.format(e.__class__.__name__, e))
+        sys.exit(2)
diff --git a/wa/framework/exception.py b/wa/framework/exception.py
index 570c1e59..6f1f0693 100644
--- a/wa/framework/exception.py
+++ b/wa/framework/exception.py
@@ -12,7 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from wa.utils.misc import get_traceback, TimeoutError  # NOQA pylint: disable=W0611
+from devlib.exception import (DevlibError, HostError, TimeoutError,
+                              TargetError, TargetNotRespondingError)
+
+from wa.utils.misc import get_traceback
 
 
 class WAError(Exception):
@@ -35,11 +38,6 @@ class WorkloadError(WAError):
     pass
 
 
-class HostError(WAError):
-    """Problem with the host on which WA is running."""
-    pass
-
-
 class JobError(WAError):
     """Job execution error."""
     pass
@@ -113,7 +111,8 @@ class PluginLoaderError(WAError):
             if isinstance(orig, WAError):
                 reason = 'because of:\n{}: {}'.format(orig_name, orig)
             else:
-                reason = 'because of:\n{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
+                text = 'because of:\n{}\n{}: {}'
+                reason = text.format(get_traceback(self.exc_info), orig_name, orig)
             return '\n'.join([self.message, reason])
         else:
             return self.message
@@ -121,10 +120,12 @@ class PluginLoaderError(WAError):
 
 class WorkerThreadError(WAError):
     """
-    This should get raised  in the main thread if a non-WAError-derived exception occurs on
-    a worker/background thread. If a WAError-derived exception is raised in the worker, then
-    it that exception should be re-raised on the main thread directly -- the main point of this is
-    to preserve the backtrace in the output, and backtrace doesn't get output for WAErrors.
+    This should get raised in the main thread if a non-WAError-derived
+    exception occurs on a worker/background thread. If a WAError-derived
+    exception is raised in the worker, then that exception should be
+    re-raised on the main thread directly -- the main point of this is to
+    preserve the backtrace in the output, as the backtrace does not get
+    output for WAErrors.
 
     """
 
@@ -133,7 +134,8 @@ class WorkerThreadError(WAError):
         self.exc_info = exc_info
         orig = self.exc_info[1]
         orig_name = type(orig).__name__
-        message = 'Exception of type {} occured on thread {}:\n'.format(orig_name, thread)
-        message += '{}\n{}: {}'.format(get_traceback(self.exc_info), orig_name, orig)
+        text = 'Exception of type {} occurred on thread {}:\n{}\n{}: {}'
+        message = text.format(orig_name, thread, get_traceback(self.exc_info), 
+                              orig_name, orig)
         super(WorkerThreadError, self).__init__(message)
 
diff --git a/wa/framework/execution.py b/wa/framework/execution.py
index 1c072a3a..a5c79714 100644
--- a/wa/framework/execution.py
+++ b/wa/framework/execution.py
@@ -1,369 +1,823 @@
-import os
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=no-member
+
+"""
+This module contains the execution logic for Workload Automation. It defines the
+following actors:
+
+    WorkloadSpec: Identifies the workload to be run and defines parameters under
+                  which it should be executed.
+
+    Executor: Responsible for the overall execution process. It instantiates
+              and/or initialises the other actors, does any necessary
+              validation and kicks off the whole process.
+
+    Execution Context: Provides information about the current state of run
+                       execution to instrumentation.
+
+    RunInfo: Information about the current run.
+
+    Runner: This executes workload specs that are passed to it. It goes through
+            stages of execution, emitting an appropriate signal at each step to
+            allow instrumentation to do its stuff.
+
+"""
 import logging
-import shutil
+import os
 import random
+import subprocess
+import uuid
+from collections import Counter, defaultdict, OrderedDict
+from contextlib import contextmanager
 from copy import copy
-from collections import OrderedDict, defaultdict
+from datetime import datetime
+from itertools import izip_longest
 
-from wa.framework import pluginloader, signal, log
-from wa.framework.run import Runner, RunnerJob
-from wa.framework.output import RunOutput
-from wa.framework.actor import JobActor
+import wa.framework.signal as signal
+from wa.framework import instrumentation, pluginloader
+from wa.framework.configuration.core import settings
+from wa.framework.configuration.execution import JobStatus
+from wa.framework.exception import (WAError, ConfigError, TimeoutError,
+                                    InstrumentError, TargetError,
+                                    TargetNotRespondingError)
+from wa.framework.plugin import Artifact
 from wa.framework.resource import ResourceResolver
-from wa.framework.exception import ConfigError, NotFoundError
-from wa.framework.configuration import ConfigurationPoint, PluginConfiguration, WA_CONFIGURATION
-from wa.utils.serializer import read_pod
-from wa.utils.misc import ensure_directory_exists as _d, Namespace
-from wa.utils.types import list_of, identifier, caseless_string
+from wa.framework.target.info import TargetInfo
+from wa.utils.misc import (ensure_directory_exists as _d, 
+                           get_traceback, format_duration)
+from wa.utils.serializer import json
 
 
-__all__ = [
-    'Executor',
-    'ExecutionOutput',
-    'ExecutionwContext',
-    'ExecuteWorkloadContainerActor',
-    'ExecuteWorkloadJobActor',
-]
+# The maximum number of reboot attempts for an iteration.
+MAX_REBOOT_ATTEMPTS = 3
+
+# If something went wrong during device initialization, wait this
+# long (in seconds) before retrying. This is necessary, as retrying
+# immediately may not give the device enough time to recover to be able
+# to reboot.
+REBOOT_DELAY = 3
+
+
+class ExecutionContext(object):
+
+    def __init__(self, cm, tm, output):
+        self.logger = logging.getLogger('ExecContext')
+        self.cm = cm
+        self.tm = tm
+        self.output = output
+        self.logger.debug('Loading resource discoverers')
+        self.resolver = ResourceResolver(cm)
+        self.resolver.load()
+
+
+class OldExecutionContext(object):
+    """
+    Provides a context for instrumentation. Keeps track of things like
+    current workload and iteration.
+
+    This class also provides two status members that can be used by workloads
+    and instrumentation to keep track of arbitrary state. ``result``
+    is reset on each new iteration of a workload; run_status is maintained
+    throughout a Workload Automation run.
+
+    """
+
+    # These are the artifacts generated by the core framework.
+    default_run_artifacts = [
+        Artifact('runlog', 'run.log', 'log', mandatory=True,
+                 description='The log for the entire run.'),
+    ]
+
+    @property
+    def current_iteration(self):
+        if self.current_job:
+            spec_id = self.current_job.spec.id
+            return self.job_iteration_counts[spec_id]
+        else:
+            return None
+
+    @property
+    def job_status(self):
+        if not self.current_job:
+            return None
+        return self.current_job.result.status
+
+    @property
+    def workload(self):
+        return getattr(self.spec, 'workload', None)
+
+    @property
+    def spec(self):
+        return getattr(self.current_job, 'spec', None)
+
+    @property
+    def result(self):
+        return getattr(self.current_job, 'result', self.run_result)
+
+    def __init__(self, device_manager, config):
+        self.device_manager = device_manager
+        self.device = self.device_manager.target
+        self.config = config
+        self.reboot_policy = config.reboot_policy
+        self.output_directory = None
+        self.current_job = None
+        self.resolver = None
+        self.last_error = None
+        self.run_info = None
+        self.run_result = None
+        self.run_output_directory = self.config.output_directory
+        self.host_working_directory = self.config.meta_directory
+        self.iteration_artifacts = None
+        self.run_artifacts = copy(self.default_run_artifacts)
+        self.job_iteration_counts = defaultdict(int)
+        self.aborted = False
+        self.runner = None
+        if config.agenda.filepath:
+            self.run_artifacts.append(Artifact('agenda',
+                                               os.path.join(self.host_working_directory,
+                                                            os.path.basename(config.agenda.filepath)),
+                                               'meta',
+                                               mandatory=True,
+                                               description='Agenda for this run.'))
+        for i, filepath in enumerate(settings.config_paths, 1):
+            name = 'config_{}'.format(i)
+            path = os.path.join(self.host_working_directory,
+                                name + os.path.splitext(filepath)[1])
+            self.run_artifacts.append(Artifact(name,
+                                               path,
+                                               kind='meta',
+                                               mandatory=True,
+                                               description='Config file used for the run.'))
+
+    def initialize(self):
+        if not os.path.isdir(self.run_output_directory):
+            os.makedirs(self.run_output_directory)
+        self.output_directory = self.run_output_directory
+        self.resolver = ResourceResolver(self.config)
+        self.run_info = RunInfo(self.config)
+        self.run_result = RunResult(self.run_info, self.run_output_directory)
+
+    def next_job(self, job):
+        """Invoked by the runner when starting a new iteration of workload execution."""
+        self.current_job = job
+        self.job_iteration_counts[self.spec.id] += 1
+        if not self.aborted:
+            outdir_name = '_'.join(map(str, [self.spec.label, self.spec.id, self.current_iteration]))
+            self.output_directory = _d(os.path.join(self.run_output_directory, outdir_name))
+            self.iteration_artifacts = [wa for wa in self.workload.artifacts]
+        self.current_job.result.iteration = self.current_iteration
+        self.current_job.result.output_directory = self.output_directory
+
+    def end_job(self):
+        if self.current_job.result.status == JobStatus.ABORTED:
+            self.aborted = True
+        self.current_job = None
+        self.output_directory = self.run_output_directory
+
+    def add_metric(self, *args, **kwargs):
+        self.result.add_metric(*args, **kwargs)
+
+    def add_artifact(self, name, path, kind, *args, **kwargs):
+        if self.current_job is None:
+            self.add_run_artifact(name, path, kind, *args, **kwargs)
+        else:
+            self.add_iteration_artifact(name, path, kind, *args, **kwargs)
+
+    def add_run_artifact(self, name, path, kind, *args, **kwargs):
+        path = _check_artifact_path(path, self.run_output_directory)
+        self.run_artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
+
+    def add_iteration_artifact(self, name, path, kind, *args, **kwargs):
+        path = _check_artifact_path(path, self.output_directory)
+        self.iteration_artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
+
+    def get_artifact(self, name):
+        if self.iteration_artifacts:
+            for art in self.iteration_artifacts:
+                if art.name == name:
+                    return art
+        for art in self.run_artifacts:
+            if art.name == name:
+                return art
+        return None
+
+
+def _check_artifact_path(path, rootpath):
+    if path.startswith(rootpath):
+        return os.path.abspath(path)
+    rootpath = os.path.abspath(rootpath)
+    full_path = os.path.join(rootpath, path)
+    if not os.path.isfile(full_path):
+        msg = 'Cannot add artifact because {} does not exist.'
+        raise ValueError(msg.format(full_path))
+    return full_path
+
+
+class FakeTargetManager(object):
+    # TODO: this is a FAKE
+
+    def __init__(self, name, config):
+        self.device_name = name
+        self.device_config = config
+
+        from devlib import LocalLinuxTarget
+        self.target = LocalLinuxTarget({'unrooted': True})
+        
+    def get_target_info(self):
+        return TargetInfo(self.target)
+
+    def validate_runtime_parameters(self, params):
+        pass
+
+    def merge_runtime_parameters(self, params):
+        pass
+
+
+def init_target_manager(config):
+    return FakeTargetManager(config.device, config.device_config)
 
 
 class Executor(object):
-
-    def __init__(self, output):
-        self.output = output
-        self.config = ExecutionRunConfiguration()
-        self.agenda_string =  None
-        self.agenda = None
-        self.jobs = None
-        self.container = None
-        self.target = None
-
-    def load_config(self, filepath):
-        self.config.update(filepath)
-
-    def load_agenda(self, agenda_string):
-        if self.agenda:
-            raise RuntimeError('Only one agenda may be loaded per run.')
-        self.agenda_string = agenda_string
-        if os.path.isfile(agenda_string):
-            self.logger.debug('Loading agenda from {}'.format(agenda_string))
-            self.agenda = Agenda(agenda_string)
-            shutil.copy(agenda_string, self.output.config_directory)
-        else:
-            self.logger.debug('"{}" is not a file; assuming workload name.'.format(agenda_string))
-            self.agenda = Agenda()
-            self.agenda.add_workload_entry(agenda_string)
-
-    def disable_instrument(self, name):
-        if not self.agenda:
-            raise RuntimeError('initialize() must be invoked before disable_instrument()')
-        self.agenda.config['instrumentation'].append('~{}'.format(itd))
-
-    def initialize(self):
-        if not self.agenda:
-            raise RuntimeError('No agenda has been loaded.')
-        self.config.update(self.agenda.config)
-        self.config.consolidate()
-        self._initialize_target()
-        self._initialize_job_config()
-
-    def execute(self, selectors=None):
-        pass
-
-    def finalize(self):
-        pass
-
-    def _initialize_target(self):
-        pass
-
-    def _initialize_job_config(self):
-        self.agenda.expand(self.target)
-        for tup in agenda_iterator(self.agenda, self.config.execution_order):
-            glob, sect, workload, iter_number = tup
-
-
-def agenda_iterator(agenda, order):
     """
-    Iterates over all job components in an agenda, yielding tuples in the form ::
+    The ``Executor``'s job is to set up the execution context and pass it to a
+    ``Runner`` along with a loaded run specification. Once the ``Runner`` has
+    done its thing, the ``Executor`` performs some final reporting before
+    returning.
 
-        (global_entry, section_entry, workload_entry, iteration_number)
-
-    Which fully define the job to be crated. The order in which these tuples are 
-    yielded is determined by the ``order`` parameter which may be one of the following
-    values:
-
-    ``"by_iteration"`` 
-      The first iteration of each workload spec is executed one after the other,
-      so all workloads are executed before proceeding on to the second iteration.
-      E.g. A1 B1 C1 A2 C2 A3. This is the default if no order is explicitly specified.
- 
-      In case of multiple sections, this will spread them out, such that specs
-      from the same section are further part. E.g. given sections X and Y, global
-      specs A and B, and two iterations, this will run ::
- 
-                      X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
- 
-    ``"by_section"`` 
-      Same  as ``"by_iteration"``, however this will group specs from the same
-      section together, so given sections X and Y, global specs A and B, and two iterations, 
-      this will run ::
- 
-              X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
- 
-    ``"by_spec"``
-      All iterations of the first spec are executed before moving on to the next
-      spec. E.g. A1 A2 A3 B1 C1 C2. 
- 
-    ``"random"``
-      Execution order is entirely random.
+    The initial context setup involves combining configuration from various
+    sources, loading the required workloads, and loading and installing
+    instruments and result processors, etc. Static validation of the combined
+    configuration is also performed.
 
     """
-    # TODO: this would be a place to perform section expansions.
-    #       (e.g. sweeps, cross-products, etc).
+    # pylint: disable=R0915
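+    # A minimal usage sketch (hypothetical driver code; assumes a
+    # ConfigManager and an initialized RunOutput already exist):
+    #
+    #     executor = Executor()
+    #     executor.execute(config_manager, output)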
 
-    global_iterations = agenda.global_.number_of_iterations
-    all_iterations = [global_iterations]
-    all_iterations.extend([s.number_of_iterations for s in agenda.sections])
-    all_iterations.extend([w.number_of_iterations for w in agenda.workloads])
-    max_iterations = max(all_iterations)
-
-    if order == 'by_spec':
-        if agenda.sections:
-            for section in agenda.sections:
-                section_iterations = section.number_of_iterations or global_iterations
-                for workload in agenda.workloads + section.workloads:
-                    workload_iterations =  workload.number_of_iterations or section_iterations
-                    for i in xrange(workload_iterations):
-                        yield agenda.global_, section, workload, i
-        else:  # not sections
-            for workload in agenda.workloads:
-                workload_iterations =  workload.number_of_iterations or global_iterations
-                for i in xrange(workload_iterations):
-                    yield agenda.global_, None, workload, i
-    elif order == 'by_section':
-        for i in xrange(max_iterations):
-            if agenda.sections:
-                for section in agenda.sections:
-                    section_iterations = section.number_of_iterations or global_iterations
-                    for workload in agenda.workloads + section.workloads:
-                        workload_iterations =  workload.number_of_iterations or section_iterations
-                        if i < workload_iterations:
-                            yield agenda.global_, section, workload, i
-            else:  # not sections
-                for workload in agenda.workloads:
-                    workload_iterations =  workload.number_of_iterations or global_iterations
-                    if i < workload_iterations:
-                        yield agenda.global_, None, workload, i
-    elif order == 'by_iteration':
-        for i in xrange(max_iterations):
-            if agenda.sections:
-                for workload in agenda.workloads:
-                    for section in agenda.sections:
-                        section_iterations = section.number_of_iterations or global_iterations
-                        workload_iterations =  workload.number_of_iterations or section_iterations or global_iterations
-                        if i < workload_iterations:
-                            yield agenda.global_, section, workload, i
-                # Now do the section-specific workloads
-                for section in agenda.sections:
-                    section_iterations = section.number_of_iterations or global_iterations
-                    for workload in section.workloads:
-                        workload_iterations =  workload.number_of_iterations or section_iterations or global_iterations
-                        if i < workload_iterations:
-                            yield agenda.global_, section, workload, i
-            else:  # not sections
-                for workload in agenda.workloads:
-                    workload_iterations =  workload.number_of_iterations or global_iterations
-                    if i < workload_iterations:
-                        yield agenda.global_, None, workload, i
-    elif order == 'random':
-        tuples = list(agenda_iterator(data, order='by_section'))
-        random.shuffle(tuples)
-        for t in tuples:
-            yield t
-    else:
-        raise ValueError('Invalid order: "{}"'.format(order))
-
-
-
-class RebootPolicy(object):
-    """
-    Represents the reboot policy for the execution -- at what points the device
-    should be rebooted. This, in turn, is controlled by the policy value that is
-    passed in on construction and would typically be read from the user's settings.
-    Valid policy values are:
-
-    :never: The device will never be rebooted.
-    :as_needed: Only reboot the device if it becomes unresponsive, or needs to be flashed, etc.
-    :initial: The device will be rebooted when the execution first starts, just before
-              executing the first workload spec.
-    :each_spec: The device will be rebooted before running a new workload spec.
-    :each_iteration: The device will be rebooted before each new iteration.
-
-    """
-
-    valid_policies = ['never', 'as_needed', 'initial', 'each_spec', 'each_iteration']
-
-    def __init__(self, policy):
-        policy = policy.strip().lower().replace(' ', '_')
-        if policy not in self.valid_policies:
-            message = 'Invalid reboot policy {}; must be one of {}'.format(policy, ', '.join(self.valid_policies))
-            raise ConfigError(message)
-        self.policy = policy
-
-    @property
-    def can_reboot(self):
-        return self.policy != 'never'
-
-    @property
-    def perform_initial_boot(self):
-        return self.policy not in ['never', 'as_needed']
-
-    @property
-    def reboot_on_each_spec(self):
-        return self.policy in ['each_spec', 'each_iteration']
-
-    @property
-    def reboot_on_each_iteration(self):
-        return self.policy == 'each_iteration'
-
-    def __str__(self):
-        return self.policy
-
-    __repr__ = __str__
-
-    def __cmp__(self, other):
-        if isinstance(other, RebootPolicy):
-            return cmp(self.policy, other.policy)
-        else:
-            return cmp(self.policy, other)
-
-
-class RuntimeParameterSetter(object):
-    """
-    Manages runtime parameter state during execution.
-
-    """
-
-    @property
-    def target(self):
-        return self.target_assistant.target
-
-    def __init__(self, target_assistant):
-        self.target_assistant = target_assistant
-        self.to_set = defaultdict(list) # name --> list of values 
-        self.last_set = {}
-        self.to_unset = defaultdict(int) # name --> count
-
-    def validate(self, params):
-        self.target_assistant.validate_runtime_parameters(params)
-
-    def mark_set(self, params):
-        for name, value in params.iteritems():
-            self.to_set[name].append(value)
-            
-    def mark_unset(self, params):
-        for name in params.iterkeys():
-            self.to_unset[name] += 1
-
-    def inact_set(self):
-        self.target_assistant.clear_parameters()
-        for name in self.to_set:
-            self._set_if_necessary(name)
-        self.target_assitant.set_parameters()
-        
-    def inact_unset(self):
-        self.target_assistant.clear_parameters()
-        for name, count in self.to_unset.iteritems():
-            while count:
-                self.to_set[name].pop()
-                count -= 1
-            self._set_if_necessary(name)
-        self.target_assitant.set_parameters()
-
-    def _set_if_necessary(self, name):
-        if not self.to_set[name]:
-            return
-        new_value = self.to_set[name][-1]
-        prev_value = self.last_set.get(name)
-        if new_value != prev_value:
-            self.target_assistant.add_paramter(name, new_value)
-            self.last_set[name] = new_value
-
-
-class WorkloadExecutionConfig(object):
-
-    @staticmethod
-    def from_pod(pod):
-        return WorkloadExecutionConfig(**pod)
-
-    def __init__(self, workload_name, workload_parameters=None,
-                 runtime_parameters=None, components=None, 
-                 assumptions=None):
-        self.workload_name = workload_name or None
-        self.workload_parameters = workload_parameters or {}
-        self.runtime_parameters = runtime_parameters or {}
-        self.components = components or {}
-        self.assumpations = assumptions or {}
-
-    def to_pod(self):
-        return copy(self.__dict__)
-
-
-class WorkloadExecutionActor(JobActor):
-
-    def __init__(self, target, config, loader=pluginloader):
-        self.target = target
-        self.config = config
-        self.logger = logging.getLogger('exec')
+    def __init__(self):
+        self.logger = logging.getLogger('Executor')
+        self.error_logged = False
+        self.warning_logged = False
+        self.pluginloader = None
+        self.device_manager = None
+        self.device = None
         self.context = None
-        self.workload = loader.get_workload(config.workload_name, target, 
-                                            **config.workload_parameters)
-    def get_config(self):
-        return self.config.to_pod()
 
-    def initialize(self, context):
+    def execute(self, config_manager, output):
+        """
+        Execute the run specified by an agenda. Optionally, selectors may be
+        used to only selecute a subset of the specified agenda.
+
+        Params::
+
+            :state: a ``ConfigManager`` containing processed configuraiton
+            :output: an initialized ``RunOutput`` that will be used to
+                     store the results.
+
+        """
+        signal.connect(self._error_signalled_callback, signal.ERROR_LOGGED)
+        signal.connect(self._warning_signalled_callback, signal.WARNING_LOGGED)
+
+        self.logger.info('Initializing run')
+        self.logger.debug('Finalizing run configuration.')
+        config = config_manager.finalize()
+        output.write_config(config)
+
+        self.logger.info('Connecting to target')
+        target_manager = init_target_manager(config.run_config)
+        output.write_target_info(target_manager.get_target_info())
+
+        self.logger.info('Initializing execution context')
+        context = ExecutionContext(config_manager, target_manager, output)
+
+        self.logger.info('Generating jobs')
+        config_manager.generate_jobs(context)
+        output.write_job_specs(config_manager.job_specs)
+
+        self.logger.info('Installing instrumentation')
+        for instrument in config_manager.get_instruments(target_manager.target):
+            instrumentation.install(instrument)
+        instrumentation.validate()
+
+    def execute_postamble(self):
+        """
+        This happens after the run has completed. The overall results of the run are
+        summarised to the user.
+
+        """
+        result = self.context.run_result
+        counter = Counter()
+        for ir in result.iteration_results:
+            counter[ir.status] += 1
+        self.logger.info('Done.')
+        self.logger.info('Run duration: {}'.format(format_duration(self.context.run_info.duration)))
+        status_summary = 'Ran a total of {} iterations: '.format(sum(self.context.job_iteration_counts.values()))
+        parts = []
+        for status in JobStatus.values:
+            if status in counter:
+                parts.append('{} {}'.format(counter[status], status))
+        self.logger.info(status_summary + ', '.join(parts))
+        self.logger.info('Results can be found in {}'.format(self.config.output_directory))
+
+        if self.error_logged:
+            self.logger.warn('There were errors during execution.')
+            self.logger.warn('Please see {}'.format(self.config.log_file))
+        elif self.warning_logged:
+            self.logger.warn('There were warnings during execution.')
+            self.logger.warn('Please see {}'.format(self.config.log_file))
+
+    def _get_runner(self, result_manager):
+        if not self.config.execution_order or self.config.execution_order == 'by_iteration':
+            if self.config.reboot_policy == 'each_spec':
+                self.logger.info('each_spec reboot policy with the default by_iteration execution order is '
+                                 'equivalent to each_iteration policy.')
+            runnercls = ByIterationRunner
+        elif self.config.execution_order in ['classic', 'by_spec']:
+            runnercls = BySpecRunner
+        elif self.config.execution_order == 'by_section':
+            runnercls = BySectionRunner
+        elif self.config.execution_order == 'random':
+            runnercls = RandomRunner
+        else:
+            raise ConfigError('Unexpected execution order: {}'.format(self.config.execution_order))
+        return runnercls(self.device_manager, self.context, result_manager)
+
+    def _error_signalled_callback(self):
+        self.error_logged = True
+        signal.disconnect(self._error_signalled_callback, signal.ERROR_LOGGED)
+
+    def _warning_signalled_callback(self):
+        self.warning_logged = True
+        signal.disconnect(self._warning_signalled_callback, signal.WARNING_LOGGED)
+
+
+class Runner(object):
+    """
+    Base class for the runners below; concrete subclasses determine the
+    order in which jobs are executed by implementing ``init_queue()``.
+
+    """
+
+
+class RunnerJob(object):
+    """
+    Represents a single execution of a ``RunnerJobDescription``. There will be one created for each iteration
+    specified by ``RunnerJobDescription.number_of_iterations``.
+
+    """
+
+    def __init__(self, spec, retry=0):
+        self.spec = spec
+        self.retry = retry
+        self.iteration = None
+        self.result = JobStatus(self.spec)
+
+
+class OldRunner(object):
+    """
+    This class is responsible for actually performing a workload automation
+    run. The main responsibility of this class is to emit appropriate signals
+    at the various stages of the run to allow things like traces and other
+    instrumentation to hook into the process.
+
+    This is an abstract base class that defines each step of the run, but not
+    the order in which those steps are executed, which is left to the concrete
+    derived classes.
+
+    """
+    class _RunnerError(Exception):
+        """Internal runner error."""
+        pass
+
+    @property
+    def config(self):
+        return self.context.config
+
+    @property
+    def current_job(self):
+        if self.job_queue:
+            return self.job_queue[0]
+        return None
+
+    @property
+    def previous_job(self):
+        if self.completed_jobs:
+            return self.completed_jobs[-1]
+        return None
+
+    @property
+    def next_job(self):
+        if self.job_queue:
+            if len(self.job_queue) > 1:
+                return self.job_queue[1]
+        return None
+
+    @property
+    def spec_changed(self):
+        if self.previous_job is None and self.current_job is not None:  # Start of run
+            return True
+        if self.previous_job is not None and self.current_job is None:  # End of run
+            return True
+        return self.current_job.spec.id != self.previous_job.spec.id
+
+    @property
+    def spec_will_change(self):
+        if self.current_job is None and self.next_job is not None:  # Start of run
+            return True
+        if self.current_job is not None and self.next_job is None:  # End of run
+            return True
+        return self.current_job.spec.id != self.next_job.spec.id
+
+    def __init__(self, device_manager, context, result_manager):
+        self.device_manager = device_manager
+        self.device = device_manager.target
         self.context = context
-        self.workload.init_resources(self.context)
-        self.workload.validate()
-        self.workload.initialize(self.context)
+        self.result_manager = result_manager
+        self.logger = logging.getLogger('Runner')
+        self.job_queue = []
+        self.completed_jobs = []
+        self._initial_reset = True
+
+    def init_queue(self, specs):
+        raise NotImplementedError()
+
+    def run(self):  # pylint: disable=too-many-branches
+        self._send(signal.RUN_START)
+        with signal.wrap('RUN_INIT'):
+            self._initialize_run()
 
-    def run(self):
-        if not self.workload:
-            self.logger.warning('Failed to initialize workload; skipping execution')
-            return
-        self.pre_run()
-        self.logger.info('Setting up workload')
-        with signal.wrap('WORKLOAD_SETUP'):
-            self.workload.setup(self.context)
         try:
-            error = None
-            self.logger.info('Executing workload')
+            while self.job_queue:
+                try:
+                    self._init_job()
+                    self._run_job()
+                except KeyboardInterrupt:
+                    self.current_job.result.status = JobStatus.ABORTED
+                    raise
+                except Exception, e:  # pylint: disable=broad-except
+                    self.current_job.result.status = JobStatus.FAILED
+                    self.current_job.result.add_event(e.message)
+                    if isinstance(e, DeviceNotRespondingError):
+                        self.logger.info('Device appears to be unresponsive.')
+                        if self.context.reboot_policy.can_reboot and self.device.can('reset_power'):
+                            self.logger.info('Attempting to hard-reset the device...')
+                            try:
+                                self.device.boot(hard=True)
+                                self.device.connect()
+                            except DeviceError:  # hard_boot not implemented for the device.
+                                raise e
+                        else:
+                            raise e
+                    else:  # not a DeviceNotRespondingError
+                        self.logger.error(e)
+                finally:
+                    self._finalize_job()
+        except KeyboardInterrupt:
+            self.logger.info('Got CTRL-C. Finalizing run... (CTRL-C again to abort).')
+            # Skip through the remaining jobs.
+            while self.job_queue:
+                self.context.next_job(self.current_job)
+                self.current_job.result.status = JobStatus.ABORTED
+                self._finalize_job()
+        except DeviceNotRespondingError:
+            self.logger.info('Device unresponsive and recovery not possible. Skipping the rest of the run.')
+            self.context.aborted = True
+            while self.job_queue:
+                self.context.next_job(self.current_job)
+                self.current_job.result.status = JobStatus.SKIPPED
+                self._finalize_job()
+
+        instrumentation.enable_all()
+        self._finalize_run()
+        self._process_results()
+
+        self.result_manager.finalize(self.context)
+        self._send(signal.RUN_END)
+
+    def _initialize_run(self):
+        self.context.runner = self
+        self.context.run_info.start_time = datetime.utcnow()
+        self._connect_to_device()
+        self.logger.info('Initializing device')
+        self.device_manager.initialize(self.context)
+
+        self.logger.info('Initializing workloads')
+        for workload_spec in self.context.config.workload_specs:
+            workload_spec.workload.initialize(self.context)
+
+        self.context.run_info.device_properties = self.device_manager.info
+        self.result_manager.initialize(self.context)
+
+        if instrumentation.check_failures():
+            raise InstrumentError('Detected failure(s) during instrumentation initialization.')
+
+    def _connect_to_device(self):
+        if self.context.reboot_policy.perform_initial_boot:
             try:
-                with signal.wrap('WORKLOAD_EXECUTION'):
-                    self.workload.run(self.context)
-            except Exception as e:
-                log.log_error(e, self.logger)
-                error = e
-
-            self.logger.info('Processing execution results')
-            with signal.wrap('WORKLOAD_RESULT_UPDATE'):
-                if not error:
-                    self.workload.update_result(self.context)
+                self.device_manager.connect()
+            except DeviceError:  # device may be offline
+                if self.device.can('reset_power'):
+                    with self._signal_wrap('INITIAL_BOOT'):
+                        self.device.boot(hard=True)
                 else:
-                    self.logger.info('Workload execution failed; not extracting workload results.')
-                    raise error
+                    raise DeviceError('Cannot connect to device for initial reboot; '
+                                      'and device does not support hard reset.')
+            else:  # successfully connected
+                self.logger.info('\tBooting device')
+                with self._signal_wrap('INITIAL_BOOT'):
+                    self._reboot_device()
+        else:
+            self.logger.info('Connecting to device')
+            self.device_manager.connect()
+
+    def _init_job(self):
+        self.current_job.result.status = JobStatus.RUNNING
+        self.context.next_job(self.current_job)
+
+    def _run_job(self):   # pylint: disable=too-many-branches
+        spec = self.current_job.spec
+        if not spec.enabled:
+            self.logger.info('Skipping workload %s (iteration %s)', spec, self.context.current_iteration)
+            self.current_job.result.status = JobStatus.SKIPPED
+            return
+
+        self.logger.info('Running workload %s (iteration %s)', spec, self.context.current_iteration)
+        if spec.flash:
+            if not self.context.reboot_policy.can_reboot:
+                raise ConfigError('Cannot flash as reboot_policy does not permit rebooting.')
+            if not self.device.can('flash'):
+                raise DeviceError('Device does not support flashing.')
+            self._flash_device(spec.flash)
+        elif not self.completed_jobs:
+            # Never reboot on the very first job of a run, as we would have done
+            # the initial reboot if a reboot was needed.
+            pass
+        elif self.context.reboot_policy.reboot_on_each_spec and self.spec_changed:
+            self.logger.debug('Rebooting on spec change.')
+            self._reboot_device()
+        elif self.context.reboot_policy.reboot_on_each_iteration:
+            self.logger.debug('Rebooting on iteration.')
+            self._reboot_device()
+
+        instrumentation.disable_all()
+        instrumentation.enable(spec.instrumentation)
+        self.device_manager.start()
+
+        if self.spec_changed:
+            self._send(signal.WORKLOAD_SPEC_START)
+        self._send(signal.ITERATION_START)
+
+        try:
+            setup_ok = False
+            with self._handle_errors('Setting up device parameters'):
+                self.device_manager.set_runtime_parameters(spec.runtime_parameters)
+                setup_ok = True
+
+            if setup_ok:
+                with self._handle_errors('running {}'.format(spec.workload.name)):
+                    self.current_job.result.status = JobStatus.RUNNING
+                    self._run_workload_iteration(spec.workload)
+            else:
+                self.logger.info('\tSkipping the rest of the iterations for this spec.')
+                spec.enabled = False
+        except KeyboardInterrupt:
+            self._send(signal.ITERATION_END)
+            self._send(signal.WORKLOAD_SPEC_END)
+            raise
+        else:
+            self._send(signal.ITERATION_END)
+            if self.spec_will_change or not spec.enabled:
+                self._send(signal.WORKLOAD_SPEC_END)
         finally:
-            if self.target.check_responsive():
-                self.logger.info('Tearing down workload')
-                with signal.wrap('WORKLOAD_TEARDOWN'):
-                    self.workload.teardown(self.context)
-            self.post_run()
+            self.device_manager.stop()
 
-    def finalize(self):
-        self.workload.finalize(self.context)
+    def _finalize_job(self):
+        self.context.run_result.iteration_results.append(self.current_job.result)
+        job = self.job_queue.pop(0)
+        job.iteration = self.context.current_iteration
+        if job.result.status in self.config.retry_on_status:
+            if job.retry >= self.config.max_retries:
+                self.logger.error('Exceeded maximum number of retries. Abandoning job.')
+            else:
+                self.logger.info('Job status was {}. Retrying...'.format(job.result.status))
+                retry_job = RunnerJob(job.spec, job.retry + 1)
+                self.job_queue.insert(0, retry_job)
+        self.completed_jobs.append(job)
+        self.context.end_job()
 
-    def pre_run(self):
-        # TODO: enable components, etc
-        pass
+    def _finalize_run(self):
+        self.logger.info('Finalizing workloads')
+        for workload_spec in self.context.config.workload_specs:
+            workload_spec.workload.finalize(self.context)
 
-    def post_run(self):
-        pass
+        self.logger.info('Finalizing.')
+        self._send(signal.RUN_FIN)
+
+        with self._handle_errors('Disconnecting from the device'):
+            self.device.disconnect()
+
+        info = self.context.run_info
+        info.end_time = datetime.utcnow()
+        info.duration = info.end_time - info.start_time
+
+    def _process_results(self):
+        self.logger.info('Processing overall results')
+        with self._signal_wrap('OVERALL_RESULTS_PROCESSING'):
+            if instrumentation.check_failures():
+                self.context.run_result.non_iteration_errors = True
+            self.result_manager.process_run_result(self.context.run_result, self.context)
+
+    def _run_workload_iteration(self, workload):
+        self.logger.info('\tSetting up')
+        with self._signal_wrap('WORKLOAD_SETUP'):
+            try:
+                workload.setup(self.context)
+            except:
+                self.logger.info('\tSkipping the rest of the iterations for this spec.')
+                self.current_job.spec.enabled = False
+                raise
+        try:
+            self.logger.info('\tExecuting')
+            with self._handle_errors('Running workload'):
+                with self._signal_wrap('WORKLOAD_EXECUTION'):
+                    workload.run(self.context)
+
+            self.logger.info('\tProcessing result')
+            self._send(signal.BEFORE_WORKLOAD_RESULT_UPDATE)
+            try:
+                if self.current_job.result.status != JobStatus.FAILED:
+                    with self._handle_errors('Processing workload result',
+                                             on_error_status=JobStatus.PARTIAL):
+                        workload.update_result(self.context)
+                        self._send(signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE)
+
+                if self.current_job.result.status == JobStatus.RUNNING:
+                    self.current_job.result.status = JobStatus.OK
+            finally:
+                self._send(signal.AFTER_WORKLOAD_RESULT_UPDATE)
+
+        finally:
+            self.logger.info('\tTearing down')
+            with self._handle_errors('Tearing down workload',
+                                     on_error_status=JobStatus.NONCRITICAL):
+                with self._signal_wrap('WORKLOAD_TEARDOWN'):
+                    workload.teardown(self.context)
+            self.result_manager.add_result(self.current_job.result, self.context)
+
+    def _flash_device(self, flashing_params):
+        with self._signal_wrap('FLASHING'):
+            self.device.flash(**flashing_params)
+            self.device.connect()
+
+    def _reboot_device(self):
+        with self._signal_wrap('BOOT'):
+            for reboot_attempts in xrange(MAX_REBOOT_ATTEMPTS):
+                if reboot_attempts:
+                    self.logger.info('\tRetrying...')
+                with self._handle_errors('Rebooting device'):
+                    self.device.boot(**self.current_job.spec.boot_parameters)
+                    break
+            else:
+                raise DeviceError('Could not reboot device; max reboot attempts exceeded.')
+            self.device.connect()
+
+    def _send(self, s):
+        signal.send(s, self, self.context)
+
+    def _take_screenshot(self, filename):
+        if self.context.output_directory:
+            filepath = os.path.join(self.context.output_directory, filename)
+        else:
+            filepath = os.path.join(settings.output_directory, filename)
+        self.device.capture_screen(filepath)
+
+    @contextmanager
+    def _handle_errors(self, action, on_error_status=JobStatus.FAILED):
+        try:
+            if action is not None:
+                self.logger.debug(action)
+            yield
+        except (KeyboardInterrupt, DeviceNotRespondingError):
+            raise
+        except (WAError, TimeoutError), we:
+            self.device.check_responsive()
+            if self.current_job:
+                self.current_job.result.status = on_error_status
+                self.current_job.result.add_event(str(we))
+            try:
+                self._take_screenshot('error.png')
+            except Exception, e:  # pylint: disable=W0703
+                # We're already in error state, so the fact that taking a
+                # screenshot failed is not surprising...
+                pass
+            if action:
+                action = action[0].lower() + action[1:]
+            self.logger.error('Error while {}:\n\t{}'.format(action, we))
+        except Exception, e:  # pylint: disable=W0703
+            error_text = '{}("{}")'.format(e.__class__.__name__, e)
+            if self.current_job:
+                self.current_job.result.status = on_error_status
+                self.current_job.result.add_event(error_text)
+            self.logger.error('Error while {}'.format(action))
+            self.logger.error(error_text)
+            if isinstance(e, subprocess.CalledProcessError):
+                self.logger.error('Got:')
+                self.logger.error(e.output)
+            tb = get_traceback()
+            self.logger.error(tb)
+
+    @contextmanager
+    def _signal_wrap(self, signal_name):
+        """Wraps the suite in before/after signals, ensuring
+        that after signal is always sent."""
+        before_signal = getattr(signal, 'BEFORE_' + signal_name)
+        success_signal = getattr(signal, 'SUCCESSFUL_' + signal_name)
+        after_signal = getattr(signal, 'AFTER_' + signal_name)
+        try:
+            self._send(before_signal)
+            yield
+            self._send(success_signal)
+        finally:
+            self._send(after_signal)
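+    # For illustration: _signal_wrap('WORKLOAD_SETUP') sends
+    # BEFORE_WORKLOAD_SETUP on entry, SUCCESSFUL_WORKLOAD_SETUP only if the
+    # wrapped block completes without raising, and AFTER_WORKLOAD_SETUP
+    # unconditionally, mirroring try/finally semantics.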
+
+
+class BySpecRunner(Runner):
+    """
+    This is that "classic" implementation that executes all iterations of a workload
+    spec before proceeding onto the next spec.
+
+    """
+
+    def init_queue(self, specs):
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]  # pylint: disable=unused-variable
+        self.job_queue = [j for spec_jobs in jobs for j in spec_jobs]
+
+
+class BySectionRunner(Runner):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
+    i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2, C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all specs for the first section
+    followed by all specs for the second section, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
+
+    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
+
+    """
+
+    def init_queue(self, specs):
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
+        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
+
+
+class ByIterationRunner(Runner):
+    """
+    Runs the first iteration for all benchmarks first, before proceeding to the next iteration,
+    i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2, C1, C2...
+
+    If multiple sections were specified in the agenda, this will run all sections for the first global
+    spec first, followed by all sections for the second spec, etc.
+
+    e.g. given sections X and Y, and global specs A and B, with 2 iterations, this will run
+
+    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
+
+    """
+
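+    # A sketch of the interleaving: the first izip_longest alternates
+    # sections for each global spec, and the second alternates iterations
+    # across the resulting spec list, producing the X.A1, Y.A1, X.B1, ...
+    # ordering described above.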
+    def init_queue(self, specs):
+        sections = OrderedDict()
+        for s in specs:
+            if s.section_id not in sections:
+                sections[s.section_id] = []
+            sections[s.section_id].append(s)
+        specs = [s for section_specs in izip_longest(*sections.values()) for s in section_specs if s]
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]
+        self.job_queue = [j for spec_jobs in izip_longest(*jobs) for j in spec_jobs if j]
+
+
+class RandomRunner(Runner):
+    """
+    This will run specs in a random order.
+
+    """
+
+    def init_queue(self, specs):
+        jobs = [[RunnerJob(s) for _ in xrange(s.number_of_iterations)] for s in specs]  # pylint: disable=unused-variable
+        all_jobs = [j for spec_jobs in jobs for j in spec_jobs]
+        random.shuffle(all_jobs)
+        self.job_queue = all_jobs
diff --git a/wa/framework/host.py b/wa/framework/host.py
index 7c5e94aa..33810b93 100644
--- a/wa/framework/host.py
+++ b/wa/framework/host.py
@@ -1,23 +1,33 @@
 import os
 
-from wa.framework.configuration import settings
-from wa.framework.exception import ConfigError
-from wa.utils.misc import ensure_directory_exists
+import shutil
+
+from wlauto.core.configuration import settings
 
-
-class HostRunConfig(object):
+
+
+def init_user_directory(overwrite_existing=False):  # pylint: disable=R0914
     """
-    Host-side configuration for a run.
+    Initialise a fresh user directory.
     """
+    if os.path.exists(settings.user_directory):
+        if not overwrite_existing:
+            raise RuntimeError('Environment {} already exists.'.format(settings.user_directory))
+        shutil.rmtree(settings.user_directory)
 
-    def __init__(self, output_directory, 
-                 run_info_directory=None,
-                 run_config_directory=None):
-        self.output_directory = output_directory
-        self.run_info_directory = run_info_directory or os.path.join(self.output_directory, '_info')
-        self.run_config_directory = run_config_directory or os.path.join(self.output_directory, '_config')
+    os.makedirs(settings.user_directory)
+    os.makedirs(settings.dependencies_directory)
+    os.makedirs(settings.plugins_directory)
 
-    def initialize(self):
-        ensure_directory_exists(self.output_directory)
-        ensure_directory_exists(self.run_info_directory)
-        ensure_directory_exists(self.run_config_directory)
+    # TODO: generate default config.yaml here
+
+    if os.getenv('USER') == 'root':
+        # If running with sudo on POSIX, change the ownership to the real user.
+        real_user = os.getenv('SUDO_USER')
+        if real_user:
+            import pwd  # done here as module won't import on win32
+            user_entry = pwd.getpwnam(real_user)
+            uid, gid = user_entry.pw_uid, user_entry.pw_gid
+            os.chown(settings.user_directory, uid, gid)
+            # why, oh why isn't there a recursive=True option for os.chown?
+            for root, dirs, files in os.walk(settings.user_directory):
+                for d in dirs:
+                    os.chown(os.path.join(root, d), uid, gid)
+                for f in files: 
+                    os.chown(os.path.join(root, f), uid, gid)
diff --git a/wa/framework/instrumentation.py b/wa/framework/instrumentation.py
new file mode 100644
index 00000000..f4d3e480
--- /dev/null
+++ b/wa/framework/instrumentation.py
@@ -0,0 +1,399 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+Adding New Instrument
+=====================
+
+Any new instrument should be a subclass of Instrument and must have a name.
+When a new instrument is added to Workload Automation, its methods will be
+found automatically and hooked up to the supported signals. Once a signal is
+broadcast, the corresponding registered method is invoked.
+
+Each method in Instrument must take exactly two arguments: self and context.
+Supported signals can be found in [... link to signals ...]. To keep
+implementations simple and consistent, the basic steps for adding a new
+instrument are similar to those for adding a new workload.
+
+Hence, implementing the following methods is sufficient to add a new
+instrument:
+
+    - setup: Invoked after the workload has been set up. All the necessary
+       setup should go inside this method; this includes operations such as
+       pushing files to the target device, installing them, clearing logs,
+       etc.
+    - start: Invoked just before the workload starts executing. This is where
+       the instrument's measurements start being registered/taken.
+    - stop: Invoked just after the workload execution stops. This is where the
+       measurements should stop being taken/registered.
+    - update_result: Invoked after the workload has updated its result. This
+       is where the taken measurements are added to the result so they can be
+       processed by Workload Automation.
+    - teardown: Invoked after the workload has been torn down. It is a good
+       place to clean up any logs generated by the instrument.
+
+For example, to add an instrument which will trace device errors, we subclass
+Instrument and override the ``name`` attribute::
+
+        BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
+
+        class TraceErrorsInstrument(Instrument):
+
+            name = 'trace-errors'
+
+            def __init__(self, device):
+                super(TraceErrorsInstrument, self).__init__(device)
+                self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
+
+We then declare and implement the aforementioned methods. For the setup method,
+we want to push the file to the target device and then change the file mode to
+755 ::
+
+    def setup(self, context):
+        self.device.push(BINARY_FILE, self.device.working_directory)
+        self.device.execute('chmod 755 {}'.format(self.trace_on_device))
+
+Then we implement the start method, which simply runs the binary to start
+tracing. ::
+
+    def start(self, context):
+        self.device.execute('{} start'.format(self.trace_on_device))
+
+Lastly, we need to stop tracing once the workload stops and this happens in the
+stop method::
+
+    def stop(self, context):
+        self.device.execute('{} stop'.format(self.trace_on_device))
+
+The generated result can be updated inside update_result, or, if it is a
+trace, we just pull the file to the host. The context has a result variable
+with an add_metric method, which can be used to add the instrument's metrics
+to the final result for the workload. The method can be passed 4 params:
+the metric key, value, units, and lower_is_better, which is a boolean. ::
+
+    def update_result(self, context):
+        # pull the trace file from the device
+        result = os.path.join(self.device.working_directory, 'trace.txt')
+        self.device.pull(result, context.working_directory)
+
+        # parse the file if it needs parsing, or add the result to
+        # context.result
+
+At the end, we might want to delete any files generated by the instrumentation,
+and the code to clean up these files goes in the teardown method. ::
+
+    def teardown(self, context):
+        self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))
+
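+Putting it all together, a minimal sketch of the complete instrument (using
+the same hypothetical ``trace`` binary and the methods shown above) might
+look like::
+
+    BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
+
+    class TraceErrorsInstrument(Instrument):
+
+        name = 'trace-errors'
+
+        def __init__(self, device):
+            super(TraceErrorsInstrument, self).__init__(device)
+            self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
+
+        def setup(self, context):
+            self.device.push(BINARY_FILE, self.device.working_directory)
+            self.device.execute('chmod 755 {}'.format(self.trace_on_device))
+
+        def start(self, context):
+            self.device.execute('{} start'.format(self.trace_on_device))
+
+        def stop(self, context):
+            self.device.execute('{} stop'.format(self.trace_on_device))
+
+        def update_result(self, context):
+            # pull the trace file from the device into the output directory
+            result = os.path.join(self.device.working_directory, 'trace.txt')
+            self.device.pull(result, context.working_directory)
+
+        def teardown(self, context):
+            self.device.remove(os.path.join(self.device.working_directory, 'trace.txt'))
+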
+"""
+
+import logging
+import inspect
+from collections import OrderedDict
+
+import wa.framework.signal as signal
+from wa.framework.plugin import Plugin
+from wa.framework.exception import WAError, TargetNotRespondingError, TimeoutError
+from wa.utils.misc import get_traceback, isiterable
+from wa.utils.types import identifier
+
+
+logger = logging.getLogger('instrumentation')
+
+
+# Maps method names onto signals they should be registered to.
+# Note: the begin/end signals are paired -- if a begin_ signal is sent,
+#       then the corresponding end_ signal is guaranteed to also be sent.
+# Note: using OrderedDict to preserve logical ordering for the table generated
+#       in the documentation
+SIGNAL_MAP = OrderedDict([
+    # Below are "aliases" for some of the more common signals to allow
+    # instrumentation to have similar structure to workloads
+    ('initialize', signal.SUCCESSFUL_RUN_INIT),
+    # ('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
+    # ('start', signal.BEFORE_WORKLOAD_EXECUTION),
+    # ('stop', signal.AFTER_WORKLOAD_EXECUTION),
+    # ('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
+    # ('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
+    # ('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
+    # ('finalize', signal.RUN_FIN),
+
+    # ('on_run_start', signal.RUN_START),
+    # ('on_run_end', signal.RUN_END),
+    # ('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
+    # ('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
+    # ('on_iteration_start', signal.ITERATION_START),
+    # ('on_iteration_end', signal.ITERATION_END),
+
+    # ('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
+    # ('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
+    # ('after_initial_boot', signal.AFTER_INITIAL_BOOT),
+    # ('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
+    # ('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
+    # ('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
+    # ('before_boot', signal.BEFORE_BOOT),
+    # ('on_successful_boot', signal.SUCCESSFUL_BOOT),
+    # ('after_boot', signal.AFTER_BOOT),
+
+    # ('on_spec_init', signal.SPEC_INIT),
+    # ('on_run_init', signal.RUN_INIT),
+    # ('on_iteration_init', signal.ITERATION_INIT),
+
+    # ('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
+    # ('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
+    # ('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
+    # ('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
+    # ('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
+    # ('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
+    # ('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
+    # ('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
+    # ('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
+    # ('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
+    # ('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
+    # ('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
+
+    # ('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
+    # ('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
+    # ('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
+
+    # ('on_error', signal.ERROR_LOGGED),
+    # ('on_warning', signal.WARNING_LOGGED),
+])
+
+PRIORITY_MAP = OrderedDict([
+    ('very_fast_', 20),
+    ('fast_', 10),
+    ('normal_', 0),
+    ('slow_', -10),
+    ('very_slow_', -20),
+])
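+# For example, an instrument method named 'fast_initialize' would be stripped
+# to 'initialize', connected to SIGNAL_MAP['initialize'], and registered with
+# priority 10, so it runs before plain 'initialize' callbacks (priority 0).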
+
+installed = []
+
+
+def is_installed(instrument):
+    if isinstance(instrument, Instrument):
+        if instrument in installed:
+            return True
+        if instrument.name in [i.name for i in installed]:
+            return True
+    elif isinstance(instrument, type):
+        if instrument in [i.__class__ for i in installed]:
+            return True
+    else:  # assume string
+        if identifier(instrument) in [identifier(i.name) for i in installed]:
+            return True
+    return False
+
+
+def is_enabled(instrument):
+    if isinstance(instrument, Instrument) or isinstance(instrument, type):
+        name = instrument.name
+    else:  # assume string
+        name = instrument
+    try:
+        installed_instrument = get_instrument(name)
+        return installed_instrument.is_enabled
+    except ValueError:
+        return False
+
+
+failures_detected = False
+
+
+def reset_failures():
+    global failures_detected  # pylint: disable=W0603
+    failures_detected = False
+
+
+def check_failures():
+    result = failures_detected
+    reset_failures()
+    return result
+
+
+class ManagedCallback(object):
+    """
+    This wraps instruments' callbacks to ensure that errors do not interfere
+    with run execution.
+
+    """
+
+    def __init__(self, instrument, callback):
+        self.instrument = instrument
+        self.callback = callback
+
+    def __call__(self, context):
+        if self.instrument.is_enabled:
+            try:
+                self.callback(context)
+            except (KeyboardInterrupt, TargetNotRespondingError, TimeoutError):  # pylint: disable=W0703
+                raise
+            except Exception as e:  # pylint: disable=W0703
+                logger.error('Error in instrument {}'.format(self.instrument.name))
+                global failures_detected  # pylint: disable=W0603
+                failures_detected = True
+                if isinstance(e, WAError):
+                    logger.error(e)
+                else:
+                    tb = get_traceback()
+                    logger.error(tb)
+                    logger.error('{}({})'.format(e.__class__.__name__, e))
+                if not context.current_iteration:
+                    # Error occurred outside of an iteration (most likely
+                    # during initial setup or teardown). Since this would affect
+                    # the rest of the run, mark the instrument as broken so that
+                    # it doesn't get re-enabled for subsequent iterations.
+                    self.instrument.is_broken = True
+                disable(self.instrument)
+
+
+# Need this to keep track of callbacks, because the dispatcher only keeps
+# weak references, so if the callbacks aren't referenced elsewhere, they will
+# be deallocated before they've had a chance to be invoked.
+_callbacks = []
+
+
+def install(instrument):
+    """
+    This will look for methods (or any callable members) with specific names
+    in the instrument and hook them up to the corresponding signals.
+
+    :param instrument: Instrument instance to install.
+
+    """
+    logger.debug('Installing instrument %s.', instrument)
+    if is_installed(instrument):
+        raise ValueError('Instrument {} is already installed.'.format(instrument.name))
+    for attr_name in dir(instrument):
+        priority = 0
+        stripped_attr_name = attr_name
+        for key, value in PRIORITY_MAP.iteritems():
+            if attr_name.startswith(key):
+                stripped_attr_name = attr_name[len(key):]
+                priority = value
+                break
+        if stripped_attr_name in SIGNAL_MAP:
+            attr = getattr(instrument, attr_name)
+            if not callable(attr):
+                raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
+            argspec = inspect.getargspec(attr)
+            arg_num = len(argspec.args)
+            # Instrument callbacks will be passed exactly two arguments: self
+            # (the instrument instance to which the callback is bound) and
+            # context. However, we also allow callbacks to capture the context
+            # in variable arguments (declared as "*args" in the definition).
+            if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
+                message = '{} must take exactly 2 positional arguments; {} given.'
+                raise ValueError(message.format(attr_name, arg_num))
+
+            logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
+            mc = ManagedCallback(instrument, attr)
+            _callbacks.append(mc)
+            signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
+    installed.append(instrument)
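+# Illustrative usage (mirroring Executor.execute above; names assumed):
+#
+#     for instrument in config_manager.get_instruments(target_manager.target):
+#         install(instrument)
+#     validate()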
+
+
+def uninstall(instrument):
+    instrument = get_instrument(instrument)
+    installed.remove(instrument)
+
+
+def validate():
+    for instrument in installed:
+        instrument.validate()
+
+
+def get_instrument(inst):
+    if isinstance(inst, Instrument):
+        return inst
+    for installed_inst in installed:
+        if identifier(installed_inst.name) == identifier(inst):
+            return installed_inst
+    raise ValueError('Instrument {} is not installed'.format(inst))
+
+
+def disable_all():
+    for instrument in installed:
+        _disable_instrument(instrument)
+
+
+def enable_all():
+    for instrument in installed:
+        _enable_instrument(instrument)
+
+
+def enable(to_enable):
+    if isiterable(to_enable):
+        for inst in to_enable:
+            _enable_instrument(inst)
+    else:
+        _enable_instrument(to_enable)
+
+
+def disable(to_disable):
+    if isiterable(to_disable):
+        for inst in to_disable:
+            _disable_instrument(inst)
+    else:
+        _disable_instrument(to_disable)
+
+
+def _enable_instrument(inst):
+    inst = get_instrument(inst)
+    if not inst.is_broken:
+        logger.debug('Enabling instrument {}'.format(inst.name))
+        inst.is_enabled = True
+    else:
+        logger.debug('Not enabling broken instrument {}'.format(inst.name))
+
+
+def _disable_instrument(inst):
+    inst = get_instrument(inst)
+    if inst.is_enabled:
+        logger.debug('Disabling instrument {}'.format(inst.name))
+        inst.is_enabled = False
+
+
+def get_enabled():
+    return [i for i in installed if i.is_enabled]
+
+
+def get_disabled():
+    return [i for i in installed if not i.is_enabled]
+
+
+class Instrument(Plugin):
+    """
+    Base class for instrumentation implementations.
+    """
+    kind = "instrument"
+
+    def __init__(self, target, **kwargs):
+        super(Instrument, self).__init__(**kwargs)
+        self.target = target
+        self.is_enabled = True
+        self.is_broken = False
+
+    def initialize(self, context):
+        pass
+
+    def finalize(self, context):
+        pass
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return 'Instrument({})'.format(self.name)
diff --git a/wa/framework/old_output.py b/wa/framework/old_output.py
new file mode 100644
index 00000000..49ce8721
--- /dev/null
+++ b/wa/framework/old_output.py
@@ -0,0 +1,362 @@
+import os
+import shutil
+import logging
+import uuid
+from copy import copy
+from datetime import datetime, timedelta
+
+from wa.framework import signal, log
+from wa.framework.configuration.core import merge_config_values
+from wa.utils import serializer
+from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
+from wa.utils.types import numeric
+
+
+class Status(object):
+
+    __metaclass__ = enum_metaclass('values', return_name=True)
+
+    values = [
+        'NEW',
+        'PENDING',
+        'RUNNING',
+        'COMPLETE',
+        'OK',
+        'OKISH',
+        'NONCRITICAL',
+        'PARTIAL',
+        'FAILED',
+        'ABORTED',
+        'SKIPPED',
+        'UNKNOWN',
+    ]
+
+
+class WAOutput(object):
+
+    basename = '.wa-output'
+
+    @classmethod
+    def load(cls, source):
+        if os.path.isfile(source):
+            pod = serializer.load(source)
+        elif os.path.isdir(source):
+            pod = serializer.load(os.path.join(source, cls.basename))
+        else:
+            message = 'Cannot load {} from {}'
+            raise ValueError(message.format(cls.__name__, source))
+        return cls.from_pod(pod)
+
+    @classmethod
+    def from_pod(cls, pod):
+        instance = cls(pod['output_directory'])
+        instance.status = pod['status']
+        instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
+        instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
+        instance.events = [RunEvent.from_pod(e) for e in pod['events']]
+        instance.classifiers = pod['classifiers']
+        return instance
+
+    def __init__(self, output_directory):
+        self.logger = logging.getLogger('output')
+        self.output_directory = output_directory
+        self.status = Status.UNKNOWN
+        self.classifiers = {}
+        self.metrics = []
+        self.artifacts = []
+        self.events = []
+        
+    def initialize(self, overwrite=False):
+        if os.path.exists(self.output_directory):
+            if not overwrite:
+                raise RuntimeError('"{}" already exists.'.format(self.output_directory))
+            self.logger.info('Removing existing output directory.')
+            shutil.rmtree(self.output_directory)
+        self.logger.debug('Creating output directory {}'.format(self.output_directory))
+        os.makedirs(self.output_directory)
+
+    def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
+        classifiers = merge_config_values(self.classifiers, classifiers or {})
+        self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
+
+    def add_artifact(self, name, path, kind, *args, **kwargs):
+        path = _check_artifact_path(path, self.output_directory)
+        self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
+
+    def get_path(self, subpath):
+        return os.path.join(self.output_directory, subpath)
+
+    def to_pod(self):
+        return {
+            'output_directory': self.output_directory,
+            'status': self.status,
+            'metrics': [m.to_pod() for m in self.metrics],
+            'artifacts': [a.to_pod() for a in self.artifacts],
+            'events': [e.to_pod() for e in self.events],
+            'classifiers': copy(self.classifiers),
+        }
+
+    def persist(self):
+        statefile = os.path.join(self.output_directory, self.basename)
+        with open(statefile, 'wb') as wfh:
+            serializer.dump(self, wfh)
+        
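+# Illustrative usage (a sketch; 'results' is a hypothetical directory):
+#
+#   output = WAOutput('results')
+#   output.initialize()
+#   output.add_metric('score', 42)
+#   output.persist()
+#   reloaded = WAOutput.load('results')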
+
+class RunInfo(object):
+
+    default_name_format = 'wa-run-%y%m%d-%H%M%S'
+
+    def __init__(self, project=None, project_stage=None, name=None):
+        self.uuid = uuid.uuid4()
+        self.project = project
+        self.project_stage = project_stage
+        self.name = name or datetime.now().strftime(self.default_name_format)
+        self.start_time = None
+        self.end_time = None
+        self.duration = None
+
+    @staticmethod
+    def from_pod(pod):
+        instance = RunInfo()
+        instance.uuid = uuid.UUID(pod['uuid'])
+        instance.project = pod['project']
+        instance.project_stage = pod['project_stage']
+        instance.name = pod['name']
+        instance.start_time = pod['start_time']
+        instance.end_time = pod['end_time']
+        instance.duration = timedelta(seconds=pod['duration'])
+        return instance
+
+    def to_pod(self):
+        d = copy(self.__dict__)
+        d['uuid'] = str(self.uuid)
+        d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
+        return d
+
+
+class RunOutput(WAOutput):
+
+    @property
+    def info_directory(self):
+        return _d(os.path.join(self.output_directory, '_info'))
+
+    @property
+    def config_directory(self):
+        return _d(os.path.join(self.output_directory, '_config'))
+
+    @property
+    def failed_directory(self):
+        return _d(os.path.join(self.output_directory, '_failed'))
+
+    @property
+    def log_file(self):
+        return os.path.join(self.output_directory, 'run.log')
+
+    @classmethod
+    def from_pod(cls, pod):
+        instance = WAOutput.from_pod(pod)
+        instance.info = RunInfo.from_pod(pod['info'])
+        instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
+        instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
+        return instance
+
+    def __init__(self, output_directory):
+        super(RunOutput, self).__init__(output_directory)
+        self.logger = logging.getLogger('output')
+        self.info = RunInfo()
+        self.jobs = []
+        self.failed = []
+
+    def initialize(self, overwrite=False):
+        super(RunOutput, self).initialize(overwrite)
+        log.add_file(self.log_file)
+        self.add_artifact('runlog', self.log_file, 'log')
+
+    def create_job_output(self, id):
+        outdir = os.path.join(self.output_directory, id)
+        job_output = JobOutput(outdir)
+        self.jobs.append(job_output)
+        return job_output
+
+    def move_failed(self, job_output):
+        basename = os.path.basename(job_output.output_directory)
+        i = 1
+        dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
+        while os.path.exists(dest):
+            i += 1
+            dest = os.path.join(self.failed_directory, '{}-{}'.format(basename, i))
+        shutil.move(job_output.output_directory, dest)
+
+    def to_pod(self):
+        pod = super(RunOutput, self).to_pod()
+        pod['info'] = self.info.to_pod()
+        pod['jobs'] = [i.to_pod() for i in self.jobs]
+        pod['failed'] = [i.to_pod() for i in self.failed]
+        return pod
+
+
+class JobOutput(WAOutput):
+
+    def add_artifact(self, name, path, kind, *args, **kwargs):
+        path = _check_artifact_path(path, self.output_directory)
+        self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
+
+
+class Artifact(object):
+    """
+    This is an artifact generated during execution/post-processing of a workload.
+    Unlike metrics, this represents an actual artifact generated, such as a file.
+    This may be a "result", such as a trace, or it could be "meta data", such as logs.
+    These are distinguished using the ``kind`` attribute, which also helps WA decide
+    how it should be handled. Currently supported kinds are:
+
+        :log: A log file. Not part of "results" as such but contains information about the
+              run/workload execution that may be useful for diagnostics/meta analysis.
+        :meta: A file containing metadata. This is not part of "results", but contains
+               information that may be necessary to reproduce the results (contrast with
+               ``log`` artifacts which are *not* necessary).
+        :data: This file contains new data, not available otherwise and should be considered
+               part of the "results" generated by WA. Most traces would fall into this category.
+        :export: Exported version of results or some other artifact. This signifies that
+                 this artifact does not contain any new data that is not available
+                 elsewhere and that it may be safely discarded without losing information.
+        :raw: Signifies that this is a raw dump/log that is normally processed to extract
+              useful information and is then discarded. In a sense, it is the opposite of
+              ``export``, but in general may also be discarded.
+
+              .. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
+                        how important it is to preserve this file, e.g. when archiving, vs
+                        how much space it takes up. Unlike ``export`` artifacts which are
+                        (almost) always ignored by other exporters as that would never result
+                        in data loss, ``raw`` files *may* be processed by exporters if they
+                        decide that the risk of losing potentially (though unlikely) useful
+                        data is greater than the time/space cost of handling the artifact (e.g.
+                        a database uploader may choose to ignore ``raw`` artifacts, whereas a
+                        network filer archiver may choose to archive them).
+
+        .. note:: The kind parameter is intended to represent the logical function of a
+                  particular artifact, not its intended means of processing -- this is left
+                  entirely up to the result processors.
+
+    """
+
+    RUN = 'run'
+    ITERATION = 'iteration'
+
+    valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
+
+    @staticmethod
+    def from_pod(pod):
+        return Artifact(**pod)
+
+    def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
+        """"
+        :param name: Name that uniquely identifies this artifact.
+        :param path: The *relative* path of the artifact. Depending on the ``level``
+                     must be either relative to the run or iteration output directory.
+                     Note: this path *must* be delimited using ``/`` irrespective of the
+                     operating system.
+        :param kind: The type of the artifact this is (e.g. log file, result, etc.); this
+                     will be used as a hint to result processors. This must be one of ``'log'``,
+                     ``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
+        :param level: The level at which the artifact will be generated. Must be either
+                      ``'iteration'`` or ``'run'``.
+        :param mandatory: Boolean value indicating whether this artifact must be present
+                          at the end of result processing for its level.
+        :param description: A free-form description of what this artifact is.
+
+        """
+        if kind not in self.valid_kinds:
+            raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
+        self.name = name
+        self.path = path.replace('/', os.sep) if path is not None else path
+        self.kind = kind
+        self.level = level
+        self.mandatory = mandatory
+        self.description = description
+
+    def exists(self, context):
+        """Returns ``True`` if artifact exists within the specified context, and
+        ``False`` otherwise."""
+        fullpath = os.path.join(context.output_directory, self.path)
+        return os.path.exists(fullpath)
+
+    def to_pod(self):
+        return copy(self.__dict__)
+
+
+class RunEvent(object):
+    """
+    An event that occurred during a run.
+
+    """
+
+    @staticmethod
+    def from_pod(pod):
+        instance = RunEvent(pod['message'])
+        instance.timestamp = pod['timestamp']
+        return instance
+
+    def __init__(self, message):
+        self.timestamp = datetime.utcnow()
+        self.message = message
+
+    def to_pod(self):
+        return copy(self.__dict__)
+
+    def __str__(self):
+        return '{} {}'.format(self.timestamp, self.message)
+
+    __repr__ = __str__
+
+
+class Metric(object):
+    """
+    This is a single metric collected from executing a workload.
+
+    :param name: the name of the metric. Uniquely identifies the metric
+                 within the results.
+    :param value: The numerical value of the metric for this execution of
+                  a workload. This can be either an int or a float.
+    :param units: Units for the collected value. Can be None if the value
+                  has no units (e.g. it's a count or a standardised score).
+    :param lower_is_better: Boolean flag indicating whether lower values are
+                            better than higher ones. Defaults to False.
+    :param classifiers: A set of key-value pairs to further classify this metric
+                        beyond current iteration (e.g. this can be used to identify
+                        sub-tests).
+
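+    Example (illustrative)::
+
+        Metric('execution_time', 12.3, 'seconds', lower_is_better=True)
+        # str() renders this as: <execution_time: 12.3 seconds (-)>
+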
+    """
+
+    @staticmethod
+    def from_pod(pod):
+        return Metric(**pod)
+
+    def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
+        self.name = name
+        self.value = numeric(value)
+        self.units = units
+        self.lower_is_better = lower_is_better
+        self.classifiers = classifiers or {}
+
+    def to_pod(self):
+        return copy(self.__dict__)
+
+    def __str__(self):
+        result = '{}: {}'.format(self.name, self.value)
+        if self.units:
+            result += ' ' + self.units
+        result += ' ({})'.format('-' if self.lower_is_better else '+')
+        return '<{}>'.format(result)
+
+    __repr__ = __str__
+
+
+def _check_artifact_path(path, rootpath):
+    if path.startswith(rootpath):
+        return os.path.abspath(path)
+    rootpath = os.path.abspath(rootpath)
+    full_path = os.path.join(rootpath, path)
+    if not os.path.isfile(full_path):
+        raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
+    return full_path
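+
+
+# Illustrative behaviour of _check_artifact_path (a sketch): a relative path
+# such as 'trace.dat' is joined onto rootpath and returned as an absolute
+# path, provided the resulting file exists; otherwise ValueError is raised.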
diff --git a/wa/framework/output.py b/wa/framework/output.py
index 49ce8721..77d5853e 100644
--- a/wa/framework/output.py
+++ b/wa/framework/output.py
@@ -1,362 +1,188 @@
+import logging
 import os
 import shutil
-import logging
+import string
+import sys
 import uuid
 from copy import copy
-from datetime import datetime, timedelta
 
-from wa.framework import signal, log
-from wa.framework.configuration.core import merge_config_values
-from wa.utils import serializer
-from wa.utils.misc import enum_metaclass, ensure_directory_exists as _d
-from wa.utils.types import numeric
+from wlauto.core.configuration.configuration import JobSpec
+from wlauto.core.configuration.manager import ConfigManager
+from wlauto.core.device_manager import TargetInfo
+from wlauto.utils.misc import touch
+from wlauto.utils.serializer import write_pod, read_pod
 
 
-class Status(object):
+logger = logging.getLogger('output')
 
-    __metaclass__ = enum_metaclass('values', return_name=True)
-
-    values = [
-        'NEW',
-        'PENDING',
-        'RUNNING',
-        'COMPLETE',
-        'OK',
-        'OKISH',
-        'NONCRITICAL',
-        'PARTIAL',
-        'FAILED',
-        'ABORTED',
-        'SKIPPED',
-        'UNKNOWN',
-    ]
-
-
-class WAOutput(object):
-
-    basename = '.wa-output'
-
-    @classmethod
-    def load(cls, source):
-        if os.path.isfile(source):
-            pod = serializer.load(source)
-        elif os.path.isdir(source):
-            pod = serializer.load(os.path.join(source, cls.basename))
-        else:
-            message = 'Cannot load {} from {}'
-            raise ValueError(message.format(cls.__name__, source))
-        return cls.from_pod(pod)
-
-    @classmethod
-    def from_pod(cls, pod):
-        instance = cls(pod['output_directory'])
-        instance.status = pod['status']
-        instance.metrics = [Metric.from_pod(m) for m in pod['metrics']]
-        instance.artifacts = [Artifact.from_pod(a) for a in pod['artifacts']]
-        instance.events = [RunEvent.from_pod(e) for e in pod['events']]
-        instance.classifiers = pod['classifiers']
-        return instance
-
-    def __init__(self, output_directory):
-        self.logger = logging.getLogger('output')
-        self.output_directory = output_directory
-        self.status = Status.UNKNOWN
-        self.classifiers = {}
-        self.metrics = []
-        self.artifacts = []
-        self.events = []
-        
-    def initialize(self, overwrite=False):
-        if os.path.exists(self.output_directory):
-            if not overwrite:
-                raise RuntimeError('"{}" already exists.'.format(self.output_directory))
-            self.logger.info('Removing existing output directory.')
-            shutil.rmtree(self.output_directory)
-        self.logger.debug('Creating output directory {}'.format(self.output_directory))
-        os.makedirs(self.output_directory)
-
-    def add_metric(self, name, value, units=None, lower_is_better=False, classifiers=None):
-        classifiers = merge_config_values(self.classifiers, classifiers or {})
-        self.metrics.append(Metric(name, value, units, lower_is_better, classifiers))
-
-    def add_artifact(self, name, path, kind, *args, **kwargs):
-        path = _check_artifact_path(path, self.output_directory)
-        self.artifacts.append(Artifact(name, path, kind, Artifact.RUN, *args, **kwargs))
-
-    def get_path(self, subpath):
-        return os.path.join(self.output_directory, subpath)
-
-    def to_pod(self):
-        return {
-            'output_directory': self.output_directory,
-            'status': self.status,
-            'metrics': [m.to_pod() for m in self.metrics],
-            'artifacts': [a.to_pod() for a in self.artifacts],
-            'events': [e.to_pod() for e in self.events],
-            'classifiers': copy(self.classifiers),
-        }
-
-    def persist(self):
-        statefile = os.path.join(self.output_directory, self.basename)
-        with open(statefile, 'wb') as wfh:
-            serializer.dump(self, wfh)
-        
 
 class RunInfo(object):
+    """
+    Information about the current run, such as its unique ID, run
+    time, etc.
 
-    default_name_format = 'wa-run-%y%m%d-%H%M%S'
+    """
+    @staticmethod
+    def from_pod(pod):
+        uid = pod.pop('uuid')
+        if uid is not None:
+            uid = uuid.UUID(uid)
+        instance = RunInfo(**pod)
+        instance.uuid = uid
+        return instance
 
-    def __init__(self, project=None, project_stage=None, name=None):
+    def __init__(self, run_name=None, project=None, project_stage=None,
+                 start_time=None, end_time=None, duration=None):
         self.uuid = uuid.uuid4()
-        self.project = project
-        self.project_stage = project_stage
-        self.name = name or datetime.now().strftime(self.default_name_format)
+        self.run_name = run_name
+        self.project = project
+        self.project_stage = project_stage
         self.start_time = None
         self.end_time = None
         self.duration = None
 
-    @staticmethod
-    def from_pod(pod):
-        instance = RunInfo()
-        instance.uuid = uuid.UUID(pod['uuid'])
-        instance.project = pod['project']
-        instance.project_stage = pod['project_stage']
-        instance.name = pod['name']
-        instance.start_time = pod['start_time']
-        instance.end_time = pod['end_time']
-        instance.duration = timedelta(seconds=pod['duration'])
-        return instance
-
     def to_pod(self):
         d = copy(self.__dict__)
         d['uuid'] = str(self.uuid)
-        d['duration'] = self.duration.days * 3600 * 24 + self.duration.seconds
         return d
 
 
-class RunOutput(WAOutput):
-
-    @property
-    def info_directory(self):
-        return _d(os.path.join(self.output_directory, '_info'))
-
-    @property
-    def config_directory(self):
-        return _d(os.path.join(self.output_directory, '_config'))
-
-    @property
-    def failed_directory(self):
-        return _d(os.path.join(self.output_directory, '_failed'))
-
-    @property
-    def log_file(self):
-        return os.path.join(self.output_directory, 'run.log')
-
-    @classmethod
-    def from_pod(cls, pod):
-        instance = WAOutput.from_pod(pod)
-        instance.info = RunInfo.from_pod(pod['info'])
-        instance.jobs = [JobOutput.from_pod(i) for i in pod['jobs']]
-        instance.failed = [JobOutput.from_pod(i) for i in pod['failed']]
-        return instance
-
-    def __init__(self, output_directory):
-        super(RunOutput, self).__init__(output_directory)
-        self.logger = logging.getLogger('output')
-        self.info = RunInfo()
-        self.jobs = []
-        self.failed = []
-
-    def initialize(self, overwrite=False):
-        super(RunOutput, self).initialize(overwrite)
-        log.add_file(self.log_file)
-        self.add_artifact('runlog', self.log_file,  'log')
-
-    def create_job_output(self, id):
-        outdir = os.path.join(self.output_directory, id)
-        job_output = JobOutput(outdir)
-        self.jobs.append(job_output)
-        return job_output
-
-    def move_failed(self, job_output):
-        basename = os.path.basename(job_output.output_directory)
-        i = 1
-        dest = os.path.join(self.failed_directory, basename + '-{}'.format(i))
-        while os.path.exists(dest):
-            i += 1
-            dest = '{}-{}'.format(dest[:-2], i)
-        shutil.move(job_output.output_directory, dest)
-
-    def to_pod(self):
-        pod = super(RunOutput, self).to_pod()
-        pod['info'] = self.info.to_pod()
-        pod['jobs'] = [i.to_pod() for i in self.jobs]
-        pod['failed'] = [i.to_pod() for i in self.failed]
-        return pod
-
-
-class JobOutput(WAOutput):
-
-    def add_artifact(self, name, path, kind, *args, **kwargs):
-        path = _check_artifact_path(path, self.output_directory)
-        self.artifacts.append(Artifact(name, path, kind, Artifact.ITERATION, *args, **kwargs))
-
-
-class Artifact(object):
+class RunState(object):
     """
-    This is an artifact generated during execution/post-processing of a workload.
-    Unlike metrics, this represents an actual artifact, such as a file, generated.
-    This may be "result", such as trace, or it could be "meta data" such as logs.
-    These are distinguished using the ``kind`` attribute, which also helps WA decide
-    how it should be handled. Currently supported kinds are:
-
-        :log: A log file. Not part of "results" as such but contains information about the
-              run/workload execution that be useful for diagnostics/meta analysis.
-        :meta: A file containing metadata. This is not part of "results", but contains
-               information that may be necessary to reproduce the results (contrast with
-               ``log`` artifacts which are *not* necessary).
-        :data: This file contains new data, not available otherwise and should be considered
-               part of the "results" generated by WA. Most traces would fall into this category.
-        :export: Exported version of results or some other artifact. This signifies that
-                 this artifact does not contain any new data that is not available
-                 elsewhere and that it may be safely discarded without losing information.
-        :raw: Signifies that this is a raw dump/log that is normally processed to extract
-              useful information and is then discarded. In a sense, it is the opposite of
-              ``export``, but in general may also be discarded.
-
-              .. note:: whether a file is marked as ``log``/``data`` or ``raw`` depends on
-                        how important it is to preserve this file, e.g. when archiving, vs
-                        how much space it takes up. Unlike ``export`` artifacts which are
-                        (almost) always ignored by other exporters as that would never result
-                        in data loss, ``raw`` files *may* be processed by exporters if they
-                        decided that the risk of losing potentially (though unlikely) useful
-                        data is greater than the time/space cost of handling the artifact (e.g.
-                        a database uploader may choose to ignore ``raw`` artifacts, where as a
-                        network filer archiver may choose to archive them).
-
-        .. note: The kind parameter is intended to represent the logical function of a particular
-                 artifact, not it's intended means of processing -- this is left entirely up to the
-                 result processors.
+    Represents the state of a WA run.
 
     """
-
-    RUN = 'run'
-    ITERATION = 'iteration'
-
-    valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
-
     @staticmethod
     def from_pod(pod):
-        return Artifact(**pod)
+        return RunState()
 
-    def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
-        """"
-        :param name: Name that uniquely identifies this artifact.
-        :param path: The *relative* path of the artifact. Depending on the ``level``
-                     must be either relative to the run or iteration output directory.
-                     Note: this path *must* be delimited using ``/`` irrespective of the
-                     operating system.
-        :param kind: The type of the artifact this is (e.g. log file, result, etc.) this
-                     will be used a hit to result processors. This must be one of ``'log'``,
-                     ``'meta'``, ``'data'``, ``'export'``, ``'raw'``.
-        :param level: The level at which the artifact will be generated. Must be either
-                      ``'iteration'`` or ``'run'``.
-        :param mandatory: Boolean value indicating whether this artifact must be present
-                          at the end of result processing for its level.
-        :param description: A free-form description of what this artifact is.
-
-        """
-        if kind not in self.valid_kinds:
-            raise ValueError('Invalid Artifact kind: {}; must be in {}'.format(kind, self.valid_kinds))
-        self.name = name
-        self.path = path.replace('/', os.sep) if path is not None else path
-        self.kind = kind
-        self.level = level
-        self.mandatory = mandatory
-        self.description = description
-
-    def exists(self, context):
-        """Returns ``True`` if artifact exists within the specified context, and
-        ``False`` otherwise."""
-        fullpath = os.path.join(context.output_directory, self.path)
-        return os.path.exists(fullpath)
+    def __init__(self):
+        pass
 
     def to_pod(self):
-        return copy(self.__dict__)
+        return {}
 
 
-class RunEvent(object):
-    """
-    An event that occured during a run.
+class RunOutput(object):
 
-    """
+    @property
+    def logfile(self):
+        return os.path.join(self.basepath, 'run.log')
 
-    @staticmethod
-    def from_pod(pod):
-        instance = RunEvent(pod['message'])
-        instance.timestamp = pod['timestamp']
-        return instance
+    @property
+    def metadir(self):
+        return os.path.join(self.basepath, '__meta')
 
-    def __init__(self, message):
-        self.timestamp = datetime.utcnow()
-        self.message = message
+    @property
+    def infofile(self):
+        return os.path.join(self.metadir, 'run_info.json')
 
-    def to_pod(self):
-        return copy(self.__dict__)
+    @property
+    def statefile(self):
+        return os.path.join(self.basepath, '.run_state.json')
 
-    def __str__(self):
-        return '{} {}'.format(self.timestamp, self.message)
+    @property
+    def configfile(self):
+        return os.path.join(self.metadir, 'config.json')
 
-    __repr__ = __str__
+    @property
+    def targetfile(self):
+        return os.path.join(self.metadir, 'target_info.json')
+
+    @property
+    def jobsfile(self):
+        return os.path.join(self.metadir, 'jobs.json')
+
+    @property
+    def raw_config_dir(self):
+        return os.path.join(self.metadir, 'raw_config')
+
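+    # On-disk layout implied by the properties above (a sketch):
+    #
+    #   <basepath>/
+    #       run.log
+    #       .run_state.json
+    #       __meta/
+    #           run_info.json
+    #           config.json
+    #           target_info.json
+    #           jobs.json
+    #           raw_config/
+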
+    def __init__(self, path):
+        self.basepath = path
+        self.info = None
+        self.state = None
+        if (not os.path.isfile(self.statefile) or
+                not os.path.isfile(self.infofile)):
+            msg = '"{}" does not exist or is not a valid WA output directory.'
+            raise ValueError(msg.format(self.basepath))
+        self.reload()
+
+    def reload(self):
+        self.info = RunInfo.from_pod(read_pod(self.infofile))
+        self.state = RunState.from_pod(read_pod(self.statefile))
+
+    def write_info(self):
+        write_pod(self.info.to_pod(), self.infofile)
+
+    def write_state(self):
+        write_pod(self.state.to_pod(), self.statefile)
+
+    def write_config(self, config):
+        write_pod(config.to_pod(), self.configfile)
+
+    def read_config(self):
+        if not os.path.isfile(self.configfile):
+            return None
+        return ConfigManager.from_pod(read_pod(self.configfile))
+
+    def write_target_info(self, ti):
+        write_pod(ti.to_pod(), self.targetfile)
+
+    def read_target_info(self):
+        if not os.path.isfile(self.targetfile):
+            return None
+        return TargetInfo.from_pod(read_pod(self.targetfile))
+
+    def write_job_specs(self, job_specs):
+        js_pod = {'jobs': [js.to_pod() for js in job_specs]}
+        write_pod(js_pod, self.jobsfile)
+
+    def read_job_specs(self):
+        if not os.path.isfile(self.jobsfile):
+            return None
+        pod = read_pod(self.jobsfile)
+        return [JobSpec.from_pod(jp) for jp in pod['jobs']]
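+
+    # Illustrative round trip (a sketch; job_specs is assumed to be a
+    # non-empty list of JobSpec instances):
+    #
+    #   ro.write_job_specs(job_specs)
+    #   assert len(ro.read_job_specs()) == len(job_specs)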
 
 
-class Metric(object):
-    """
-    This is a single metric collected from executing a workload.
+def init_wa_output(path, wa_state, force=False):
+    if os.path.exists(path):
+        if force:
+            logger.info('Removing existing output directory.')
+            shutil.rmtree(os.path.abspath(path))
+        else:
+            raise RuntimeError('path exists: {}'.format(path))
 
-    :param name: the name of the metric. Uniquely identifies the metric
-                 within the results.
-    :param value: The numerical value of the metric for this execution of
-                  a workload. This can be either an int or a float.
-    :param units: Units for the collected value. Can be None if the value
-                  has no units (e.g. it's a count or a standardised score).
-    :param lower_is_better: Boolean flag indicating where lower values are
-                            better than higher ones. Defaults to False.
-    :param classifiers: A set of key-value pairs to further classify this metric
-                        beyond current iteration (e.g. this can be used to identify
-                        sub-tests).
+    logger.info('Creating output directory.')
+    os.makedirs(path)
+    meta_dir = os.path.join(path, '__meta')
+    os.makedirs(meta_dir)
+    _save_raw_config(meta_dir, wa_state)
+    touch(os.path.join(path, 'run.log'))
 
-    """
+    info = RunInfo(
+        run_name=wa_state.run_config.run_name,
+        project=wa_state.run_config.project,
+        project_stage=wa_state.run_config.project_stage,
+    )
+    write_pod(info.to_pod(), os.path.join(meta_dir, 'run_info.json'))
+
+    with open(os.path.join(path, '.run_state.json'), 'w') as wfh:
+        wfh.write('{}')
 
-    @staticmethod
-    def from_pod(pod):
-        return Metric(**pod)
-
-    def __init__(self, name, value, units=None, lower_is_better=False, classifiers=None):
-        self.name = name
-        self.value = numeric(value)
-        self.units = units
-        self.lower_is_better = lower_is_better
-        self.classifiers = classifiers or {}
-
-    def to_pod(self):
-        return copy(self.__dict__)
-
-    def __str__(self):
-        result = '{}: {}'.format(self.name, self.value)
-        if self.units:
-            result += ' ' + self.units
-        result += ' ({})'.format('-' if self.lower_is_better else '+')
-        return '<{}>'.format(result)
-
-    __repr__ = __str__
+    return RunOutput(path)
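+
+
+# Illustrative usage (a sketch; config is assumed to be a ConfigManager whose
+# run_config and loaded_config_sources have been populated):
+#
+#   ro = init_wa_output('wa_output', config, force=True)
+#   ro.write_state()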
 
 
-def _check_artifact_path(path, rootpath):
-    if path.startswith(rootpath):
-        return os.path.abspath(path)
-    rootpath = os.path.abspath(rootpath)
-    full_path = os.path.join(rootpath, path)
-    if not os.path.isfile(full_path):
-        raise ValueError('Cannot add artifact because {} does not exist.'.format(full_path))
-    return full_path
+def _save_raw_config(meta_dir, state):
+    raw_config_dir = os.path.join(meta_dir, 'raw_config')
+    os.makedirs(raw_config_dir)
+
+    for i, source in enumerate(state.loaded_config_sources):
+        if not os.path.isfile(source):
+            continue
+        basename = os.path.basename(source)
+        dest_path = os.path.join(raw_config_dir, 'cfg{}-{}'.format(i, basename))
+        shutil.copy(source, dest_path)
diff --git a/wa/framework/plugin.py b/wa/framework/plugin.py
index fd5b159f..b642ee29 100644
--- a/wa/framework/plugin.py
+++ b/wa/framework/plugin.py
@@ -21,69 +21,28 @@ import inspect
 import imp
 import string
 import logging
-from copy import copy
-from itertools import chain
 from collections import OrderedDict, defaultdict
+from itertools import chain
+from copy import copy
 
-from wa.framework import log
-from wa.framework.exception import ValidationError, ConfigError, NotFoundError, PluginLoaderError
-from wa.framework.configuration.core import ConfigurationPoint, ConfigurationPointCollection
-from wa.utils.misc import isiterable, ensure_directory_exists as _d, get_article
-from wa.utils.misc import walk_modules, get_article
-from wa.utils.types import identifier, integer, boolean, caseless_string
+from wa.framework.configuration.core import settings, ConfigurationPoint as Parameter
+from wa.framework.exception import (NotFoundError, PluginLoaderError, ValidationError,
+                                    ConfigError, HostError, TargetError)
+from wa.utils import log
+from wa.utils.misc import (ensure_directory_exists as _d, walk_modules, load_class, 
+                           merge_dicts_simple, get_article)
+from wa.utils.types import identifier, boolean
 
 
-class Parameter(ConfigurationPoint):
-
-    is_runtime = False
-
-    def __init__(self, name, 
-                 kind=None, 
-                 mandatory=None, 
-                 default=None, 
-                 override=False,
-                 allowed_values=None, 
-                 description=None, 
-                 constraint=None, 
-                 convert_types=True,
-                 global_alias=None,
-                 reconfigurable=True):
-        """
-        :param global_alias: This is an alternative alias for this parameter,
-                             unlike the name, this alias will not be
-                             namespaced under the owning extension's name
-                             (hence the global part). This is introduced
-                             primarily for backward compatibility -- so that
-                             old extension settings names still work. This
-                             should not be used for new parameters.
-
-        :param reconfigurable: This indicated whether this parameter may be 
-                               reconfigured during the run (e.g. between different
-                               iterations). This determines where in run configruation
-                               this parameter may appear.
-
-        For other parameters, see docstring for 
-        ``wa.framework.configuration.core.ConfigurationPoint``
-
-        """
-        super(Parameter, self).__init__(name, kind, mandatory,
-                                        default, override, allowed_values,
-                                        description, constraint,
-                                        convert_types)
-        self.global_alias = global_alias
-        self.reconfigurable = reconfigurable
-
-    def __repr__(self):
-        d = copy(self.__dict__)
-        del d['description']
-        return 'Param({})'.format(d)
+MODNAME_TRANS = string.maketrans(':/\\.', '____')
 
 
-class PluginAliasCollection(object):
+class AttributeCollection(object):
     """
-    Accumulator for extension attribute objects (such as Parameters). This will
-    replace any class member list accumulating such attributes through the magic of
-    metaprogramming\ [*]_.
+    Accumulator for plugin attribute objects (such as Parameters or Artifacts).
+
+    This will replace any class member list accumulating such attributes
+    through the magic of metaprogramming\ [*]_.
 
     .. [*] which is totally safe and not going backfire in any way...
 
@@ -93,7 +52,8 @@ class PluginAliasCollection(object):
     def values(self):
         return self._attrs.values()
 
-    def __init__(self):
+    def __init__(self, attrcls):
+        self._attrcls = attrcls
         self._attrs = OrderedDict()
 
     def add(self, p):
@@ -104,6 +64,8 @@ class PluginAliasCollection(object):
                 for a, v in p.__dict__.iteritems():
                     if v is not None:
                         setattr(newp, a, v)
+                if not hasattr(newp, "_overridden"):
+                    newp._overridden = p._owner
                 self._attrs[p.name] = newp
             else:
                 # Duplicate attribute condition is check elsewhere.
@@ -119,13 +81,19 @@ class PluginAliasCollection(object):
     __repr__ = __str__
 
     def _to_attrcls(self, p):
-        if isinstance(p, tuple) or isinstance(p, list):
-            # must be in the form (name, {param: value, ...})
-            p = Alias(p[1], **p[1])
-        elif not isinstance(p, Alias):
+        old_owner = getattr(p, "_owner", None)
+        if isinstance(p, basestring):
+            p = self._attrcls(p)
+        elif isinstance(p, tuple) or isinstance(p, list):
+            p = self._attrcls(*p)
+        elif isinstance(p, dict):
+            p = self._attrcls(**p)
+        elif not isinstance(p, self._attrcls):
             raise ValueError('Invalid parameter value: {}'.format(p))
-        if p.name in self._attrs:
+        if (p.name in self._attrs and not p.override and
+                p.name != 'modules'):  # TODO: HACK due to "diamond dependency" in workloads...
             raise ValueError('Attribute {} has already been defined.'.format(p.name))
+        p._owner = old_owner
         return p
 
     def __iadd__(self, other):
@@ -146,83 +114,209 @@ class PluginAliasCollection(object):
         return len(self._attrs)
 
 
+class AliasCollection(AttributeCollection):
+
+    def __init__(self):
+        super(AliasCollection, self).__init__(Alias)
+
+    def _to_attrcls(self, p):
+        if isinstance(p, tuple) or isinstance(p, list):
+            # must be in the form (name, {param: value, ...})
+            p = self._attrcls(p[0], **p[1])
+        elif not isinstance(p, self._attrcls):
+            raise ValueError('Invalid parameter value: {}'.format(p))
+        if p.name in self._attrs:
+            raise ValueError('Attribute {} has already been defined.'.format(p.name))
+        return p
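+
+    # Illustrative (a sketch; the alias name and parameter are hypothetical):
+    # aliases may be declared either as Alias instances or as (name, params)
+    # tuples, e.g.
+    #
+    #   aliases = [Alias('quick'), ('quick', {'iterations': 1})]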
+
+
+class ListCollection(list):
+
+    def __init__(self, attrcls):  # pylint: disable=unused-argument
+        super(ListCollection, self).__init__()
+
+
+class Artifact(object):
+    """
+    This is an artifact generated during execution/post-processing of a workload.
+    Unlike metrics, this represents an actual artifact generated, such as a file.
+    This may be a "result", such as a trace, or it could be "meta data", such as logs.
+    These are distinguished using the ``kind`` attribute, which also helps WA decide
+    how it should be handled. Currently supported kinds are:
+
+        :log: A log file. Not part of "results" as such but contains information
+              about the run/workload execution that may be useful for diagnostics/meta
+              analysis.
+        :meta: A file containing metadata. This is not part of "results", but contains
+               information that may be necessary to reproduce the results
+               (contrast with ``log`` artifacts which are *not* necessary).
+        :data: This file contains new data, not available otherwise and should be 
+               considered part of the "results" generated by WA. Most traces
+               would fall into this category.
+        :export: Exported version of results or some other artifact. This signifies 
+                 that this artifact does not contain any new data that is not
+                 available elsewhere and that it may be safely discarded
+                 without losing information.
+        :raw: Signifies that this is a raw dump/log that is normally processed to 
+              extract useful information and is then discarded. In a sense, it
+              is the opposite of ``export``, but in general may also be
+              discarded.
+
+              .. note:: whether a file is marked as ``log``/``data`` or ``raw`` 
+                        depends on how important it is to preserve this file,
+                        e.g. when archiving, vs how much space it takes up.
+                        Unlike ``export`` artifacts which are (almost) always
+                        ignored by other exporters as that would never result
+                        in data loss, ``raw`` files *may* be processed by
+                        exporters if they decide that the risk of losing
+                        potentially (though unlikely) useful data is greater
+                        than the time/space cost of handling the artifact (e.g.
+                        a database uploader may choose to ignore ``raw``
+                        artifacts, whereas a network filer archiver may choose
+                        to archive them).
+
+        .. note:: The kind parameter is intended to represent the logical function of
+                  a particular artifact, not its intended means of processing --
+                  this is left entirely up to the result processors.
+
+    """
+
+    RUN = 'run'
+    ITERATION = 'iteration'
+
+    valid_kinds = ['log', 'meta', 'data', 'export', 'raw']
+
+    def __init__(self, name, path, kind, level=RUN, mandatory=False, description=None):
+        """"
+        :param name: Name that uniquely identifies this artifact.
+        :param path: The *relative* path of the artifact. Depending on the ``level``
+                     must be either relative to the run or iteration output directory.
+
+                     .. note:: this path *must* be delimited using ``/``
+                               irrespective of the operating system.
+        :param kind: The type of the artifact this is (e.g. log file, result, etc.);
+                     this will be used as a hint to result processors. This must be
+                     one of ``'log'``, ``'meta'``, ``'data'``, ``'export'``,
+                     ``'raw'``.
+        :param level: The level at which the artifact will be generated. Must be 
+                      either ``'iteration'`` or ``'run'``.
+        :param mandatory: Boolean value indicating whether this artifact must be
+                          present at the end of result processing for its level.
+        :param description: A free-form description of what this artifact is.
+
+        """
+        if kind not in self.valid_kinds:
+            msg = 'Invalid Artifact kind: {}; must be in {}'
+            raise ValueError(msg.format(kind, self.valid_kinds))
+        self.name = name
+        self.path = path.replace('/', os.sep) if path is not None else path
+        self.kind = kind
+        self.level = level
+        self.mandatory = mandatory
+        self.description = description
+
+    def exists(self, context):
+        """
+        Returns ``True`` if artifact exists within the specified context, and
+        ``False`` otherwise.
+
+        """
+        fullpath = os.path.join(context.output_directory, self.path)
+        return os.path.exists(fullpath)
+
+    def to_dict(self):
+        return copy(self.__dict__)
+
+
 class Alias(object):
     """
-    This represents a configuration alias for an extension, mapping an alternative name to
-    a set of parameter values, effectively providing an alternative set of default values.
+    This represents a configuration alias for an plugin, mapping an alternative
+    name to a set of parameter values, effectively providing an alternative set
+    of default values.
 
     """
 
     def __init__(self, name, **kwargs):
         self.name = name
-        self.parameters = kwargs
+        self.params = kwargs
         self.plugin_name = None  # gets set by the MetaClass
 
-    def validate(self, plugin):
-        plugin_params = set(p.name for p in plugin.parameters)
-        for param in self.parameters:
-            if param not in plugin_params:
+    def validate(self, ext):
+        ext_params = set(p.name for p in ext.parameters)
+        for param in self.params:
+            if param not in ext_params:
                 # Raising config error because aliases might have come through
                 # the config.
                 msg = 'Parameter {} (defined in alias {}) is invalid for {}'
-                raise ValueError(msg.format(param, self.name, plugin.name))
+                raise ConfigError(msg.format(param, self.name, ext.name))
 
 
 class PluginMeta(type):
     """
-    This basically adds some magic to extensions to make implementing new extensions, such as
-    workloads less complicated.
+    This basically adds some magic to plugins to make implementing new plugins,
+    such as workloads less complicated.
 
     It ensures that certain class attributes (specified by the ``to_propagate``
-    attribute of the metaclass) get propagated down the inheritance hierarchy. The assumption
-    is that the values of the attributes specified in the class are iterable; if that is not met,
-    Bad Things(tm) will happen.
+    attribute of the metaclass) get propagated down the inheritance hierarchy.
+    The assumption is that the values of the attributes specified in the class
+    are iterable; if that is not met, Bad Things (tm) will happen.
 
-    This also provides "virtual" method implementations. The ``super``'s version of these
-    methods (specified by the ``virtual_methods`` attribute of the metaclass) will be 
-    automatically invoked.
+    This also provides virtual method implementation, similar to those in
+    C-derived OO languages, and alias specifications.
 
     """
 
     to_propagate = [
-        ('parameters', ConfigurationPointCollection),
+        ('parameters', Parameter, AttributeCollection),
+        ('artifacts', Artifact, AttributeCollection),
+        ('core_modules', str, ListCollection),
     ]
 
-    #virtual_methods = ['validate', 'initialize', 'finalize']
-    virtual_methods = []
+    virtual_methods = ['validate', 'initialize', 'finalize']
+    global_virtuals = ['initialize', 'finalize']
 
     def __new__(mcs, clsname, bases, attrs):
-        mcs._propagate_attributes(bases, attrs)
+        mcs._propagate_attributes(bases, attrs, clsname)
         cls = type.__new__(mcs, clsname, bases, attrs)
         mcs._setup_aliases(cls)
         mcs._implement_virtual(cls, bases)
         return cls
 
     @classmethod
-    def _propagate_attributes(mcs, bases, attrs):
+    def _propagate_attributes(mcs, bases, attrs, clsname):
         """
         For attributes specified by to_propagate, their values will be a union of
-        that specified for cls and it's bases (cls values overriding those of bases
+        that specified for cls and its bases (cls values overriding those of bases
         in case of conflicts).
 
         """
-        for prop_attr, attr_collector_cls in mcs.to_propagate:
+        for prop_attr, attr_cls, attr_collector_cls in mcs.to_propagate:
             should_propagate = False
-            propagated = attr_collector_cls()
+            propagated = attr_collector_cls(attr_cls)
             for base in bases:
                 if hasattr(base, prop_attr):
                     propagated += getattr(base, prop_attr) or []
                     should_propagate = True
             if prop_attr in attrs:
-                propagated += attrs[prop_attr] or []
+                pattrs = attrs[prop_attr] or []
+                for pa in pattrs:
+                    if not isinstance(pa, basestring):
+                        pa._owner = clsname
+                propagated += pattrs
                 should_propagate = True
             if should_propagate:
+                for p in propagated:
+                    override = bool(getattr(p, "override", None))
+                    overridden = bool(getattr(p, "_overridden", None))
+                    if override != overridden:
+                        msg = "Overriding non existing parameter '{}' inside '{}'"
+                        raise ValueError(msg.format(p.name, p._owner))
                 attrs[prop_attr] = propagated
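+
+    # Propagation sketch (hypothetical names): given
+    #
+    #     class A(Plugin): parameters = [Parameter('x')]
+    #     class B(A):      parameters = [Parameter('y')]
+    #
+    # B.parameters will contain both 'x' and 'y', with B's entries taking
+    # precedence on conflicts.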
 
     @classmethod
     def _setup_aliases(mcs, cls):
         if hasattr(cls, 'aliases'):
-            aliases, cls.aliases = cls.aliases, PluginAliasCollection()
+            aliases, cls.aliases = cls.aliases, AliasCollection()
             for alias in aliases:
                 if isinstance(alias, basestring):
                     alias = Alias(alias)
@@ -248,7 +342,8 @@ class PluginMeta(type):
         for vmname in mcs.virtual_methods:
             clsmethod = getattr(cls, vmname, None)
             if clsmethod:
-                basemethods = [getattr(b, vmname) for b in bases if hasattr(b, vmname)]
+                basemethods = [getattr(b, vmname) for b in bases 
+                               if hasattr(b, vmname)]
                 methods[vmname] = [bm for bm in basemethods if bm != clsmethod]
                 methods[vmname].append(clsmethod)
 
@@ -261,7 +356,12 @@ class PluginMeta(type):
 
                     def wrapper(self, *args, **kwargs):
                         for dm in methods[name__]:
-                            dm(self, *args, **kwargs)
+                            if name__ in mcs.global_virtuals:
+                                if dm not in called_globals:
+                                    dm(self, *args, **kwargs)
+                                    called_globals.add(dm)
+                            else:
+                                dm(self, *args, **kwargs)
                     return wrapper
 
                 setattr(cls, vmname, generate_method_wrapper(vmname))
@@ -269,35 +369,52 @@ class PluginMeta(type):
 
 class Plugin(object):
     """
-    Base class for all WA plugins.
-    A plugin extends the functionality of WA in some way. Plugins are discovered
-    and loaded dynamically by the plugin loader upon invocation of WA scripts.
-    Adding an extension is a matter of placing a class that implements an appropriate
-    interface somewhere it would be discovered by the loader. That "somewhere" is
-    typically one of the plugin subdirectories under ``~/.workload_automation/``.
+    Base class for all WA plugins. A plugin extends the functionality of WA
+    in some way. Plugins are discovered and loaded dynamically by the plugin
+    loader upon invocation of WA scripts. Adding a plugin is a matter of
+    placing a class that implements an appropriate interface somewhere it
+    would be discovered by the loader. That "somewhere" is typically one of
+    the plugin subdirectories under ``~/.workload_automation/``.
 
     """
     __metaclass__ = PluginMeta
 
-    name = None
     kind = None
-    parameters = []
+    name = None
+    parameters = [
+        Parameter('modules', kind=list,
+                  description="""
+                  Lists the modules to be loaded by this plugin. A module is a
+                  plug-in that further extends the functionality of a plugin.
+                  """),
+    ]
+    artifacts = []
     aliases = []
+    core_modules = []
 
     @classmethod
     def get_default_config(cls):
         return {p.name: p.default for p in cls.parameters}
 
-    @classmethod
-    def get_parameter(cls, name):
-        for param in cls.parameters:
-            if param.name == name or name in param.aliases:
-                return param
+    @property
+    def dependencies_directory(self):
+        return _d(os.path.join(settings.dependencies_directory, self.name))
+
+    @property
+    def _classname(self):
+        return self.__class__.__name__
 
     def __init__(self, **kwargs):
-        self.logger = logging.getLogger(self.name)
+        self.logger = logging.getLogger(self._classname)
+        self._modules = []
         self.capabilities = getattr(self.__class__, 'capabilities', [])
-        self.update_config(**kwargs)
+        for param in self.parameters:
+            param.set_value(self, kwargs.get(param.name))
+        for key in kwargs:
+            if key not in self.parameters:
+                message = 'Unexpected parameter "{}" for {}'
+                raise ConfigError(message.format(key, self.name))
 
     def get_config(self):
         """
@@ -309,35 +426,21 @@ class Plugin(object):
             config[param.name] = getattr(self, param.name, None)
         return config
 
-    def update_config(self, **kwargs):
-        """
-        Updates current configuration (i.e. parameter values) of this plugin.
-
-        """
-        for param in self.parameters:
-            param.set_value(self, kwargs.get(param.name))
-        for key in kwargs:
-            if key not in self.parameters:
-                message = 'Unexpected parameter "{}" for {}'
-                raise ConfigError(message.format(key, self.name))
-
     def validate(self):
         """
-        Perform basic validation to ensure that this extension is capable of running.
-        This is intended as an early check to ensure the extension has not been mis-configured,
-        rather than a comprehensive check (that may, e.g., require access to the execution
-        context).
+        Perform basic validation to ensure that this plugin is capable of
+        running.  This is intended as an early check to ensure the plugin has
+        not been mis-configured, rather than a comprehensive check (that may,
+        e.g., require access to the execution context).
 
-        This method may also be used to enforce (i.e. set as well as check) inter-parameter
-        constraints for the extension (e.g. if valid values for parameter A depend on the value
-        of parameter B -- something that is not possible to enforce using ``Parameter``\ 's
-        ``constraint`` attribute.
+        This method may also be used to enforce (i.e. set as well as check)
+        inter-parameter constraints for the plugin (e.g. if valid values for
+        parameter A depend on the value of parameter B -- something that is not
+        possible to enforce using ``Parameter``\ 's ``constraint`` attribute).
 
         """
         if self.name is None:
-            raise ValidationError('name not set for {}'.format(self.__class__.__name__))
-        if self.kind is None:
-            raise ValidationError('kind not set for {}'.format(self.name))
+            raise ValidationError('Name not set for {}'.format(self._classname))
         for param in self.parameters:
             param.validate(self)
 
@@ -347,109 +450,120 @@ class Plugin(object):
     def finalize(self, context):
         pass
 
+    def check_artifacts(self, context, level):
+        """
+        Make sure that all mandatory artifacts have been generated.
+
+        """
+        for artifact in self.artifacts:
+            if artifact.level != level or not artifact.mandatory:
+                continue
+            fullpath = os.path.join(context.output_directory, artifact.path)
+            if not os.path.exists(fullpath):
+                message = 'Mandatory artifact "{}" has not been generated for {}.'
+                raise ValidationError(message.format(artifact.path, self.name))
+
+    def __getattr__(self, name):
+        if name == '_modules':
+            raise ValueError('_modules accessed too early!')
+        for module in self._modules:
+            if hasattr(module, name):
+                return getattr(module, name)
+        raise AttributeError(name)
+
+    def load_modules(self, loader):
+        """
+        Load the modules specified by the "modules" Parameter using the
+        provided loader. A loader can be any object that has an attribute called
+        "get_module" that implements the following signature::
+
+            get_module(name, owner, **kwargs)
+
+        and returns an instance of :class:`wlauto.core.plugin.Module`. If the
+        module with the specified name is not found, the loader must raise an
+        appropriate exception.
+
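+        Example (illustrative; ``pluginloader`` stands in for a loader object,
+        and the module names are hypothetical)::
+
+            self.modules = ['flashing', {'cpufreq': {'governor': 'performance'}}]
+            self.load_modules(pluginloader)
+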
+        """
+        modules = list(reversed(self.core_modules)) + \
+                  list(reversed(self.modules or []))
+        if not modules:
+            return
+        for module_spec in modules:
+            if not module_spec:
+                continue
+            module = self._load_module(loader, module_spec)
+            self._install_module(module)
+
     def has(self, capability):
-        """Check if this extension has the specified capability. The alternative method ``can`` is
-        identical to this. Which to use is up to the caller depending on what makes semantic sense
-        in the context of the capability, e.g. ``can('hard_reset')`` vs  ``has('active_cooling')``."""
+        """
+        Check if this plugin has the specified capability. The alternative
+        method ``can`` is identical to this. Which to use is up to the caller
+        depending on what makes semantic sense in the context of the
+        capability, e.g. ``can('hard_reset')`` vs  ``has('active_cooling')``.
+
+        """
         return capability in self.capabilities
 
     can = has
 
+    def _load_module(self, loader, module_spec):
+        if isinstance(module_spec, basestring):
+            name = module_spec
+            params = {}
+        elif isinstance(module_spec, dict):
+            if len(module_spec) != 1:
+                msg = 'Invalid module spec: {}; dict must have exactly one key -- '\
+                      'the module name.'
+                raise ValueError(msg.format(module_spec))
+            name, params = module_spec.items()[0]
+        else:
+            message = 'Invalid module spec: {}; must be a string or a one-key dict.'
+            raise ValueError(message.format(module_spec))
 
-class TargetedPluginMeta(PluginMeta):
+        if not isinstance(params, dict):
+            message = 'Invalid module spec: {}; dict value must also be a dict.'
+            raise ValueError(message.format(module_spec))
 
-    to_propagate = PluginMeta.to_propagate + [
-        ('supported_targets', list),
-        ('supported_platforms', list),
-    ]
-    virtual_methods = PluginMeta.virtual_methods + [
-        'validate_on_target',
-    ]
+        module = loader.get_module(name, owner=self, **params)
+        module.initialize(None)
+        return module
+
+    def _install_module(self, module):
+        for capability in module.capabilities:
+            if capability not in self.capabilities:
+                self.capabilities.append(capability)
+        self._modules.append(module)
 
 
 class TargetedPlugin(Plugin):
     """
-    A plugin that operates on a target device.  These kinds of plugins are created
-    with a ``devlib.Target`` instance and may only support certain kinds of targets.
+    A plugin that interacts with a target device.
 
     """
 
-    __metaclass__ = TargetedPluginMeta
+    supported_targets = []
 
-    supported_targets = []
-    supported_platforms = []
+    @classmethod
+    def check_compatible(cls, target):
+        if cls.supported_targets:
+            if target.os not in cls.supported_targets:
+                msg = 'Incompatible target OS "{}" for {}'
+                raise TargetError(msg.format(target.os, cls.name))
 
     def __init__(self, target, **kwargs):
         super(TargetedPlugin, self).__init__(**kwargs)
-        if self.supported_targets and target.os not in self.supported_targets:
-            raise TargetError('Plugin {} does not support target {}'.format(self.name, target.name))
-        if self.supported_platforms and target.platform.name not in self.supported_platforms:
-            raise TargetError('Plugin {} does not support platform {}'.format(self.name, target.platform))
+        self.check_compatible(target)
         self.target = target
 
-    def validate_on_target(self):
-        """
-        This will be invoked once at the beginning of a run after a ``Target`` 
-        has been connected and initialized. This is intended for validation 
-        that cannot be performed offline but does not depend on ephemeral 
-        state that is likely to change during the course of a run (validation
-        against such states should be done during setup of a particular
-        execution.
 
-        """
-        pass
+class PluginLoaderItem(object):
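+    """
+    Wraps an entry-point tuple describing a plugin source: its name, the
+    default package and path to search, and the implementing class
+    (given as a dotted path and resolved with ``load_class``).
+    """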
 
+    def __init__(self, ext_tuple):
+        self.name = ext_tuple.name
+        self.default_package = ext_tuple.default_package
+        self.default_path = ext_tuple.default_path
+        self.cls = load_class(ext_tuple.cls)
 
-class GlobalParameterAlias(object):
-    """
-    Represents a "global alias" for an plugin parameter. A global alias
-    is specified at the top-level of config rather namespaced under an plugin
-    name.
-
-    Multiple plugins may have parameters with the same global_alias if they are
-    part of the same inheritance hierarchy and one parameter is an override of the
-    other. This class keeps track of all such cases in its plugins dict.
-
-    """
-
-    def __init__(self, name):
-        self.name = name
-        self.plugins = {}
-
-    def iteritems(self):
-        for ext in self.plugins.itervalues():
-            yield (self.get_param(ext), ext)
-
-    def get_param(self, ext):
-        for param in ext.parameters:
-            if param.global_alias == self.name:
-                return param
-        message = 'Plugin {} does not have a parameter with global alias {}'
-        raise ValueError(message.format(ext.name, self.name))
-
-    def update(self, other_ext):
-        self._validate_ext(other_ext)
-        self.plugins[other_ext.name] = other_ext
-
-    def _validate_ext(self, other_ext):
-        other_param = self.get_param(other_ext)
-        for param, ext in self.iteritems():
-            if ((not (issubclass(ext, other_ext) or issubclass(other_ext, ext))) and
-                    other_param.kind != param.kind):
-                message = 'Duplicate global alias {} declared in {} and {} plugins with different types'
-                raise PluginLoaderError(message.format(self.name, ext.name, other_ext.name))
-            if not param.name == other_param.name:
-                message = 'Two params {} in {} and {} in {} both declare global alias {}'
-                raise PluginLoaderError(message.format(param.name, ext.name,
-                                                 other_param.name, other_ext.name, self.name))
-
-    def __str__(self):
-        text = 'GlobalAlias({} => {})'
-        extlist = ', '.join(['{}.{}'.format(e.name, p.name) for p, e in self.iteritems()])
-        return text.format(self.name, extlist)
-
-
-MODNAME_TRANS = string.maketrans(':/\\.', '____')
 
 class PluginLoader(object):
     """
@@ -461,19 +575,19 @@ class PluginLoader(object):
 
     """
 
-
-    def __init__(self, packages=None, paths=None, ignore_paths=None, keep_going=False):
+    def __init__(self, packages=None, paths=None, ignore_paths=None,
+                 keep_going=False):
         """
         params::
 
             :packages: List of packages to load plugins from.
             :paths: List of paths to be searched for Python modules containing
                     WA plugins.
-            :ignore_paths: List of paths to ignore when search for WA plugins (these would
-                           typically be subdirectories of one or more locations listed in
-                           ``paths`` parameter.
-            :keep_going: Specifies whether to keep going if an error occurs while loading
-                         plugins.
+            :ignore_paths: List of paths to ignore when searching for WA
+                           plugins (these would typically be subdirectories
+                           of one or more locations listed in the ``paths``
+                           parameter).
+            :keep_going: Specifies whether to keep going if an error occurs while
+                         loading plugins.
         """
         self.logger = logging.getLogger('pluginloader')
         self.keep_going = keep_going
@@ -490,6 +604,8 @@ class PluginLoader(object):
     def update(self, packages=None, paths=None, ignore_paths=None):
         """ Load plugins from the specified paths/packages
         without clearing or reloading existing plugin. """
+        msg = 'Updating from: packages={} paths={}'
+        self.logger.debug(msg.format(packages, paths))
         if packages:
             self.packages.extend(packages)
             self._discover_from_packages(packages)
@@ -505,6 +621,7 @@ class PluginLoader(object):
 
     def reload(self):
         """ Clear all discovered items and re-run the discovery. """
+        self.logger.debug('Reloading')
         self.clear()
         self._discover_from_packages(self.packages)
         self._discover_from_paths(self.paths, self.ignore_paths)
@@ -519,15 +636,16 @@ class PluginLoader(object):
             try:
                 return self.plugins[name]
             except KeyError:
-                raise NotFoundError('Plugins {} not found.'.format(name))
+                raise NotFoundError('Plugin {} not found.'.format(name))
         if kind not in self.kind_map:
             raise ValueError('Unknown plugin type: {}'.format(kind))
         store = self.kind_map[kind]
         if name not in store:
-            raise NotFoundError('Plugins {} is not {} {}.'.format(name, get_article(kind), kind))
+            msg = 'Plugin {} is not {} {}.'
+            raise NotFoundError(msg.format(name, get_article(kind), kind))
         return store[name]
 
-    def get_plugin(self, name, kind=None, *args, **kwargs):
+    def get_plugin(self, name=None, kind=None, *args, **kwargs):
         """
         Return plugin of the specified kind with the specified name. Any
         additional parameters will be passed to the plugin's __init__.
@@ -548,7 +666,7 @@ class PluginLoader(object):
         """
         real_name, alias_config = self.resolve_alias(name)
         base_default_config = self.get_plugin_class(real_name).get_default_config()
-        return merge_dicts(base_default_config, alias_config, list_duplicates='last', dict_type=OrderedDict)
+        return merge_dicts_simple(base_default_config, alias_config)
 
     def list_plugins(self, kind=None):
         """
@@ -588,7 +706,7 @@ class PluginLoader(object):
             return (alias_name, {})
         if alias_name in self.aliases:
             alias = self.aliases[alias_name]
-            return (alias.plugin_name, alias.parameters)
+            return (alias.plugin_name, alias.params)
         raise NotFoundError('Could not find plugin or alias "{}"'.format(alias_name))
 
     # Internal methods.
@@ -605,41 +723,45 @@ class PluginLoader(object):
             loader.get_plugin('foo', kind='device')
 
         """
+        error_msg = 'No plugins of type "{}" discovered'
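+        # For each plugin kind, accessors are generated on the fly, e.g.
+        # (assuming a 'device' kind): get_device(name), list_devices(),
+        # has_device(name).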
         if name.startswith('get_'):
             name = name.replace('get_', '', 1)
             if name in self.kind_map:
                 def __wrapper(pname, *args, **kwargs):
                     return self.get_plugin(pname, name, *args, **kwargs)
                 return __wrapper
+            raise NotFoundError(error_msg.format(name))
         if name.startswith('list_'):
             name = name.replace('list_', '', 1).rstrip('s')
             if name in self.kind_map:
-                def __wrapper(*args, **kwargs):
+                def __wrapper(*args, **kwargs):  # pylint: disable=E0102
                     return self.list_plugins(name, *args, **kwargs)
                 return __wrapper
+            raise NotFoundError(error_msg.format(name))
         if name.startswith('has_'):
             name = name.replace('has_', '', 1)
             if name in self.kind_map:
-                def __wrapper(pname, *args, **kwargs):
+                def __wrapper(pname, *args, **kwargs):  # pylint: disable=E0102
                     return self.has_plugin(pname, name, *args, **kwargs)
                 return __wrapper
+            raise NotFoundError(error_msg.format(name))
         raise AttributeError(name)
 
-
     def _discover_from_packages(self, packages):
         self.logger.debug('Discovering plugins in packages')
         try:
             for package in packages:
                 for module in walk_modules(package):
                     self._discover_in_module(module)
-        except ImportError as e:
-            source = getattr(e, 'path', package)
+        except HostError as e:
             message = 'Problem loading plugins from {}: {}'
-            raise PluginLoaderError(message.format(source, e.message))
+            raise PluginLoaderError(message.format(e.module, str(e.orig_exc)),
+                                    e.exc_info)
 
     def _discover_from_paths(self, paths, ignore_paths):
         paths = paths or []
         ignore_paths = ignore_paths or []
+
         self.logger.debug('Discovering plugins in paths')
         for path in paths:
             self.logger.debug('Checking path %s', path)
@@ -654,7 +776,7 @@ class PluginLoader(object):
                 if should_skip:
                     continue
                 for fname in files:
-                    if not os.path.splitext(fname)[1].lower() == '.py':
+                    if os.path.splitext(fname)[1].lower() != '.py':
                         continue
                     filepath = os.path.join(root, fname)
                     self._discover_from_file(filepath)
@@ -669,10 +791,11 @@ class PluginLoader(object):
                 self.logger.warning('Failed to load {}'.format(filepath))
                 self.logger.warning('Got: {}'.format(e))
             else:
-                raise PluginLoaderError('Failed to load {}'.format(filepath), sys.exc_info())
+                msg = 'Failed to load {}'
+                raise LoaderError(msg.format(filepath), sys.exc_info())
         except Exception as e:
             message = 'Problem loading plugins from {}: {}'
-            raise PluginLoaderError(message.format(filepath, e))
+            raise LoaderError(message.format(filepath, e))
 
     def _discover_in_module(self, module):  # NOQA pylint: disable=too-many-branches
         self.logger.debug('Checking module %s', module.__name__)
@@ -699,6 +822,7 @@ class PluginLoader(object):
                             raise e
         finally:
             log.dedent()
 
     def _add_found_plugin(self, obj):
         """
@@ -708,8 +832,9 @@ class PluginLoader(object):
         self.logger.debug('Adding %s %s', obj.kind, obj.name)
         key = identifier(obj.name.lower())
         if key in self.plugins or key in self.aliases:
-            raise PluginLoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
-        # Plugins are tracked both, in a common plugins
+            msg = '{} "{}" already exists.'
+            raise PluginLoaderError(msg.format(obj.kind, obj.name))
+        # Plugins are tracked both in a common plugins
         # dict, and in per-plugin kind dict (as retrieving
         # plugins by kind is a common use case.
         self.plugins[key] = obj
@@ -718,17 +843,6 @@ class PluginLoader(object):
         for alias in obj.aliases:
             alias_id = identifier(alias.name.lower())
             if alias_id in self.plugins or alias_id in self.aliases:
-                raise PluginLoaderError('{} "{}" already exists.'.format(obj.kind, obj.name))
+                msg = '{} "{}" already exists.'
+                raise PluginLoaderError(msg.format(obj.kind, obj.name))
             self.aliases[alias_id] = alias
-
-        # Update global aliases list. If a global alias is already in the list,
-        # then make sure this plugin is in the same parent/child hierarchy
-        # as the one already found.
-        for param in obj.parameters:
-            if param.global_alias:
-                if param.global_alias not in self.global_param_aliases:
-                    ga = GlobalParameterAlias(param.global_alias)
-                    ga.update(obj)
-                    self.global_param_aliases[ga.name] = ga
-                else:  # global alias already exists.
-                    self.global_param_aliases[param.global_alias].update(obj)
diff --git a/wa/framework/pluginloader.py b/wa/framework/pluginloader.py
index 17924a4e..d03fa932 100644
--- a/wa/framework/pluginloader.py
+++ b/wa/framework/pluginloader.py
@@ -17,53 +17,73 @@ import sys
 
 class __LoaderWrapper(object):
 
+    @property
+    def kinds(self):
+        if not self._loader:
+            self.reset()
+        return self._loader.kind_map.keys()
+
+    @property
+    def kind_map(self):
+        if not self._loader:
+            self.reset()
+        return self._loader.kind_map
+
     def __init__(self):
         self._loader = None
 
     def reset(self):
-        # These imports cannot be done at top level, because of 
+        # These imports cannot be done at top level, because of
         # sys.modules manipulation below
         from wa.framework.plugin import PluginLoader
         from wa.framework.configuration.core import settings
         self._loader = PluginLoader(settings.plugin_packages,
-                                    settings.plugin_paths,
-                                    settings.plugin_ignore_paths)
+                                    [settings.plugins_directory], [])
 
     def update(self, packages=None, paths=None, ignore_paths=None):
-        if not self._loader: self.reset()
+        if not self._loader:
+            self.reset()
         self._loader.update(packages, paths, ignore_paths)
 
     def reload(self):
-        if not self._loader: self.reset()
+        if not self._loader:
+            self.reset()
         self._loader.reload()
 
     def list_plugins(self, kind=None):
-        if not self._loader: self.reset()
+        if not self._loader:
+            self.reset()
         return self._loader.list_plugins(kind)
 
     def has_plugin(self, name, kind=None):
-        if not self._loader: self.reset()
+        if not self._loader:
+            self.reset()
         return self._loader.has_plugin(name, kind)
 
     def get_plugin_class(self, name, kind=None):
-        if not self._loader: self.reset()
-        return _load.get_plugin_class(name, kind)
+        if not self._loader:
+            self.reset()
+        return self._loader.get_plugin_class(name, kind)
 
-    def get_plugin(self, name, kind=None, *args, **kwargs):
-        if not self._loader: self.reset()
-        return self._loader.get_plugin(name, kind=kind, *args, **kwargs)
+    def get_plugin(self, name=None, kind=None, *args, **kwargs):
+        if not self._loader:
+            self.reset()
+        return self._loader.get_plugin(name=name, kind=kind, *args, **kwargs)
 
     def get_default_config(self, name):
-        if not self._loader: self.reset()
+        if not self._loader:
+            self.reset()
         return self._loader.get_default_config(name)
 
     def resolve_alias(self, name):
-        if not self._loader: self.reset()
+        if not self._loader:
+            self.reset()
         return self._loader.resolve_alias(name)
 
     def __getattr__(self, name):
-        if not self._loader: self.reset()
+        if not self._loader:
+            self.reset()
         return getattr(self._loader, name)
 
 
-sys.modules[__name__] =  __LoaderWrapper()
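+# Replace this module with a wrapper instance, so that the underlying
+# PluginLoader is only created lazily, on first use (see reset() above).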
+sys.modules[__name__] = __LoaderWrapper()
diff --git a/wa/framework/resource.py b/wa/framework/resource.py
index abf77827..e86eb830 100644
--- a/wa/framework/resource.py
+++ b/wa/framework/resource.py
@@ -60,6 +60,23 @@ class GetterPriority(object):
     remote = -20
 
 
+class __NullOwner(object):
+    """Represents an owner for a resource not owned by anyone."""
+
+    name = 'noone'
+    dependencies_directory = settings.dependencies_directory
+
+    def __getattr__(self, name):
+        return None
+
+    def __str__(self):
+        return 'no-one'
+
+    __repr__ = __str__
+
+
+NO_ONE = __NullOwner()
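+# NO_ONE is a shared null-object owner: attribute access on it returns None,
+# so resource-handling code does not need to special-case unowned resources.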
+
 class Resource(object):
     """
     Represents a resource that needs to be resolved. This can be pretty much
@@ -95,6 +112,73 @@ class Resource(object):
         return '<{}\'s {}>'.format(self.owner, self.name)
 
 
+class FileResource(Resource):
+    """
+    Base class for all resources that are a regular file in the
+    file system.
+
+    """
+
+    def delete(self, instance):
+        os.remove(instance)
+
+
+class File(FileResource):
+
+    name = 'file'
+
+    def __init__(self, owner, path, url=None):
+        super(File, self).__init__(owner)
+        self.path = path
+        self.url = url
+
+    def __str__(self):
+        return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
+
+
+class PluginAsset(File):
+
+    name = 'plugin_asset'
+
+    def __init__(self, owner, path):
+        super(PluginAsset, self).__init__(owner, os.path.join(owner.name, path))
+
+
+class Executable(FileResource):
+
+    name = 'executable'
+
+    def __init__(self, owner, platform, filename):
+        super(Executable, self).__init__(owner)
+        self.platform = platform
+        self.filename = filename
+
+    def __str__(self):
+        return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
+
+
+class ReventFile(FileResource):
+
+    name = 'revent'
+
+    def __init__(self, owner, stage):
+        super(ReventFile, self).__init__(owner)
+        self.stage = stage
+
+
+class JarFile(FileResource):
+
+    name = 'jar'
+
+
+class ApkFile(FileResource):
+
+    name = 'apk'
+
+    def __init__(self, owner, version):
+        super(ApkFile, self).__init__(owner)
+        self.version = version
+
+
 class ResourceGetter(Plugin):
     """
     Base class for implementing resolvers. Defines resolver
@@ -201,18 +285,20 @@ class ResourceResolver(object):
 
     """
 
-    def __init__(self):
-        self.logger = logging.getLogger('resolver')
+    def __init__(self, config):
+        self.logger = logging.getLogger(self.__class__.__name__)
         self.getters = defaultdict(prioritylist)
+        self.config = config
 
-    def load(self, loader=pluginloader):
+    def load(self):
         """
         Discover getters under the specified source. The source could
         be either a python package/module or a path.
 
         """
-        for rescls in loader.list_resource_getters():
-            getter = loader.get_resource_getter(rescls.name, resolver=self)
+
+        for rescls in pluginloader.list_resource_getters():
+            getter = self.config.get_plugin(name=rescls.name,
+                                            kind="resource_getter",
+                                            resolver=self)
             getter.register()
 
     def get(self, resource, strict=True, *args, **kwargs):
@@ -259,7 +345,7 @@ class ResourceResolver(object):
         means should register with lower (negative) priorities.
 
         """
-        self.logger.debug('Registering {}'.format(getter.name))
+        self.logger.debug('Registering {} for {} resources'.format(getter.name, kind))
         self.getters[kind].add(getter, priority)
 
     def unregister(self, getter, kind):
@@ -273,420 +359,6 @@ class ResourceResolver(object):
         except ValueError:
             raise ValueError('Resource getter {} is not installed.'.format(getter.name))
 
-
-class __NullOwner(object):
-    """Represents an owner for a resource not owned by anyone."""
-
-    name = 'noone'
-    dependencies_directory = settings.dependencies_directory
-
-    def __getattr__(self, name):
-        return None
-
-    def __str__(self):
-        return 'no-one'
-
-    __repr__ = __str__
-
-
-NO_ONE = __NullOwner()
-
-
-class FileResource(Resource):
-    """
-    Base class for all resources that are a regular file in the
-    file system.
-
-    """
-
-    def delete(self, instance):
-        os.remove(instance)
-
-
-class File(FileResource):
-
-    name = 'file'
-
-    def __init__(self, owner, path, url=None):
-        super(File, self).__init__(owner)
-        self.path = path
-        self.url = url
-
-    def __str__(self):
-        return '<{}\'s {} {}>'.format(self.owner, self.name, self.path or self.url)
-
-
-class ExtensionAsset(File):
-
-    name = 'extension_asset'
-
-    def __init__(self, owner, path):
-        super(ExtensionAsset, self).__init__(
-            owner, os.path.join(owner.name, path))
-
-
-class Executable(FileResource):
-
-    name = 'executable'
-
-    def __init__(self, owner, platform, filename):
-        super(Executable, self).__init__(owner)
-        self.platform = platform
-        self.filename = filename
-
-    def __str__(self):
-        return '<{}\'s {} {}>'.format(self.owner, self.platform, self.filename)
-
-
-class ReventFile(FileResource):
-
-    name = 'revent'
-
-    def __init__(self, owner, stage):
-        super(ReventFile, self).__init__(owner)
-        self.stage = stage
-
-
-class JarFile(FileResource):
-
-    name = 'jar'
-
-
-class ApkFile(FileResource):
-
-    name = 'apk'
-
-
-class PackageFileGetter(ResourceGetter):
-
-    name = 'package_file'
-    description = """
-    Looks for exactly one file with the specified extension in the owner's
-    directory. If a version is specified on invocation of get, it will filter
-    the discovered file based on that version.  Versions are treated as
-    case-insensitive.
-    """
-
-    extension = None
-
-    def register(self):
-        self.resolver.register(self, self.extension, GetterPriority.package)
-
-    def get(self, resource, **kwargs):
-        resource_dir = os.path.dirname(
-            sys.modules[resource.owner.__module__].__file__)
-        version = kwargs.get('version')
-        return get_from_location_by_extension(resource, resource_dir, self.extension, version)
-
-
-class EnvironmentFileGetter(ResourceGetter):
-
-    name = 'environment_file'
-    description = """
-    Looks for exactly one file with the specified extension in the owner's
-    directory. If a version is specified on invocation of get, it will filter
-    the discovered file based on that version.  Versions are treated as
-    case-insensitive.
-    """
-
-    extension = None
-
-    def register(self):
-        self.resolver.register(self, self.extension,
-                               GetterPriority.environment)
-
-    def get(self, resource, **kwargs):
-        resource_dir = resource.owner.dependencies_directory
-        version = kwargs.get('version')
-        return get_from_location_by_extension(resource, resource_dir, self.extension, version)
-
-
-class ReventGetter(ResourceGetter):
-    """Implements logic for identifying revent files."""
-
-    def get_base_location(self, resource):
-        raise NotImplementedError()
-
-    def register(self):
-        self.resolver.register(self, 'revent', GetterPriority.package)
-
-    def get(self, resource, **kwargs):
-        filename = '.'.join([resource.owner.device.name,
-                             resource.stage, 'revent']).lower()
-        location = _d(os.path.join(
-            self.get_base_location(resource), 'revent_files'))
-        for candidate in os.listdir(location):
-            if candidate.lower() == filename.lower():
-                return os.path.join(location, candidate)
-
-
-class PackageApkGetter(PackageFileGetter):
-    name = 'package_apk'
-    extension = 'apk'
-
-
-class PackageJarGetter(PackageFileGetter):
-    name = 'package_jar'
-    extension = 'jar'
-
-
-class PackageReventGetter(ReventGetter):
-
-    name = 'package_revent'
-
-    def get_base_location(self, resource):
-        return _get_owner_path(resource)
-
-
-class EnvironmentApkGetter(EnvironmentFileGetter):
-    name = 'environment_apk'
-    extension = 'apk'
-
-
-class EnvironmentJarGetter(EnvironmentFileGetter):
-    name = 'environment_jar'
-    extension = 'jar'
-
-
-class EnvironmentReventGetter(ReventGetter):
-
-    name = 'enviroment_revent'
-
-    def get_base_location(self, resource):
-        return resource.owner.dependencies_directory
-
-
-class ExecutableGetter(ResourceGetter):
-
-    name = 'exe_getter'
-    resource_type = 'executable'
-    priority = GetterPriority.environment
-
-    def get(self, resource, **kwargs):
-        if settings.binaries_repository:
-            path = os.path.join(settings.binaries_repository,
-                                resource.platform, resource.filename)
-            if os.path.isfile(path):
-                return path
-
-
-class PackageExecutableGetter(ExecutableGetter):
-
-    name = 'package_exe_getter'
-    priority = GetterPriority.package
-
-    def get(self, resource, **kwargs):
-        path = os.path.join(_get_owner_path(resource), 'bin',
-                            resource.platform, resource.filename)
-        if os.path.isfile(path):
-            return path
-
-
-class EnvironmentExecutableGetter(ExecutableGetter):
-
-    name = 'env_exe_getter'
-
-    def get(self, resource, **kwargs):
-        paths = [
-            os.path.join(resource.owner.dependencies_directory, 'bin',
-                         resource.platform, resource.filename),
-            os.path.join(settings.environment_root, 'bin',
-                         resource.platform, resource.filename),
-        ]
-        for path in paths:
-            if os.path.isfile(path):
-                return path
-
-
-class DependencyFileGetter(ResourceGetter):
-
-    name = 'filer'
-    description = """
-    Gets resources from the specified mount point. Copies them the local dependencies
-    directory, and returns the path to the local copy.
-
-    """
-    resource_type = 'file'
-    relative_path = ''  # May be overridden by subclasses.
-
-    default_mount_point = '/'
-    priority = GetterPriority.remote
-
-    parameters = [
-        Parameter('mount_point', default='/', global_alias='filer_mount_point',
-                  description='Local mount point for the remote filer.'),
-    ]
-
-    def __init__(self, resolver, **kwargs):
-        super(DependencyFileGetter, self).__init__(resolver, **kwargs)
-        self.mount_point = settings.filer_mount_point or self.default_mount_point
-
-    def get(self, resource, **kwargs):
-        force = kwargs.get('force')
-        remote_path = os.path.join(
-            self.mount_point, self.relative_path, resource.path)
-        local_path = os.path.join(
-            resource.owner.dependencies_directory, os.path.basename(resource.path))
-
-        if not os.path.isfile(local_path) or force:
-            if not os.path.isfile(remote_path):
-                return None
-            self.logger.debug('Copying {} to {}'.format(
-                remote_path, local_path))
-            shutil.copy(remote_path, local_path)
-
-        return local_path
-
-
-class PackageCommonDependencyGetter(ResourceGetter):
-
-    name = 'packaged_common_dependency'
-    resource_type = 'file'
-    priority = GetterPriority.package - 1  # check after owner-specific locations
-
-    def get(self, resource, **kwargs):
-        path = os.path.join(settings.package_directory,
-                            'common', resource.path)
-        if os.path.exists(path):
-            return path
-
-
-class EnvironmentCommonDependencyGetter(ResourceGetter):
-
-    name = 'environment_common_dependency'
-    resource_type = 'file'
-    # check after owner-specific locations
-    priority = GetterPriority.environment - 1
-
-    def get(self, resource, **kwargs):
-        path = os.path.join(settings.dependencies_directory,
-                            os.path.basename(resource.path))
-        if os.path.exists(path):
-            return path
-
-
-class PackageDependencyGetter(ResourceGetter):
-
-    name = 'packaged_dependency'
-    resource_type = 'file'
-    priority = GetterPriority.package
-
-    def get(self, resource, **kwargs):
-        owner_path = inspect.getfile(resource.owner.__class__)
-        path = os.path.join(os.path.dirname(owner_path), resource.path)
-        if os.path.exists(path):
-            return path
-
-
-class EnvironmentDependencyGetter(ResourceGetter):
-
-    name = 'environment_dependency'
-    resource_type = 'file'
-    priority = GetterPriority.environment
-
-    def get(self, resource, **kwargs):
-        path = os.path.join(resource.owner.dependencies_directory,
-                            os.path.basename(resource.path))
-        if os.path.exists(path):
-            return path
-
-
-class ExtensionAssetGetter(DependencyFileGetter):
-
-    name = 'extension_asset'
-    resource_type = 'extension_asset'
-    relative_path = 'workload_automation/assets'
-
-
-class RemoteFilerGetter(ResourceGetter):
-
-    name = 'filer_assets'
-    description = """
-    Finds resources on a (locally mounted) remote filer and caches them locally.
-
-    This assumes that the filer is mounted on the local machine (e.g. as a samba share).
-
-    """
-    priority = GetterPriority.remote
-    resource_type = ['apk', 'file', 'jar', 'revent']
-
-    parameters = [
-        Parameter('remote_path', global_alias='remote_assets_path', default='',
-                  description="""
-                  Path, on the local system, where the assets are located.
-                  """),
-        Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
-                  description="""
-                  If ``True``, will always attempt to fetch assets from the
-                  remote, even if a local cached copy is available.
-                  """),
-    ]
-
-    def get(self, resource, **kwargs):
-        version = kwargs.get('version')
-        if resource.owner:
-            remote_path = os.path.join(self.remote_path, resource.owner.name)
-            local_path = os.path.join(
-                settings.environment_root, resource.owner.dependencies_directory)
-            return self.try_get_resource(resource, version, remote_path, local_path)
-        else:
-            result = None
-            for entry in os.listdir(remote_path):
-                remote_path = os.path.join(self.remote_path, entry)
-                local_path = os.path.join(
-                    settings.environment_root, settings.dependencies_directory, entry)
-                result = self.try_get_resource(
-                    resource, version, remote_path, local_path)
-                if result:
-                    break
-            return result
-
-    def try_get_resource(self, resource, version, remote_path, local_path):
-        if not self.always_fetch:
-            result = self.get_from(resource, version, local_path)
-            if result:
-                return result
-        if remote_path:
-            # Didn't find it cached locally; now check the remoted
-            result = self.get_from(resource, version, remote_path)
-            if not result:
-                return result
-        else:  # remote path is not set
-            return None
-        # Found it remotely, cache locally, then return it
-        local_full_path = os.path.join(
-            _d(local_path), os.path.basename(result))
-        self.logger.debug('cp {} {}'.format(result, local_full_path))
-        shutil.copy(result, local_full_path)
-        return local_full_path
-
-    def get_from(self, resource, version, location):  # pylint: disable=no-self-use
-        if resource.name in ['apk', 'jar']:
-            return get_from_location_by_extension(resource, location, resource.name, version)
-        elif resource.name == 'file':
-            filepath = os.path.join(location, resource.path)
-            if os.path.exists(filepath):
-                return filepath
-        elif resource.name == 'revent':
-            filename = '.'.join(
-                [resource.owner.device.name, resource.stage, 'revent']).lower()
-            alternate_location = os.path.join(location, 'revent_files')
-            # There tends to be some confusion as to where revent files should
-            # be placed. This looks both in the extension's directory, and in
-            # 'revent_files' subdirectory under it, if it exists.
-            if os.path.isdir(alternate_location):
-                for candidate in os.listdir(alternate_location):
-                    if candidate.lower() == filename.lower():
-                        return os.path.join(alternate_location, candidate)
-            if os.path.isdir(location):
-                for candidate in os.listdir(location):
-                    if candidate.lower() == filename.lower():
-                        return os.path.join(location, candidate)
-        else:
-            message = 'Unexpected resource type: {}'.format(resource.name)
-            raise ValueError(message)
-
-
 # Utility functions
 
 def get_from_location_by_extension(resource, location, extension, version=None):
diff --git a/wa/framework/resource_getters.py b/wa/framework/resource_getters.py
new file mode 100644
index 00000000..2b49863d
--- /dev/null
+++ b/wa/framework/resource_getters.py
@@ -0,0 +1,510 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+"""
+This module contains the standard set of resource getters used by Workload Automation.
+
+"""
+import os
+import sys
+import shutil
+import inspect
+import httplib
+import logging
+import json
+
+import requests
+
+from wa import Parameter, settings, __file__ as __base_filepath
+from wa.framework.resource import ResourceGetter, GetterPriority, NO_ONE
+from wa.framework.exception import ResourceError
+from wa.utils.misc import (ensure_directory_exists as _d,
+                           ensure_file_directory_exists as _f, sha256, urljoin)
+from wa.utils.types import boolean
+
+
+logging.getLogger("requests").setLevel(logging.WARNING)
+logging.getLogger("urllib3").setLevel(logging.WARNING)
+
+
+class PackageFileGetter(ResourceGetter):
+
+    name = 'package_file'
+    description = """
+    Looks for exactly one file with the extension specified by the ``plugin``
+    attribute in the owner's directory. If a version is specified on
+    invocation of get, it will filter the discovered file based on that
+    version. Versions are treated as case-insensitive.
+    """
+
+    plugin = None
+
+    def register(self):
+        self.resolver.register(self, self.plugin, GetterPriority.package)
+
+    def get(self, resource, **kwargs):
+        resource_dir = os.path.dirname(sys.modules[resource.owner.__module__].__file__)
+        version = kwargs.get('version')
+        return get_from_location_by_plugin(resource, resource_dir, self.plugin, version)
+
+
+class EnvironmentFileGetter(ResourceGetter):
+
+    name = 'environment_file'
+    description = """Looks for exactly one file with the specified plugin in the owner's directory. If a version
+    is specified on invocation of get, it will filter the discovered file based on that version.
+    Versions are treated as case-insensitive."""
+
+    plugin = None
+
+    def register(self):
+        self.resolver.register(self, self.plugin, GetterPriority.environment)
+
+    def get(self, resource, **kwargs):
+        resource_dir = resource.owner.dependencies_directory
+
+        version = kwargs.get('version')
+        return get_from_location_by_plugin(resource, resource_dir, self.plugin, version)
+
+
+class ReventGetter(ResourceGetter):
+    """Implements logic for identifying revent files."""
+
+    def get_base_location(self, resource):
+        raise NotImplementedError()
+
+    def register(self):
+        self.resolver.register(self, 'revent', GetterPriority.package)
+
+    def get(self, resource, **kwargs):
+        filename = '.'.join([resource.owner.device.model, resource.stage, 'revent']).lower()
+        location = _d(os.path.join(self.get_base_location(resource), 'revent_files'))
+        for candidate in os.listdir(location):
+            if candidate.lower() == filename.lower():
+                return os.path.join(location, candidate)
+
+
+class PackageApkGetter(PackageFileGetter):
+    name = 'package_apk'
+    plugin = 'apk'
+
+
+class PackageJarGetter(PackageFileGetter):
+    name = 'package_jar'
+    plugin = 'jar'
+
+
+class PackageReventGetter(ReventGetter):
+
+    name = 'package_revent'
+
+    def get_base_location(self, resource):
+        return get_owner_path(resource)
+
+
+class EnvironmentApkGetter(EnvironmentFileGetter):
+    name = 'environment_apk'
+    plugin = 'apk'
+
+
+class EnvironmentJarGetter(EnvironmentFileGetter):
+    name = 'environment_jar'
+    plugin = 'jar'
+
+
+class EnvironmentReventGetter(ReventGetter):
+
+    name = 'enviroment_revent'
+
+    def get_base_location(self, resource):
+        return resource.owner.dependencies_directory
+
+
+class ExecutableGetter(ResourceGetter):
+
+    name = 'exe_getter'
+    resource_type = 'executable'
+    priority = GetterPriority.environment
+
+    def get(self, resource, **kwargs):
+        if settings.assets_repository:
+            path = os.path.join(settings.assets_repository, resource.platform, resource.filename)
+            if os.path.isfile(path):
+                return path
+
+
+class PackageExecutableGetter(ExecutableGetter):
+
+    name = 'package_exe_getter'
+    priority = GetterPriority.package
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(get_owner_path(resource), 'bin', resource.platform, resource.filename)
+        if os.path.isfile(path):
+            return path
+
+
+class EnvironmentExecutableGetter(ExecutableGetter):
+
+    name = 'env_exe_getter'
+
+    def get(self, resource, **kwargs):
+        paths = [
+            os.path.join(resource.owner.dependencies_directory, 'bin',
+                         resource.platform, resource.filename),
+            os.path.join(settings.user_directory, 'bin',
+                         resource.platform, resource.filename),
+        ]
+        for path in paths:
+            if os.path.isfile(path):
+                return path
+
+
+class DependencyFileGetter(ResourceGetter):
+
+    name = 'filer'
+    description = """
+    Gets resources from the specified mount point. Copies them to the local dependencies
+    directory, and returns the path to the local copy.
+
+    """
+    resource_type = 'file'
+    relative_path = ''  # May be overridden by subclasses.
+
+    priority = GetterPriority.remote
+
+    parameters = [
+        Parameter('mount_point', default='/', global_alias='remote_assets_path',
+                  description='Local mount point for the remote filer.'),
+    ]
+
+    def __init__(self, resolver, **kwargs):
+        super(DependencyFileGetter, self).__init__(resolver, **kwargs)
+
+    def get(self, resource, **kwargs):
+        force = kwargs.get('force')
+        remote_path = os.path.join(self.mount_point, self.relative_path, resource.path)
+        local_path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
+
+        if not os.path.isfile(local_path) or force:
+            if not os.path.isfile(remote_path):
+                return None
+            self.logger.debug('Copying {} to {}'.format(remote_path, local_path))
+            shutil.copy(remote_path, local_path)
+
+        return local_path
+
+
+class PackageCommonDependencyGetter(ResourceGetter):
+
+    name = 'packaged_common_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.package - 1  # check after owner-specific locations
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(settings.package_directory, 'common', resource.path)
+        if os.path.exists(path):
+            return path
+
+
+class EnvironmentCommonDependencyGetter(ResourceGetter):
+
+    name = 'environment_common_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.environment - 1  # check after owner-specific locations
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(settings.dependencies_directory,
+                            os.path.basename(resource.path))
+        if os.path.exists(path):
+            return path
+
+
+class PackageDependencyGetter(ResourceGetter):
+
+    name = 'packaged_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.package
+
+    def get(self, resource, **kwargs):
+        owner_path = inspect.getfile(resource.owner.__class__)
+        path = os.path.join(os.path.dirname(owner_path), resource.path)
+        if os.path.exists(path):
+            return path
+
+
+class EnvironmentDependencyGetter(ResourceGetter):
+
+    name = 'environment_dependency'
+    resource_type = 'file'
+    priority = GetterPriority.environment
+
+    def get(self, resource, **kwargs):
+        path = os.path.join(resource.owner.dependencies_directory, os.path.basename(resource.path))
+        if os.path.exists(path):
+            return path
+
+
+class PluginAssetGetter(DependencyFileGetter):
+
+    name = 'plugin_asset'
+    resource_type = 'plugin_asset'
+
+
+class HttpGetter(ResourceGetter):
+
+    name = 'http_assets'
+    description = """
+    Downloads resources from a server based on an index fetched from the specified URL.
+
+    Given a URL, this will try to fetch ``<URL>/index.json``. The index file maps plugin
+    names to a list of corresponding asset descriptions. Each asset description contains a path
+    (relative to the base URL) of the resource and a SHA256 hash, so that this Getter can
+    verify whether the resource on the remote has changed.
+
+    For example, let's assume we want to get the APK file for workload "foo", and that
+    assets are hosted at ``http://example.com/assets``. This Getter will first try to
+    download ``http://example.com/assets/index.json``. The index file may contain
+    something like ::
+
+        {
+            "foo": [
+                {
+                    "path": "foo-app.apk",
+                    "sha256": "b14530bb47e04ed655ac5e80e69beaa61c2020450e18638f54384332dffebe86"
+                },
+                {
+                    "path": "subdir/some-other-asset.file",
+                    "sha256": "48d9050e9802246d820625717b72f1c2ba431904b8484ca39befd68d1dbedfff"
+                }
+            ]
+        }
+
+    This Getter will look through the list of assets for "foo" (in this case,
+    two) and check the paths until it finds one matching the resource (in this
+    case, "foo-app.apk"). Finally, it will try to download that file relative
+    to the base URL and plugin name
+    (in this case, "http://example.com/assets/foo/foo-app.apk"). The downloaded version
+    will be cached locally, so that in the future, the getter will check the SHA256 hash
+    of the local file against the one advertised inside index.json, and provided that hasn't
+    changed, it won't try to download the file again.
+
+    """
+    priority = GetterPriority.remote
+    resource_type = ['apk', 'file', 'jar', 'revent']
+
+    parameters = [
+        Parameter('url', global_alias='remote_assets_url',
+                  description="""URL of the index file for assets on an HTTP server."""),
+        Parameter('username',
+                  description="""User name for authenticating with assets URL"""),
+        Parameter('password',
+                  description="""Password for authenticationg with assets URL"""),
+        Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
+                  description="""If ``True``, will always attempt to fetch assets from the remote, even if
+                                 a local cached copy is available."""),
+        Parameter('chunk_size', kind=int, default=1024,
+                  description="""Chunk size for streaming large assets."""),
+    ]
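+    # The assets server can be pointed at via the ``remote_assets_url``
+    # global alias, e.g. (hypothetical server):
+    #
+    #     remote_assets_url: http://example.com/assets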
+
+    def __init__(self, resolver, **kwargs):
+        super(HttpGetter, self).__init__(resolver, **kwargs)
+        self.index = None
+
+    def get(self, resource, **kwargs):
+        if not resource.owner:
+            return  # TODO: add support for unowned resources
+        if not self.index:
+            self.index = self.fetch_index()
+        asset = self.resolve_resource(resource)
+        if not asset:
+            return
+        return self.download_asset(asset, resource.owner.name)
+
+    def fetch_index(self):
+        if not self.url:
+            return {}
+        index_url = urljoin(self.url, 'index.json')
+        response = self.geturl(index_url)
+        if response.status_code != httplib.OK:
+            message = 'Could not fetch "{}"; received "{} {}"'
+            self.logger.error(message.format(index_url, response.status_code, response.reason))
+            return {}
+        return json.loads(response.content)
+
+    def download_asset(self, asset, owner_name):
+        url = urljoin(self.url, owner_name, asset['path'])
+        local_path = _f(os.path.join(settings.dependencies_directory, '__remote',
+                                     owner_name, asset['path'].replace('/', os.sep)))
+        if os.path.isfile(local_path) and not self.always_fetch:
+            local_sha = sha256(local_path)
+            if local_sha == asset['sha256']:
+                self.logger.debug('Local SHA256 matches; not re-downloading')
+                return local_path
+        self.logger.debug('Downloading {}'.format(url))
+        response = self.geturl(url, stream=True)
+        if response.status_code != httplib.OK:
+            message = 'Could not download asset "{}"; received "{} {}"'
+            self.logger.warning(message.format(url, response.status_code, response.reason))
+            return
+        with open(local_path, 'wb') as wfh:
+            for chunk in response.iter_content(chunk_size=self.chunk_size):
+                wfh.write(chunk)
+        return local_path
+
+    def geturl(self, url, stream=False):
+        if self.username:
+            auth = (self.username, self.password)
+        else:
+            auth = None
+        return requests.get(url, auth=auth, stream=stream)
+
+    def resolve_resource(self, resource):
+        assets = self.index.get(resource.owner.name, {})
+        if not assets:
+            return {}
+        if resource.name in ['apk', 'jar']:
+            paths = [a['path'] for a in assets]
+            version = getattr(resource, 'version', None)
+            found = get_from_list_by_plugin(resource, paths, resource.name, version)
+            if found:
+                for a in assets:
+                    if a['path'] == found:
+                        return a
+        elif resource.name == 'revent':
+            filename = '.'.join([resource.owner.device.model, resource.stage, 'revent']).lower()
+            for asset in assets:
+                pathname = os.path.basename(asset['path']).lower()
+                if pathname == filename:
+                    return asset
+        else:  # file
+            for asset in assets:
+                if asset['path'].lower() == resource.path.lower():
+                    return asset
+
+
+class RemoteFilerGetter(ResourceGetter):
+
+    name = 'filer_assets'
+    description = """
+    Finds resources on a (locally mounted) remote filer and caches them locally.
+
+    This assumes that the filer is mounted on the local machine (e.g. as a samba share).
+
+    """
+    priority = GetterPriority.remote
+    resource_type = ['apk', 'file', 'jar', 'revent']
+
+    parameters = [
+        Parameter('remote_path', global_alias='remote_assets_path', default='',
+                  description="""Path, on the local system, where the assets are located."""),
+        Parameter('always_fetch', kind=boolean, default=False, global_alias='always_fetch_remote_assets',
+                  description="""If ``True``, will always attempt to fetch assets from the remote, even if
+                                 a local cached copy is available."""),
+    ]
+
+    def get(self, resource, **kwargs):
+        version = kwargs.get('version')
+        if resource.owner:
+            remote_path = os.path.join(self.remote_path, resource.owner.name)
+            local_path = os.path.join(settings.user_directory, '__filer', resource.owner.dependencies_directory)
+            return self.try_get_resource(resource, version, remote_path, local_path)
+        else:
+            result = None
+            for entry in os.listdir(self.remote_path):
+                remote_path = os.path.join(self.remote_path, entry)
+                local_path = os.path.join(settings.user_directory, '__filer', settings.dependencies_directory, entry)
+                result = self.try_get_resource(resource, version, remote_path, local_path)
+                if result:
+                    break
+            return result
+
+    def try_get_resource(self, resource, version, remote_path, local_path):
+        if not self.always_fetch:
+            result = self.get_from(resource, version, local_path)
+            if result:
+                return result
+        if remote_path:
+            # Didn't find it cached locally; now check the remote
+            result = self.get_from(resource, version, remote_path)
+            if not result:
+                return result
+        else:  # remote path is not set
+            return None
+        # Found it remotely, cache locally, then return it
+        local_full_path = os.path.join(_d(local_path), os.path.basename(result))
+        self.logger.debug('cp {} {}'.format(result, local_full_path))
+        shutil.copy(result, local_full_path)
+        return local_full_path
+
+    def get_from(self, resource, version, location):  # pylint: disable=no-self-use
+        if resource.name in ['apk', 'jar']:
+            return get_from_location_by_plugin(resource, location, resource.name, version)
+        elif resource.name == 'file':
+            filepath = os.path.join(location, resource.path)
+            if os.path.exists(filepath):
+                return filepath
+        elif resource.name == 'revent':
+            filename = '.'.join([resource.owner.device.model, resource.stage, 'revent']).lower()
+            alternate_location = os.path.join(location, 'revent_files')
+            # There tends to be some confusion as to where revent files should
+            # be placed. This looks both in the plugin's directory, and in
+            # 'revent_files' subdirectory under it, if it exists.
+            if os.path.isdir(alternate_location):
+                for candidate in os.listdir(alternate_location):
+                    if candidate.lower() == filename.lower():
+                        return os.path.join(alternate_location, candidate)
+            if os.path.isdir(location):
+                for candidate in os.listdir(location):
+                    if candidate.lower() == filename.lower():
+                        return os.path.join(location, candidate)
+        else:
+            raise ValueError('Unexpected resource type: {}'.format(resource.name))
+
+
+# Utility functions
+
+def get_from_location_by_plugin(resource, location, plugin, version=None):
+    try:
+        found_files = [os.path.join(location, f) for f in os.listdir(location)]
+    except OSError:
+        return None
+    try:
+        return get_from_list_by_plugin(resource, found_files, plugin, version)
+    except ResourceError:
+        raise ResourceError('More than one .{} found in {} for {}.'.format(plugin,
+                                                                           location,
+                                                                           resource.owner.name))
+
+
+def get_from_list_by_plugin(resource, filelist, plugin, version=None):
+    filelist = [ff for ff in filelist
+                if os.path.splitext(ff)[1].lower().endswith(plugin)]
+    if version:
+        filelist = [ff for ff in filelist if version.lower() in os.path.basename(ff).lower()]
+    if len(filelist) == 1:
+        return filelist[0]
+    elif not filelist:
+        return None
+    else:
+        raise ResourceError('More than one .{} found in {} for {}.'.format(plugin,
+                                                                           filelist,
+                                                                           resource.owner.name))
+
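+# Example (hypothetical file list): given ['/tmp/foo-1.0.apk'] with
+# plugin='apk' and version='1.0', get_from_list_by_plugin() returns that
+# path; zero matches return None, and multiple matches raise ResourceError.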
+
+def get_owner_path(resource):
+    if resource.owner is NO_ONE:
+        return os.path.join(os.path.dirname(__base_filepath), 'common')
+    else:
+        return os.path.dirname(sys.modules[resource.owner.__module__].__file__)
diff --git a/wa/framework/run.py b/wa/framework/run.py
index 90e717e9..72aacffd 100644
--- a/wa/framework/run.py
+++ b/wa/framework/run.py
@@ -26,7 +26,7 @@ from wa.framework.exception import JobError
 from wa.utils import counter
 from wa.utils.serializer import json
 from wa.utils.misc import ensure_directory_exists as _d
-from wa.utils.types import TreeNode, caseless_string
+from wa.utils.types import caseless_string
 
 
 
diff --git a/wa/framework/signal.py b/wa/framework/signal.py
index 1f9a5024..dd19a5e5 100644
--- a/wa/framework/signal.py
+++ b/wa/framework/signal.py
@@ -45,11 +45,14 @@ class Signal(object):
             :param name: name is the identifier of the Signal object. Signal instances with
                         the same name refer to the same execution stage/stage.
             :param invert_priority: boolean parameter that determines whether multiple
-                                    callbacks for the same signal should be ordered with
-                                    ascending or descending priorities. Typically this flag
-                                    should be set to True if the Signal is triggered AFTER an
-                                    a state/stage has been reached. That way callbacks with high
-                                    priorities will be called right after the event has occured.
+                                    callbacks for the same signal should be
+                                    ordered with ascending or descending
+                                    priorities. Typically this flag should be
+                                    set to True if the Signal is triggered
+                                    AFTER a state/stage has been reached.
+                                    That way callbacks with high priorities
+                                    will be called right after the event has
+                                    occurred.
         """
         self.name = name
         self.description = description
@@ -94,6 +97,10 @@ WARNING_LOGGED = Signal('warning-logged')
 # even if there is an error, so you cannot assume in the handler that the
 # device has booted successfully. In most cases, you should instead use the
 # non-paired signals below.
+BEFORE_RUN_INIT = Signal('before-run-init', invert_priority=True)
+SUCCESSFUL_RUN_INIT = Signal('successful-run-init')
+AFTER_RUN_INIT = Signal('after-run-init')
+
 BEFORE_FLASHING = Signal('before-flashing', invert_priority=True)
 SUCCESSFUL_FLASHING = Signal('successful-flashing')
 AFTER_FLASHING = Signal('after-flashing')
diff --git a/wa/framework/target.py b/wa/framework/target.py
new file mode 100644
index 00000000..fa9323cd
--- /dev/null
+++ b/wa/framework/target.py
@@ -0,0 +1,80 @@
+from devlib import Platform, AndroidTarget
+# Cpuinfo, KernelVersion and KernelConfig are referenced below; they are
+# assumed to be provided by devlib.
+from devlib.target import Cpuinfo, KernelVersion, KernelConfig
+
+
+class TargetInfo(object):
+
+    @staticmethod
+    def from_pod(pod):
+        instance = TargetInfo()
+        instance.target = pod['target']
+        instance.abi = pod['abi']
+        instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
+        instance.os = pod['os']
+        instance.os_version = pod['os_version']
+        instance.abi = pod['abi']
+        instance.is_rooted = pod['is_rooted']
+        instance.kernel_version = KernelVersion(pod['kernel_release'], 
+                                                pod['kernel_version'])
+        instance.kernel_config = KernelConfig(pod['kernel_config'])
+
+        if pod["target"] == "AndroidTarget":
+            instance.screen_resolution = pod['screen_resolution']
+            instance.prop = pod['prop']
+            instance.prop = pod['android_id']
+
+        return instance
+
+    def __init__(self, target=None):
+        if target:
+            self.target = target.__class__.__name__
+            self.cpuinfo = target.cpuinfo
+            self.os = target.os
+            self.os_version = target.os_version
+            self.abi = target.abi
+            self.is_rooted = target.is_rooted
+            self.kernel_version = target.kernel_version
+            self.kernel_config = target.config
+
+            if isinstance(target, AndroidTarget):
+                self.screen_resolution = target.screen_resolution
+                self.prop = target.getprop()
+                self.android_id = target.android_id
+
+        else:
+            self.target = None
+            self.cpuinfo = None
+            self.os = None
+            self.os_version = None
+            self.abi = None
+            self.is_rooted = None
+            self.kernel_version = None
+            self.kernel_config = None
+
+            if isinstance(target, AndroidTarget):
+                self.screen_resolution = None
+                self.prop = None
+                self.android_id = None
+
+    def to_pod(self):
+        pod = {}
+        pod['target'] = self.target
+        pod['abi'] = self.abi
+        pod['cpuinfo'] = self.cpuinfo.sections
+        pod['os'] = self.os
+        pod['os_version'] = self.os_version
+        pod['abi'] = self.abi
+        pod['is_rooted'] = self.is_rooted
+        pod['kernel_release'] = self.kernel_version.release
+        pod['kernel_version'] = self.kernel_version.version
+        pod['kernel_config'] = dict(self.kernel_config.iteritems())
+
+        if self.target == "AndroidTarget":
+            pod['screen_resolution'] = self.screen_resolution
+            pod['prop'] = self.prop
+            pod['android_id'] = self.android_id
+
+        return pod
+
diff --git a/wa/target/__init__.py b/wa/framework/target/__init__.py
similarity index 100%
rename from wa/target/__init__.py
rename to wa/framework/target/__init__.py
diff --git a/wa/target/config.py b/wa/framework/target/config.py
similarity index 100%
rename from wa/target/config.py
rename to wa/framework/target/config.py
diff --git a/wa/framework/target/descriptor.py b/wa/framework/target/descriptor.py
new file mode 100644
index 00000000..34966367
--- /dev/null
+++ b/wa/framework/target/descriptor.py
@@ -0,0 +1,253 @@
+from collections import OrderedDict
+from copy import copy
+
+from devlib import (LinuxTarget, AndroidTarget, LocalLinuxTarget,
+                    Platform, Juno, TC2, Gem5SimulationPlatform)
+
+from wa.framework import pluginloader
+from wa.framework.exception import PluginLoaderError
+from wa.framework.plugin import Plugin, Parameter
+from wa.utils.misc import isiterable
+from wa.utils.types import list_of_strings, list_of_ints
+
+
+def get_target_descriptions(loader=pluginloader):
+    targets = {}
+    for cls in loader.list_target_descriptors():
+        descriptor = cls()
+        for desc in descriptor.get_descriptions():
+            if desc.name in targets:
+                msg = 'Duplicate target "{}" returned by {} and {}'
+                prev_dtor = targets[desc.name].source
+                raise PluginLoaderError(msg.format(desc.name, prev_dtor.name,
+                                                   descriptor.name))
+            targets[desc.name] = desc
+    return targets.values()
+
+
+class TargetDescription(object):
+
+    def __init__(self, name, source, description=None, target=None, platform=None, 
+                 conn=None, target_params=None, platform_params=None,
+                 conn_params=None):
+        self.name = name
+        self.source = source
+        self.description = description
+        self.target = target
+        self.platform = platform
+        self.connection = conn
+        self._set('target_params', target_params)
+        self._set('platform_params', platform_params)
+        self._set('conn_params', conn_params)
+
+    def _set(self, attr, vals):
+        if vals is None:
+            vals = {}
+        elif isiterable(vals):
+            if not hasattr(vals, 'iteritems'):
+                vals = {v.name: v for v in vals}
+        else:
+            msg = '{} must be iterable; got "{}"'
+            raise ValueError(msg.format(attr, vals))
+        setattr(self, attr, vals)
+
+
+class TargetDescriptor(Plugin):
+
+    kind = 'target_descriptor'
+
+    def get_descriptions(self):
+        return []
+
+
+COMMON_TARGET_PARAMS = [
+    Parameter('working_directory', kind=str,
+              description='''
+              On-target working directory that will be used by WA. This 
+              directory must be writable by the user WA logs in as without
+              the need for privilege elevation.
+              '''),
+    Parameter('executables_directory', kind=str,
+              description='''
+              On-target directory where WA will install its executable
+              binaries.  This location must allow execution. This location does
+              *not* need to be writable by unprivileged users or rooted devices
+              (WA will install with elevated privileges as necessary).
+              '''),
+    Parameter('modules', kind=list_of_strings,
+              description='''
+              A list of additional modules to be installed for the target.
+
+              ``devlib`` implements functionality for particular subsystems as
+              modules.  A number of "default" modules (e.g. for cpufreq
+              subsystem) are loaded automatically, unless explicitly disabled.
+              If additional modules need to be loaded, they may be specified
+              using this parameter.
+
+              Please see ``devlib`` documentation for information on the available
+              modules.
+              '''),
+]
+
+COMMON_PLATFORM_PARAMS = [
+    Parameter('core_names', kind=list_of_strings,
+              description='''
+              List of names of CPU cores in the order that they appear to the
+              kernel. If not specified, it will be inferred from the platform.
+              '''),
+    Parameter('core_clusters', kind=list_of_ints,
+              description='''
+              Cluster mapping corresponding to the cores in ``core_names``.
+              Cluster indexing starts at ``0``.  If not specified, this will be
+              inferred from ``core_names`` -- consecutive cores with the same
+              name will be assumed to share a cluster.
+              '''),
+    Parameter('big_core', kind=str,
+              description='''
+              The name of the big cores in a big.LITTLE system. If not
+              specified, this will be inferred, either from the name (if one of
+              the names in ``core_names`` matches known big cores), or by
+              assuming that the last cluster is big.
+              '''),
+    Parameter('model', kind=str,
+              description='''
+              Hardware model of the platform. If not specified, an attempt will
+              be made to read it from target.
+              '''),
+    Parameter('modules', kind=list_of_strings,
+              description='''
+              An additional list of modules to be loaded into the target.
+              '''),
+]
+
+VEXPRESS_PLATFORM_PARAMS = [
+    Parameter('serial_port', kind=str,
+              description='''
+              The serial device/port on the host for the initial connection to
+              the target (used for early boot, flashing, etc).
+              '''),
+    Parameter('baudrate', kind=int,
+              description='''
+              Baud rate for the serial connection.
+              '''),
+    Parameter('vemsd_mount', kind=str,
+              description='''
+              VExpress MicroSD card mount location. This is a MicroSD card in
+              the VExpress device that is mounted on the host via USB. The card
+              contains configuration files for the platform and firmware and
+              kernel images to be flashed.
+              '''),
+    Parameter('bootloader', kind=str,
+              allowed_values=['uefi', 'uefi-shell', 'u-boot', 'bootmon'],
+              description='''
+              Selects the bootloader mechanism used by the board. Depending on
+              firmware version, a number of possible boot mechanisms may be used.
+
+              Please see ``devlib`` documentation for descriptions.
+              '''),
+    Parameter('hard_reset_method', kind=str,
+              allowed_values=['dtr', 'reboottxt'],
+              description='''
+              There are a couple of ways to reset a VersatileExpress board if the
+              software running on the board becomes unresponsive. Both require
+              configuration to be enabled (please see ``devlib`` documentation).
+
+              ``dtr``: toggle the DTR line on the serial connection
+              ``reboottxt``: create ``reboot.txt`` in the root of the VEMSD mount.
+
+              '''),
+]
+
+GEM5_PLATFORM_PARAMS = [
+    Parameter('host_output_dir', kind=str, mandatory=True,
+              description='''
+              Path on the host where gem5 output (e.g. stats file) will be placed.
+              '''),
+    Parameter('gem5_bin', kind=str, mandatory=True,
+              description='''
+              Path to the gem5 binary.
+              '''),
+    Parameter('gem5_args', kind=str, mandatory=True,
+              description='''
+              Arguments to be passed to the gem5 binary.
+              '''),
+    Parameter('gem5_virtio', kind=str, mandatory=True,
+              description='''
+              VirtIO device setup arguments to be passed to gem5. VirtIO is used
+              to transfer files between the simulation and the host.
+              '''),
+]
+
+# name --> (target_class, params_list, defaults)
+TARGETS = {
+    'linux': (LinuxTarget, COMMON_TARGET_PARAMS, None),
+    'android': (AndroidTarget, COMMON_TARGET_PARAMS +
+                [Parameter('package_data_directory', kind=str, default='/data/data',
+                           description='''
+                           Directory containing Android data.
+                           '''),
+                ], None),
+    'local': (LocalLinuxTarget, COMMON_TARGET_PARAMS, None),
+}
+
+# name --> (platform_class, params_list, defaults)
+PLATFORMS = {
+    'generic': (Platform, COMMON_PLATFORM_PARAMS, None),
+    'juno': (Juno, COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
+            {
+                 'vemsd_mount': '/media/JUNO',
+                 'baudrate': 115200,
+                 'bootloader': 'u-boot',
+                 'hard_reset_method': 'dtr',
+            }),
+    'tc2': (TC2, COMMON_PLATFORM_PARAMS + VEXPRESS_PLATFORM_PARAMS,
+            {
+                 'vemsd_mount': '/media/VEMSD',
+                 'baudrate': 38400,
+                 'bootloader': 'bootmon',
+                 'hard_reset_method': 'reboottxt',
+            }),
+    'gem5': (Gem5SimulationPlatform, GEM5_PLATFORM_PARAMS, None),
+}
+
+
+class DefaultTargetDescriptor(TargetDescriptor):
+
+    name = 'devlib_targets'
+
+    description = """
+    The default target descriptor that provides descriptions in the form
+    <platform>_<target>.
+
+    These map directly onto ``Target``\ s and ``Platform``\ s supplied by ``devlib``.
+
+    """
+
+    def get_descriptions(self):
+        result = []
+        for target_name, target_tuple in TARGETS.iteritems():
+            target, target_params = self._get_item(target_tuple)
+            for platform_name, platform_tuple in PLATFORMS.iteritems():
+                platform, platform_params = self._get_item(platform_tuple)
+
+                name = '{}_{}'.format(platform_name, target_name)
+                td = TargetDescription(name, self)
+                td.target = target
+                td.platform = platform
+                td.target_params = target_params
+                td.platform_params = platform_params
+                result.append(td)
+        return result
+
+    def _get_item(self, item_tuple):
+        cls, params, defaults = item_tuple
+        if not defaults:
+            return cls, params
+
+        param_map = OrderedDict((p.name, copy(p)) for p in params)
+        for name, value in defaults.iteritems():
+            if name not in param_map:
+                raise ValueError('Unexpected default "{}"'.format(name))
+            param_map[name].default = value
+        return cls, param_map.values()
+
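
# A minimal sketch (not part of the patch) of consuming the descriptions
# produced above: names follow the <platform>_<target> pattern, and the
# PLATFORMS defaults are folded into copies of each Parameter object.
for desc in DefaultTargetDescriptor().get_descriptions():
    print desc.name   # e.g. 'generic_linux', 'juno_android', 'gem5_local', ...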
diff --git a/wa/framework/target/info.py b/wa/framework/target/info.py
new file mode 100644
index 00000000..4341e155
--- /dev/null
+++ b/wa/framework/target/info.py
@@ -0,0 +1,75 @@
+from devlib import AndroidTarget
+from devlib.exception import TargetError
+from devlib.target import KernelConfig, KernelVersion, Cpuinfo
+
+
+class TargetInfo(object):
+
+    @staticmethod
+    def from_pod(pod):
+        instance = TargetInfo()
+        instance.target = pod['target']
+        instance.abi = pod['abi']
+        instance.cpuinfo = Cpuinfo(pod['cpuinfo'])
+        instance.os = pod['os']
+        instance.os_version = pod['os_version']
+        instance.is_rooted = pod['is_rooted']
+        instance.kernel_version = KernelVersion(pod['kernel_release'], 
+                                                pod['kernel_version'])
+        instance.kernel_config = KernelConfig(pod['kernel_config'])
+
+        if pod["target"] == "AndroidTarget":
+            instance.screen_resolution = pod['screen_resolution']
+            instance.prop = pod['prop']
+            instance.android_id = pod['android_id']
+
+        return instance
+
+    def __init__(self, target=None):
+        if target:
+            self.target = target.__class__.__name__
+            self.cpuinfo = target.cpuinfo
+            self.os = target.os
+            self.os_version = target.os_version
+            self.abi = target.abi
+            self.is_rooted = target.is_rooted
+            self.kernel_version = target.kernel_version
+            self.kernel_config = target.config
+
+            if isinstance(target, AndroidTarget):
+                self.screen_resolution = target.screen_resolution
+                self.prop = target.getprop()
+                self.android_id = target.android_id
+
+        else:
+            self.target = None
+            self.cpuinfo = None
+            self.os = None
+            self.os_version = None
+            self.abi = None
+            self.is_rooted = None
+            self.kernel_version = None
+            self.kernel_config = None
+
+            self.screen_resolution = None
+            self.prop = None
+            self.android_id = None
+
+    def to_pod(self):
+        pod = {}
+        pod['target'] = self.target
+        pod['abi'] = self.abi
+        pod['cpuinfo'] = self.cpuinfo.sections
+        pod['os'] = self.os
+        pod['os_version'] = self.os_version
+        pod['is_rooted'] = self.is_rooted
+        pod['kernel_release'] = self.kernel_version.release
+        pod['kernel_version'] = self.kernel_version.version
+        pod['kernel_config'] = dict(self.kernel_config.iteritems())
+
+        if self.target == "AndroidTarget":
+            pod['screen_resolution'] = self.screen_resolution
+            pod['prop'] = self.prop
+            pod['android_id'] = self.android_id
+
+        return pod
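
# A round-trip sketch (not part of the patch): TargetInfo serialises to a
# plain dict ("pod"), which is what gets persisted via wa.utils.serializer,
# and can be rebuilt from one; 'target' is assumed to be a connected
# devlib target instance.
info = TargetInfo(target)
pod = info.to_pod()                  # a plain dict, safe to dump as JSON/YAML
restored = TargetInfo.from_pod(pod)
assert restored.abi == info.abi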
diff --git a/wa/target/manager.py b/wa/framework/target/manager.py
similarity index 97%
rename from wa/target/manager.py
rename to wa/framework/target/manager.py
index 2ee09c34..659516d6 100644
--- a/wa/target/manager.py
+++ b/wa/framework/target/manager.py
@@ -6,28 +6,31 @@ import time
 import shutil
 import sys
 
-from wa.framework.plugin import Parameter
 from wa.framework import signal
 from wa.framework.exception import WorkerThreadError, ConfigError
-from wa.target.info import TargetInfo
-from wa.target.runtime_config import (SysfileValuesRuntimeConfig,
-                                      HotplugRuntimeConfig,
-                                      CpufreqRuntimeConfig,
-                                      CpuidleRuntimeConfig)
+from wa.framework.plugin import Parameter
+from wa.framework.target.info import TargetInfo
+from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig,
+                                                HotplugRuntimeConfig,
+                                                CpufreqRuntimeConfig,
+                                                CpuidleRuntimeConfig)
+from wa.utils.misc import isiterable
 from wa.utils.serializer import json
 
 
 from devlib import LocalLinuxTarget, LinuxTarget, AndroidTarget
 from devlib.utils.types import identifier
 # from wa.target.manager import AndroidTargetManager, LinuxTargetManager
-# from wa.framework.plugin import Plugin, Parameter
 
 
 class TargetManager(object):
+
     name = 'target-manager'
 
     description = """
-    Instanciated the required target and performs configuration and validation of the device.
+    Instantiates the required target and performs configuration and validation
+    of the device.
+
     """
 
     parameters = [
diff --git a/wa/target/runtime_config.py b/wa/framework/target/runtime_config.py
similarity index 100%
rename from wa/target/runtime_config.py
rename to wa/framework/target/runtime_config.py
diff --git a/wa/framework/workload.py b/wa/framework/workload.py
index b6defc03..850bceca 100644
--- a/wa/framework/workload.py
+++ b/wa/framework/workload.py
@@ -32,9 +32,10 @@ class Workload(TargetedPlugin):
 
     def init_resources(self, context):
         """
-        This method may be used to perform early resource discovery and initialization. This is invoked
-        during the initial loading stage and before the device is ready, so cannot be used for any
-        device-dependent initialization. This method is invoked before the workload instance is
+        This method may be used to perform early resource discovery and
+        initialization. This is invoked during the initial loading stage and
+        before the device is ready, so cannot be used for any device-dependent
+        initialization. This method is invoked before the workload instance is
         validated.
 
         """
@@ -59,7 +60,10 @@ class Workload(TargetedPlugin):
         pass
 
     def run(self, context):
-        """Execute the workload. This is the method that performs the actual "work" of the"""
+        """
+        Execute the workload. This is the method that performs the actual
+        "work" of the.
+        """
         pass
 
     def update_result(self, context):
@@ -83,7 +87,8 @@ class Workload(TargetedPlugin):
 
 class UiAutomatorGUI(object):
 
-    def __init__(self, target, package='', klass='UiAutomation', method='runUiAutoamtion'):
+    def __init__(self, target, package='', klass='UiAutomation',
+                 method='runUiAutoamtion'):
         self.target = target
         self.uiauto_package = package
         self.uiauto_class = klass
diff --git a/wa/target/info.py b/wa/target/info.py
deleted file mode 100644
index 75b00dea..00000000
--- a/wa/target/info.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from devlib.exception import TargetError
-from devlib.target import KernelConfig, KernelVersion, Cpuinfo
-
-
-class TargetInfo(object):
-
-    hmp_config_dir = '/sys/kernel/hmp'
-
-    def __init__(self):
-        self.os = None
-        self.kernel_version = None
-        self.kernel_cmdline = None
-        self.kernel_config = {}
-        self.sched_features = []
-        self.cpuinfo = None
-        self.os_version = {}
-        self.properties = {}
-
-    @staticmethod
-    def from_pod(pod):
-        kconfig_text = '\n'.join('{}={}'.format(k, v) for k, v in pod['kernel_config'].iteritems())
-        sections = []
-        for section in pod['cpuinfo']:
-            text = '\n'.join('{} : {}'.format(k, v) for k, v in section.iteritems())
-            sections.append(text)
-        cpuinfo_text = '\n\n'.join(sections)
-
-        instance = TargetInfo()
-        instance.os = pod['os']
-        instance.kernel_version = KernelVersion(pod['kernel_version'])
-        instance.kernel_cmdline = pod['kernel_cmdline']
-        instance.kernel_config = KernelConfig(kconfig_text)
-        instance.sched_features = pod['sched_features']
-        instance.cpuinfo = Cpuinfo(cpuinfo_text)
-        instance.os_version = pod['os_version']
-        instance.properties = pod['properties']
-        return instance
-
-    def to_pod(self):
-        kversion = str(self.kernel_version)
-        kconfig = {k: v for k, v in self.kernel_config.iteritems()}
-        return dict(
-            os=self.os,
-            kernel_version=kversion,
-            kernel_cmdline=self.kernel_cmdline,
-            kernel_config=kconfig,
-            sched_features=self.sched_features,
-            cpuinfo=self.cpuinfo.sections,
-            os_version=self.os_version,
-            properties=self.properties,
-        )
-
-    def load(self, target):
-        self.os = target.os
-        print target.is_rooted
-        self.os_version = target.os_version
-        self.kernel_version = target.kernel_version
-        self.kernel_cmdline = target.execute('cat /proc/cmdline',
-                                             as_root=target.is_rooted).strip()
-        self.kernel_config = target.config
-        self.cpuinfo = target.cpuinfo
-        try:
-            output = target.read_value('/sys/kernel/debug/sched_features')
-            self.sched_features = output.strip().split()
-        except TargetError:
-            pass
-        self.properties = self._get_properties(target)
-
-    def _get_properties(self, target):
-        props = {}
-        if target.file_exists(self.hmp_config_dir):
-            props['hmp'] = self._get_hmp_configuration(target)
-        if target.os == 'android':
-            props.update(target.getprop().iteritems())
-        return props
-
-    def _get_hmp_configuration(self, target):
-        hmp_props = {}
-        for entry in target.list_directory(self.hmp_config_dir):
-            path = target.path.join(self.hmp_config_dir, entry)
-            try:
-                hmp_props[entry] = target.read_value(path)
-            except TargetError:
-                pass
-        return hmp_props
diff --git a/wa/utils/formatter.py b/wa/utils/formatter.py
new file mode 100644
index 00000000..de5af654
--- /dev/null
+++ b/wa/utils/formatter.py
@@ -0,0 +1,148 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from wa.utils.terminalsize import get_terminal_size
+
+
+INDENTATION_FROM_TITLE = 4
+
+
+class TextFormatter(object):
+
+    """
+    This is a base class for text formatting. It mainly asks subclasses to
+    implement two methods, add_item and format_data. The former adds new text
+    to the formatter, whereas the latter returns the formatted text. The name
+    attribute represents the name of the formatter.
+    """
+
+    name = None
+    data = None
+
+    def __init__(self):
+        pass
+
+    def add_item(self, new_data, item_title):
+        """
+        Add new item to the text formatter.
+
+        :param new_data: The data to be added
+        :param item_title: A title for the added data
+        """
+        raise NotImplementedError()
+
+    def format_data(self):
+        """
+        Return the formatted text.
+        """
+        raise NotImplementedError()
+
+
+class DescriptionListFormatter(TextFormatter):
+
+    name = 'description_list_formatter'
+    data = None
+
+    def get_text_width(self):
+        if not self._text_width:
+            self._text_width, _ = get_terminal_size()  # pylint: disable=unpacking-non-sequence
+        return self._text_width
+
+    def set_text_width(self, value):
+        self._text_width = value
+
+    text_width = property(get_text_width, set_text_width)
+
+    def __init__(self, title=None, width=None):
+        super(DescriptionListFormatter, self).__init__()
+        self.data_title = title
+        self._text_width = width
+        self.longest_word_length = 0
+        self.data = []
+
+    def add_item(self, new_data, item_title):
+        if len(item_title) > self.longest_word_length:
+            self.longest_word_length = len(item_title)
+        self.data[len(self.data):] = [(item_title, self._remove_newlines(new_data))]
+
+    def format_data(self):
+        parag_indentation = self.longest_word_length + INDENTATION_FROM_TITLE
+        string_formatter = '{}:<{}{} {}'.format('{', parag_indentation, '}', '{}')
+
+        formatted_data = ''
+        if self.data_title:
+            formatted_data += self.data_title
+
+        line_width = self.text_width - parag_indentation
+        for title, paragraph in self.data:
+            formatted_data += '\n'
+            title_len = self.longest_word_length - len(title)
+            title += ':'
+            if title_len > 0:
+                title = (' ' * title_len) + title
+
+            parag_lines = self._break_lines(paragraph, line_width).splitlines()
+            if parag_lines:
+                formatted_data += string_formatter.format(title, parag_lines[0])
+                for line in parag_lines[1:]:
+                    formatted_data += '\n' + string_formatter.format('', line)
+            else:
+                formatted_data += title[:-1]
+
+        self.text_width = None
+        return formatted_data
+
+    # Return the text's paragraphs separated into a list, such that each index
+    # in the list is a single text paragraph with no new lines.
+    def _remove_newlines(self, new_data):  # pylint: disable=R0201
+        parag_list = ['']
+        parag_num = 0
+        prv_parag = None
+        # For each paragraph separated by a new line
+        for paragraph in new_data.splitlines():
+            if paragraph:
+                parag_list[parag_num] += ' ' + paragraph
+            # if the previous line is NOT empty, then add new empty index for
+            # the next paragraph
+            elif prv_parag:
+                parag_num += 1
+                parag_list.append('')
+            prv_parag = paragraph
+
+        # sometimes, we end up with an empty string as the last item, so we remove it
+        if not parag_list[-1]:
+            return parag_list[:-1]
+        return parag_list
+
+    def _break_lines(self, parag_list, line_width):  # pylint: disable=R0201
+        formatted_paragraphs = []
+        for para in parag_list:
+            words = para.split()
+            if words:
+                formatted_text = words.pop(0)
+                current_width = len(formatted_text)
+                # for each word in the paragraph, line width is an accumulation of
+                # word length + 1 (1 is for the space after each word).
+                for word in words:
+                    word = word.strip()
+                    if current_width + len(word) + 1 >= line_width:
+                        formatted_text += '\n' + word
+                        current_width = len(word)
+                    else:
+                        formatted_text += ' ' + word
+                        current_width += len(word) + 1
+                formatted_paragraphs.append(formatted_text)
+        return '\n\n'.join(formatted_paragraphs)
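
# A minimal usage sketch (not part of the patch), assuming an 80-column
# layout: titles are right-aligned to the longest one, and paragraphs are
# wrapped to the remaining width.
formatter = DescriptionListFormatter(title='Parameters:', width=80)
formatter.add_item('On-target working directory that will be used by WA.',
                   'working_directory')
formatter.add_item('A list of additional modules to be installed.', 'modules')
print formatter.format_data()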
diff --git a/wa/utils/log.py b/wa/utils/log.py
new file mode 100644
index 00000000..8dbe5f20
--- /dev/null
+++ b/wa/utils/log.py
@@ -0,0 +1,306 @@
+#    Copyright 2013-2015 ARM Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=E1101
+import logging
+import string
+import threading
+import subprocess
+
+import colorama
+
+from wa.framework import signal
+from wa.framework.exception import WAError
+from wa.utils.misc import get_traceback
+
+
+COLOR_MAP = {
+    logging.DEBUG: colorama.Fore.BLUE,
+    logging.INFO: colorama.Fore.GREEN,
+    logging.WARNING: colorama.Fore.YELLOW,
+    logging.ERROR: colorama.Fore.RED,
+    logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
+}
+
+RESET_COLOR = colorama.Style.RESET_ALL
+
+_indent_level = 0
+_indent_width = 4
+_console_handler = None
+
+
+def init(verbosity=logging.INFO, color=True, indent_with=4,
+         regular_fmt='%(levelname)-8s %(message)s',
+         verbose_fmt='%(asctime)s %(levelname)-8s %(name)10.10s: %(message)s',
+         debug=False):
+    global _indent_width, _console_handler
+    _indent_width = indent_with
+    signal.log_error_func = lambda m: log_error(m, signal.logger)
+
+    root_logger = logging.getLogger()
+    root_logger.setLevel(logging.DEBUG)
+
+    error_handler = ErrorSignalHandler(logging.DEBUG)
+    root_logger.addHandler(error_handler)
+
+    _console_handler = logging.StreamHandler()
+    if color:
+        formatter = ColorFormatter
+    else:
+        formatter = LineFormatter
+    if verbosity:
+        _console_handler.setLevel(logging.DEBUG)
+        _console_handler.setFormatter(formatter(verbose_fmt))
+    else:
+        _console_handler.setLevel(logging.INFO)
+        _console_handler.setFormatter(formatter(regular_fmt))
+    root_logger.addHandler(_console_handler)
+    logging.basicConfig(level=logging.DEBUG)
+    if not debug:
+        logging.raiseExceptions = False
+
+
+def set_level(level):
+    _console_handler.setLevel(level)
+
+
+def add_file(filepath, level=logging.DEBUG,
+             fmt='%(asctime)s %(levelname)-8s %(name)s: %(message)-10.10s'):
+    root_logger = logging.getLogger()
+    file_handler = logging.FileHandler(filepath)
+    file_handler.setLevel(level)
+    file_handler.setFormatter(LineFormatter(fmt))
+    root_logger.addHandler(file_handler)
+
+
+def enable(logs):
+    if isinstance(logs, list):
+        for log in logs:
+            __enable_logger(log)
+    else:
+        __enable_logger(logs)
+
+
+def disable(logs):
+    if isinstance(logs, list):
+        for log in logs:
+            __disable_logger(log)
+    else:
+        __disable_logger(logs)
+
+
+def __enable_logger(logger):
+    if isinstance(logger, basestring):
+        logger = logging.getLogger(logger)
+    logger.propagate = True
+
+
+def __disable_logger(logger):
+    if isinstance(logger, basestring):
+        logger = logging.getLogger(logger)
+    logger.propagate = False
+
+
+def indent():
+    global _indent_level
+    _indent_level += 1
+
+
+def dedent():
+    global _indent_level
+    _indent_level -= 1
+
+
+def log_error(e, logger, critical=False):
+    """
+    Log the specified Exception as an error. The error message will be
+    formatted differently depending on the nature of the exception.
+
+    :e: the error to log; should be an instance of ``Exception``.
+    :logger: the logger to be used.
+    :critical: if ``True``, the error will be logged at ``logging.CRITICAL``
+               level; otherwise it will be logged as ``logging.ERROR``.
+    
+    """
+    if critical:
+        log_func = logger.critical
+    else:
+        log_func = logger.error
+
+    if isinstance(e, KeyboardInterrupt):
+        log_func('Got CTRL-C. Aborting.')
+    elif isinstance(e, WAError):
+        log_func(e)
+    elif isinstance(e, subprocess.CalledProcessError):
+        tb = get_traceback()
+        log_func(tb)
+        command = e.cmd
+        if e.args:
+            command = '{} {}'.format(command, ' '.join(e.args))
+        message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
+        log_func(message.format(command, e.returncode, e.output))
+    elif isinstance(e, SyntaxError):
+        tb = get_traceback()
+        log_func(tb)
+        message = 'Syntax Error in {}, line {}, offset {}:'
+        log_func(message.format(e.filename, e.lineno, e.offset))
+        log_func('\t{}'.format(e.msg))
+    else:
+        tb = get_traceback()
+        log_func(tb)
+        log_func('{}({})'.format(e.__class__.__name__, e))
+
+
+class ErrorSignalHandler(logging.Handler):
+    """
+    Emits signals for ERROR and WARNING level traces.
+
+    """
+
+    def emit(self, record):
+        if record.levelno == logging.ERROR:
+            signal.send(signal.ERROR_LOGGED, self)
+        elif record.levelno == logging.WARNING:
+            signal.send(signal.WARNING_LOGGED, self)
+
+
+class LineFormatter(logging.Formatter):
+    """
+    Logs each line of the message separately.
+
+    """
+
+    def format(self, record):
+        record.message = record.getMessage()
+        if self.usesTime():
+            record.asctime = self.formatTime(record, self.datefmt)
+
+        indent = _indent_width * _indent_level
+        d = record.__dict__
+        parts = []
+        for line in record.message.split('\n'):
+            line = ' ' * indent + line
+            d.update({'message': line.strip('\r')})
+            parts.append(self._fmt % d)
+
+        return '\n'.join(parts)
+
+
+class ColorFormatter(LineFormatter):
+    """
+    Formats logging records with color and prepends record info
+    to each line of the message.
+
+        BLUE for DEBUG logging level
+        GREEN for INFO logging level
+        YELLOW for WARNING logging level
+        RED for ERROR logging level
+        BOLD RED for CRITICAL logging level
+
+    """
+
+    def __init__(self, fmt=None, datefmt=None):
+        super(ColorFormatter, self).__init__(fmt, datefmt)
+        template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
+        template_text = '${color}' + template_text + RESET_COLOR
+        self.fmt_template = string.Template(template_text)
+
+    def format(self, record):
+        self._set_color(COLOR_MAP[record.levelno])
+        return super(ColorFormatter, self).format(record)
+
+    def _set_color(self, color):
+        self._fmt = self.fmt_template.substitute(color=color)
+
+
+class BaseLogWriter(object):
+
+    def __init__(self, name, level=logging.DEBUG):
+        """
+        File-like object class designed to be used for logging from streams.
+        Each complete line (terminated by a new line character) is logged at
+        the specified level; incomplete lines are buffered until the next one.
+
+        :param name: The name of the logger that will be used.
+
+        """
+        self.logger = logging.getLogger(name)
+        self.buffer = ''
+        if level == logging.DEBUG:
+            self.do_write = self.logger.debug
+        elif level == logging.INFO:
+            self.do_write = self.logger.info
+        elif level == logging.WARNING:
+            self.do_write = self.logger.warning
+        elif level == logging.ERROR:
+            self.do_write = self.logger.error
+        else:
+            raise Exception('Unknown logging level: {}'.format(level))
+
+    def flush(self):
+        # Defined to match the interface expected by pexpect.
+        return self
+
+    def close(self):
+        if self.buffer:
+            self.logger.debug(self.buffer)
+            self.buffer = ''
+        return self
+
+    def __del__(self):
+        # Ensure we don't lose buffered output
+        self.close()
+
+
+class LogWriter(BaseLogWriter):
+
+    def write(self, data):
+        data = data.replace('\r\n', '\n').replace('\r', '\n')
+        if '\n' in data:
+            parts = data.split('\n')
+            parts[0] = self.buffer + parts[0]
+            for part in parts[:-1]:
+                self.do_write(part)
+            self.buffer = parts[-1]
+        else:
+            self.buffer += data
+        return self
+
+
+class LineLogWriter(BaseLogWriter):
+
+    def write(self, data):
+        self.do_write(data)
+
+
+class StreamLogger(threading.Thread):
+    """
+    Logs output from a stream in a thread.
+
+    """
+
+    def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
+        super(StreamLogger, self).__init__()
+        self.writer = klass(name, level)
+        self.stream = stream
+        self.daemon = True
+
+    def run(self):
+        line = self.stream.readline()
+        while line:
+            self.writer.write(line.rstrip('\n'))
+            line = self.stream.readline()
+        self.writer.close()
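
# A minimal usage sketch (not part of the patch): colourised console
# output plus a debug-level log file, with indented sub-steps.
import logging
from wa.utils import log

log.init(color=True)
log.add_file('run.log')
logger = logging.getLogger('example')
logger.info('starting')
log.indent()
logger.debug('sub-step output is indented by one level')
log.dedent()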
diff --git a/wa/utils/misc.py b/wa/utils/misc.py
index 2067f792..bb3f647d 100644
--- a/wa/utils/misc.py
+++ b/wa/utils/misc.py
@@ -24,7 +24,6 @@ import sys
 import re
 import math
 import imp
-import uuid
 import string
 import threading
 import signal
@@ -33,154 +32,28 @@ import pkgutil
 import traceback
 import logging
 import random
+import hashlib
 from datetime import datetime, timedelta
 from operator import mul, itemgetter
 from StringIO import StringIO
-from itertools import cycle, groupby
+from itertools import cycle, groupby, chain
+from functools import partial
 from distutils.spawn import find_executable
 
 import yaml
 from dateutil import tz
 
-from wa.framework.version import get_wa_version
-
-
-# ABI --> architectures list
-ABI_MAP = {
-    'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh'],
-    'arm64': ['arm64', 'armv8', 'arm64-v8a'],
-}
-
-
-def preexec_function():
-    # Ignore the SIGINT signal by setting the handler to the standard
-    # signal handler SIG_IGN.
-    signal.signal(signal.SIGINT, signal.SIG_IGN)
-    # Change process group in case we have to kill the subprocess and all of
-    # its children later.
-    # TODO: this is Unix-specific; would be good to find an OS-agnostic way
-    #       to do this in case we wanna port WA to Windows.
-    os.setpgrp()
-
+from devlib.utils.misc import (ABI_MAP, check_output, walk_modules,
+                               ensure_directory_exists, ensure_file_directory_exists,
+                               normalize, convert_new_lines, get_cpu_mask, unique,
+                               escape_quotes, escape_single_quotes, escape_double_quotes,
+                               isiterable, getch, as_relative, ranges_to_list,
+                               list_to_ranges, list_to_mask, mask_to_list, which)
 
 check_output_logger = logging.getLogger('check_output')
 
 
 # Defined here rather than in wlauto.exceptions due to module load dependencies
-class TimeoutError(Exception):
-    """Raised when a subprocess command times out. This is basically a ``WAError``-derived version
-    of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
-    programming error (e.g. not setting long enough timers), it is often due to some failure in the
-    environment, and there fore should be classed as a "user error"."""
-
-    def __init__(self, command, output):
-        super(TimeoutError, self).__init__('Timed out: {}'.format(command))
-        self.command = command
-        self.output = output
-
-    def __str__(self):
-        return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
-
-
-def check_output(command, timeout=None, ignore=None, **kwargs):
-    """This is a version of subprocess.check_output that adds a timeout parameter to kill
-    the subprocess if it does not return within the specified time."""
-    # pylint: disable=too-many-branches
-    if ignore is None:
-        ignore = []
-    elif isinstance(ignore, int):
-        ignore = [ignore]
-    elif not isinstance(ignore, list) and ignore != 'all':
-        message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
-        raise ValueError(message.format(ignore))
-    if 'stdout' in kwargs:
-        raise ValueError('stdout argument not allowed, it will be overridden.')
-
-    def callback(pid):
-        try:
-            check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
-            os.killpg(pid, signal.SIGKILL)
-        except OSError:
-            pass  # process may have already terminated.
-
-    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                               preexec_fn=preexec_function, **kwargs)
-
-    if timeout:
-        timer = threading.Timer(timeout, callback, [process.pid, ])
-        timer.start()
-
-    try:
-        output, error = process.communicate()
-    finally:
-        if timeout:
-            timer.cancel()
-
-    retcode = process.poll()
-    if retcode:
-        if retcode == -9:  # killed, assume due to timeout callback
-            raise TimeoutError(command, output='\n'.join([output, error]))
-        elif ignore != 'all' and retcode not in ignore:
-            raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output, error]))
-    return output, error
-
-
-def init_argument_parser(parser):
-    parser.add_argument('-c', '--config', help='specify an additional config.py')
-    parser.add_argument('-v', '--verbose', action='count',
-                        help='The scripts will produce verbose output.')
-    parser.add_argument('--debug', action='store_true',
-                        help='Enable debug mode. Note: this implies --verbose.')
-    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
-    return parser
-
-
-def walk_modules(path):
-    """
-    Given a path to a Python package, iterate over all the modules  and
-    sub-packages in that package.
-
-    """
-    try:
-        root_mod = __import__(path, {}, {}, [''])
-        yield root_mod
-    except ImportError as e:
-        e.path = path
-        raise e
-    if not hasattr(root_mod, '__path__'):  # module, not package
-        return
-    for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
-        try:
-            submod_path = '.'.join([path, name])
-            if ispkg:
-                for submod in walk_modules(submod_path):
-                    yield submod
-            else:
-                yield __import__(submod_path, {}, {}, [''])
-        except ImportError as e:
-            e.path = submod_path
-            raise e
-
-
-def ensure_directory_exists(dirpath):
-    """A filter for directory paths to ensure they exist."""
-    if not os.path.isdir(dirpath):
-        os.makedirs(dirpath)
-    return dirpath
-
-
-def ensure_file_directory_exists(filepath):
-    """
-    A filter for file paths to ensure the directory of the
-    file exists and the file can be created there. The file
-    itself is *not* going to be created if it doesn't already
-    exist.
-
-    """
-    ensure_directory_exists(os.path.dirname(filepath))
-    return filepath
-
-
 def diff_tokens(before_token, after_token):
     """
     Creates a diff of two tokens.
@@ -269,22 +142,22 @@ def get_traceback(exc=None):
     return sio.getvalue()
 
 
-def normalize(value, dict_type=dict):
-    """Normalize values. Recursively normalizes dict keys to be lower case,
-    no surrounding whitespace, underscore-delimited strings."""
-    if isinstance(value, dict):
-        normalized = dict_type()
-        for k, v in value.iteritems():
-            if isinstance(k, basestring):
-                k = k.strip().lower().replace(' ', '_')
-            normalized[k] = normalize(v, dict_type)
-        return normalized
-    elif isinstance(value, list):
-        return [normalize(v, dict_type) for v in value]
-    elif isinstance(value, tuple):
-        return tuple([normalize(v, dict_type) for v in value])
-    else:
-        return value
+def _check_remove_item(the_list, item):
+    """Helper function for merge_lists that implements checking wether an items
+    should be removed from the list and doing so if needed. Returns ``True`` if
+    the item has been removed and ``False`` otherwise."""
+    if not isinstance(item, basestring):
+        return False
+    if not item.startswith('~'):
+        return False
+    actual_item = item[1:]
+    if actual_item in the_list:
+        del the_list[the_list.index(actual_item)]
+    return True
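+
+# For instance (a sketch): _check_remove_item(['a', 'b'], '~b') deletes 'b'
+# from the list and returns True, while _check_remove_item(['a', 'b'], 'c')
+# leaves it untouched and returns False.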
 
 
 VALUE_REGEX = re.compile(r'(\d+(?:\.\d+)?)\s*(\w*)')
@@ -338,50 +207,6 @@ def capitalize(text):
     return text[0].upper() + text[1:].lower()
 
 
-def convert_new_lines(text):
-    """ Convert new lines to a common format.  """
-    return text.replace('\r\n', '\n').replace('\r', '\n')
-
-
-def escape_quotes(text):
-    """Escape quotes, and escaped quotes, in the specified text."""
-    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
-
-
-def escape_single_quotes(text):
-    """Escape single quotes, and escaped single quotes, in the specified text."""
-    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
-
-
-def escape_double_quotes(text):
-    """Escape double quotes, and escaped double quotes, in the specified text."""
-    return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
-
-
-def getch(count=1):
-    """Read ``count`` characters from standard input."""
-    if os.name == 'nt':
-        import msvcrt  # pylint: disable=F0401
-        return ''.join([msvcrt.getch() for _ in xrange(count)])
-    else:  # assume Unix
-        import tty  # NOQA
-        import termios  # NOQA
-        fd = sys.stdin.fileno()
-        old_settings = termios.tcgetattr(fd)
-        try:
-            tty.setraw(sys.stdin.fileno())
-            ch = sys.stdin.read(count)
-        finally:
-            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
-        return ch
-
-
-def isiterable(obj):
-    """Returns ``True`` if the specified object is iterable and
-    *is not a string type*, ``False`` otherwise."""
-    return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
-
-
 def utc_to_local(dt):
     """Convert naive datetime to local time zone, assuming UTC."""
     return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
@@ -392,21 +217,6 @@ def local_to_utc(dt):
     return dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
 
 
-def as_relative(path):
-    """Convert path to relative by stripping away the leading '/' on UNIX or
-    the equivant on other platforms."""
-    path = os.path.splitdrive(path)[1]
-    return path.lstrip(os.sep)
-
-
-def get_cpu_mask(cores):
-    """Return a string with the hex for the cpu mask for the specified core numbers."""
-    mask = 0
-    for i in cores:
-        mask |= 1 << i
-    return '0x{0:x}'.format(mask)
-
-
 def load_class(classpath):
     """Loads the specified Python class. ``classpath`` must be a fully-qualified
     class name (i.e. namspaced under module/package)."""
@@ -468,29 +278,7 @@ def enum_metaclass(enum_param, return_name=False, start=0):
     return __EnumMeta
 
 
-def which(name):
-    """Platform-independent version of UNIX which utility."""
-    if os.name == 'nt':
-        paths = os.getenv('PATH').split(os.pathsep)
-        exts = os.getenv('PATHEXT').split(os.pathsep)
-        for path in paths:
-            testpath = os.path.join(path, name)
-            if os.path.isfile(testpath):
-                return testpath
-            for ext in exts:
-                testpathext = testpath + ext
-                if os.path.isfile(testpathext):
-                    return testpathext
-        return None
-    else:  # assume UNIX-like
-        try:
-            result = check_output(['which', name])[0]
-            return result.strip()  # pylint: disable=E1103
-        except subprocess.CalledProcessError:
-            return None
-
-
-_bash_color_regex = re.compile('\x1b\\[[0-9;]+m')
+_bash_color_regex = re.compile(r'\x1b\[[0-9;]+m')
 
 
 def strip_bash_colors(text):
@@ -536,6 +324,18 @@ def get_random_string(length):
     return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
 
 
+class LoadSyntaxError(Exception):
+
+    def __init__(self, message, filepath, lineno):
+        super(LoadSyntaxError, self).__init__(message)
+        self.filepath = filepath
+        self.lineno = lineno
+
+    def __str__(self):
+        message = 'Syntax Error in {}, line {}:\n\t{}'
+        return message.format(self.filepath, self.lineno, self.message)
+
+
 RAND_MOD_NAME_LEN = 30
 BAD_CHARS = string.punctuation + string.whitespace
 TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
@@ -544,23 +344,67 @@ TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
 def to_identifier(text):
     """Converts text to a valid Python identifier by replacing all
     whitespace and punctuation."""
-    result = re.sub('_+', '_', text.translate(TRANS_TABLE))
-    if result and result[0] in string.digits:
-        result = '_' + result
-    return result
+    return re.sub('_+', '_', text.translate(TRANS_TABLE))
 
 
-def unique(alist):
+def load_struct_from_python(filepath=None, text=None):
+    """Parses a config structure from a .py file. The structure should be composed
+    of basic Python types (strings, ints, lists, dicts, etc.)."""
+    if not (filepath or text) or (filepath and text):
+        raise ValueError('Exactly one of filepath or text must be specified.')
+    try:
+        if filepath:
+            modname = to_identifier(filepath)
+            mod = imp.load_source(modname, filepath)
+        else:
+            modname = get_random_string(RAND_MOD_NAME_LEN)
+            while modname in sys.modules:  # highly unlikely, but...
+                modname = get_random_string(RAND_MOD_NAME_LEN)
+            mod = imp.new_module(modname)
+            exec text in mod.__dict__  # pylint: disable=exec-used
+        return dict((k, v)
+                    for k, v in mod.__dict__.iteritems()
+                    if not k.startswith('_'))
+    except SyntaxError as e:
+        raise LoadSyntaxError(e.message, filepath, e.lineno)
+
+
+def load_struct_from_yaml(filepath=None, text=None):
+    """Parses a config structure from a .yaml file. The structure should be composed
+    of basic Python types (strings, ints, lists, dicts, etc.)."""
+    if not (filepath or text) or (filepath and text):
+        raise ValueError('Exactly one of filepath or text must be specified.')
+    try:
+        if filepath:
+            with open(filepath) as fh:
+                return yaml.load(fh)
+        else:
+            return yaml.load(text)
+    except yaml.YAMLError as e:
+        lineno = None
+        if hasattr(e, 'problem_mark'):
+            lineno = e.problem_mark.line  # pylint: disable=no-member
+        raise LoadSyntaxError(e.message, filepath=filepath, lineno=lineno)
+
+
+def load_struct_from_file(filepath):
     """
-    Returns a list containing only unique elements from the input list (but preserves
-    order, unlike sets).
+    Attempts to parse a Python structure consisting of basic types from the
+    specified file. Raises a ``ValueError`` if the specified file is of unknown
+    format, and a ``LoadSyntaxError`` if there is an issue parsing the file.
 
     """
-    result = []
-    for item in alist:
-        if item not in result:
-            result.append(item)
-    return result
+    extn = os.path.splitext(filepath)[1].lower()
+    if (extn == '.py') or (extn == '.pyc') or (extn == '.pyo'):
+        return load_struct_from_python(filepath)
+    elif extn == '.yaml':
+        return load_struct_from_yaml(filepath)
+    else:
+        raise ValueError('Unknown format "{}": {}'.format(extn, filepath))
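+
+# For example (a sketch): given a config.py containing
+#     workloads = ['dhrystone']
+# load_struct_from_file('config.py') returns {'workloads': ['dhrystone']}.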
 
 
 def open_file(filepath):
@@ -576,68 +416,175 @@ def open_file(filepath):
         return subprocess.call(['xdg-open', filepath])
 
 
-def ranges_to_list(ranges_string):
-    """Converts a sysfs-style ranges string, e.g. ``"0,2-4"``, into a list ,e.g ``[0,2,3,4]``"""
-    values = []
-    for rg in ranges_string.split(','):
-        if '-' in rg:
-            first, last = map(int, rg.split('-'))
-            values.extend(xrange(first, last + 1))
-        else:
-            values.append(int(rg))
-    return values
+def sha256(path, chunk=2048):
+    """Calculates SHA256 hexdigest of the file at the specified path."""
+    h = hashlib.sha256()
+    with open(path, 'rb') as fh:
+        buf = fh.read(chunk)
+        while buf:
+            h.update(buf)
+            buf = fh.read(chunk)
+    return h.hexdigest()
 
 
-def list_to_ranges(values):
-    """Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
-    range_groups = []
-    for _, g in groupby(enumerate(values), lambda (i, x): i - x):
-        range_groups.append(map(itemgetter(1), g))
-    range_strings = []
-    for group in range_groups:
-        if len(group) == 1:
-            range_strings.append(str(group[0]))
-        else:
-            range_strings.append('{}-{}'.format(group[0], group[-1]))
-    return ','.join(range_strings)
+def urljoin(*parts):
+    return '/'.join(p.rstrip('/') for p in parts)
 
 
-def list_to_mask(values, base=0x0):
-    """Converts the specified list of integer values into
-    a bit mask for those values. Optinally, the list can be
-    applied to an existing mask."""
-    for v in values:
-        base |= (1 << v)
-    return base
-
-
-def mask_to_list(mask):
-    """Converts the specfied integer bitmask into a list of
-    indexes of bits that are set in the mask."""
-    size = len(bin(mask)) - 2  # because of "0b"
-    return [size - i - 1 for i in xrange(size)
-            if mask & (1 << size - i - 1)]
-
-
-class Namespace(dict):
+# From: http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/
+def istextfile(fileobj, blocksize=512):
+    """ Uses heuristics to guess whether the given file is text or binary,
+        by reading a single block of bytes from the file.
+        If more than 30% of the chars in the block are non-text, or there
+        are NUL ('\x00') bytes in the block, assume this is a binary file.
     """
-    A dict-like object that allows treating keys and attributes
-    interchangeably (this means that keys are restricted to strings
-    that are valid Python identifiers).
+    _text_characters = (b''.join(chr(i) for i in range(32, 127)) +
+                        b'\n\r\t\f\b')
+
+    block = fileobj.read(blocksize)
+    if b'\x00' in block:
+        # Files with null bytes are binary
+        return False
+    elif not block:
+        # An empty file is considered a valid text file
+        return True
+
+    # Use translate's 'deletechars' argument to efficiently remove all
+    # occurrences of _text_characters from the block
+    nontext = block.translate(None, _text_characters)
+    return float(len(nontext)) / len(block) <= 0.30
+
+
+def categorize(v):
+    if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'):
+        return 'o'
+    elif hasattr(v, 'iteritems'):
+        return 'm'
+    elif isiterable(v):
+        return 's'
+    elif v is None:
+        return 'n'
+    else:
+        return 'c'
+
+
+def merge_config_values(base, other):
+    """
+    This is used to merge two objects, typically when setting the value of a
+    ``ConfigurationPoint``. First, both objects are categorized into
+
+        c: A scalar value. Basically, most objects. These values
+           are treated as atomic, and not mergeable.
+        s: A sequence. Anything iterable that is not a dict or
+           a string (strings are considered scalars).
+        m: A key-value mapping. ``dict`` and its derivatives.
+        n: ``None``.
+        o: A mergeable object; this is an object that implements both
+          ``merge_with`` and ``merge_into`` methods.
+
+    The merge rules based on the two categories are then as follows:
+
+        (c1, c2) --> c2
+        (s1, s2) --> s1 . s2
+        (m1, m2) --> m1 . m2
+        (c, s) --> [c] . s
+        (s, c) --> s . [c]
+        (s, m) --> s . [m]
+        (m, s) --> [m] . s
+        (m, c) --> ERROR
+        (c, m) --> ERROR
+        (o, X) --> o.merge_with(X)
+        (X, o) --> o.merge_into(X)
+        (X, n) --> X
+        (n, X) --> X
+
+    where:
+
+        '.'  means concatenation (for maps, concatenation of (k, v) streams
+             then converted back into a map). If the types of the two objects
+             differ, the type of ``other`` is used for the result.
+        'X'  means "any category"
+        '[]' used to indicate a literal sequence (not necessarily a ``list``).
+             When this is concatenated with an actual sequence, that sequence's
+             type is used.
+
+    notes:
+
+        - When a mapping is combined with a sequence, that mapping is
+          treated as a scalar value.
+        - When combining two mergeable objects, they're combined using
+          ``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)).
+        - Combining anything with ``None`` yields that value, irrespective
+          of the order. So a ``None`` value is equivalent to the corresponding
+          item being omitted.
+        - When both values are scalars, merging is equivalent to overwriting.
+        - There is no recursion (e.g. if map values are lists, they will not
+          be merged; ``other`` will overwrite ``base`` values). If complicated
+          merging semantics (such as recursion) are required, they should be
+          implemented within custom mergeable types (i.e. those that implement
+          ``merge_with`` and ``merge_into``).
+
+    While this can be used as a generic "combine any two arbitrary objects"
+    function, the semantics have been selected specifically for merging
+    configuration point values.
 
     """
+    cat_base = categorize(base)
+    cat_other = categorize(other)
 
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name)
+    if cat_base == 'n':
+        return other
+    elif cat_other == 'n':
+        return base
 
-    def __setattr__(self, name, value):
-        self[name] = value
+    if cat_base == 'o':
+        return base.merge_with(other)
+    elif cat_other == 'o':
+        return other.merge_into(base)
 
-    def __setitem__(self, name, value):
-        if to_identifier(name) != name:
-            message = 'Key must be a valid identifier; got "{}"'
-            raise ValueError(message.format(name))
-        dict.__setitem__(self, name, value)
+    if cat_base == 'm':
+        if cat_other == 's':
+            return merge_sequencies([base], other)
+        elif cat_other == 'm':
+            return merge_maps(base, other)
+        else:
+            message = 'merge error ({}, {}): "{}" and "{}"'
+            raise ValueError(message.format(cat_base, cat_other, base, other))
+    elif cat_base == 's':
+        if cat_other == 's':
+            return merge_sequencies(base, other)
+        else:
+            return merge_sequencies(base, [other])
+    else:  # cat_base == 'c'
+        if cat_other == 's':
+            return merge_sequencies([base], other)
+        elif cat_other == 'm':
+            message = 'merge error ({}, {}): "{}" and "{}"'
+            raise ValueError(message.format(cat_base, cat_other, base, other))
+        else:
+            return other
+
+
+def merge_sequencies(s1, s2):
+    return type(s2)(unique(chain(s1, s2)))
+
+
+def merge_maps(m1, m2):
+    return type(m2)(chain(m1.iteritems(), m2.iteritems()))
+
+
+def merge_dicts_simple(base, other):
+    result = base.copy()
+    for key, value in (other or {}).iteritems():
+        result[key] = merge_config_values(result.get(key), value)
+    return result
+
+
+def touch(path):
+    with open(path, 'w'):
+        pass
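
To make the merge rules above concrete, a minimal sketch (assuming the
functions are importable from ``wa.utils.misc`` as patched here; Python 2,
which this codebase targets)::

    from wa.utils.misc import merge_config_values

    # (c1, c2) --> c2: scalars are simply overwritten.
    assert merge_config_values(1, 2) == 2

    # (s1, s2) --> s1 . s2: sequences are concatenated, duplicates dropped,
    # and the result takes the type of the second operand.
    assert merge_config_values([1, 2], (2, 3)) == (1, 2, 3)

    # (c, s) --> [c] . s: a scalar merged with a sequence is prepended.
    assert merge_config_values('a', ['b', 'c']) == ['a', 'b', 'c']

    # (m1, m2) --> m1 . m2: maps are merged; ``other`` wins on key clashes.
    assert merge_config_values({'x': 1, 'y': 2}, {'y': 3}) == {'x': 1, 'y': 3}

    # (X, n) / (n, X) --> X: ``None`` is the same as omitting the value.
    assert merge_config_values(None, [1]) == [1]
    assert merge_config_values([1], None) == [1]

    # (m, c) is an error: a map cannot be merged with a scalar.
    try:
        merge_config_values({'x': 1}, 42)
    except ValueError:
        pass
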
diff --git a/wa/utils/serializer.py b/wa/utils/serializer.py
index 40fa93c3..b2535961 100644
--- a/wa/utils/serializer.py
+++ b/wa/utils/serializer.py
@@ -1,13 +1,13 @@
 """
 This module contains wrappers for Python serialization modules for
 common formats that make it easier to serialize/deserialize WA
-Plain Old Data structures (serilizable WA classes implement 
-``to_pod()``/``from_pod()`` methods for converting between POD 
+Plain Old Data structures (serializable WA classes implement
+``to_pod()``/``from_pod()`` methods for converting between POD
 structures and Python class instances).
 
 The modifications to standard serilization procedures are:
 
-    - mappings are deserialized as ``OrderedDict``\ 's are than standard
+    - mappings are deserialized as ``OrderedDict``\ 's rather than standard
       Python ``dict``\ 's. This allows for cleaner syntax in certain parts
       of WA configuration (e.g. values to be written to files can be specified
       as a dict, and they will be written in the order specified in the config).
@@ -16,7 +16,7 @@ The modifications to standard serilization procedures are:
       in the POD config.
 
 This module exports the "wrapped" versions of serialization libraries,
-and this should be imported and used instead of importing the libraries 
+and this should be imported and used instead of importing the libraries
 directly. i.e. ::
 
     from wa.utils.serializer import yaml
@@ -27,7 +27,7 @@ instead of ::
     import yaml
     pod = yaml.load(fh)
 
-It's also possible to suse the serializer directly::
+It's also possible to use the serializer directly::
 
     from wa.utils import serializer
     pod = serializer.load(fh)
@@ -35,13 +35,14 @@ It's also possible to suse the serializer directly::
 This can also be used to ``dump()`` POD structures. By default,
 ``dump()`` will produce JSON, but ``fmt`` parameter may be used to
 specify an alternative format (``yaml`` or ``python``). ``load()`` will
-use the file extension to guess the format, but ``fmt`` may also be used
+use the file extension to guess the format, but ``fmt`` may also be used
 to specify it explicitly.
 
 """
+# pylint: disable=unused-argument
+
 import os
 import re
-import sys
 import json as _json
 from collections import OrderedDict
 from datetime import datetime
@@ -50,8 +51,8 @@ import yaml as _yaml
 import dateutil.parser
 
 from wa.framework.exception import SerializerSyntaxError
-from wa.utils.types import regex_type
 from wa.utils.misc import isiterable
+from wa.utils.types import regex_type, none_type
 
 
 __all__ = [
@@ -60,16 +61,29 @@ __all__ = [
     'read_pod',
     'dump',
     'load',
+    'is_pod',
+    'POD_TYPES',
 ]
 
-
+POD_TYPES = [
+    list,
+    tuple,
+    dict,
+    set,
+    str,
+    unicode,
+    int,
+    float,
+    bool,
+    datetime,
+    regex_type,
+    none_type,
+]
 
 class WAJSONEncoder(_json.JSONEncoder):
 
-    def default(self, obj):
-        if hasattr(obj, 'to_pod'):
-            return obj.to_pod()
-        elif isinstance(obj, regex_type):
+    def default(self, obj):  # pylint: disable=method-hidden
+        if isinstance(obj, regex_type):
             return 'REGEX:{}:{}'.format(obj.flags, obj.pattern)
         elif isinstance(obj, datetime):
             return 'DATET:{}'.format(obj.isoformat())
@@ -79,8 +93,8 @@ class WAJSONEncoder(_json.JSONEncoder):
 
 class WAJSONDecoder(_json.JSONDecoder):
 
-    def decode(self, s):
-        d = _json.JSONDecoder.decode(self, s)
+    def decode(self, s, **kwargs):
+        d = _json.JSONDecoder.decode(self, s, **kwargs)
 
         def try_parse_object(v):
             if isinstance(v, basestring) and v.startswith('REGEX:'):
@@ -112,7 +126,6 @@ class json(object):
     def dump(o, wfh, indent=4, *args, **kwargs):
         return _json.dump(o, wfh, cls=WAJSONEncoder, indent=indent, *args, **kwargs)
 
-
     @staticmethod
     def load(fh, *args, **kwargs):
         try:
@@ -176,7 +189,7 @@ class yaml(object):
         except _yaml.YAMLError as e:
             lineno = None
             if hasattr(e, 'problem_mark'):
-                lineno = e.problem_mark.line
+                lineno = e.problem_mark.line  # pylint: disable=no-member
             raise SerializerSyntaxError(e.message, lineno)
 
     loads = load
@@ -196,7 +209,7 @@ class python(object):
     def loads(s, *args, **kwargs):
         pod = {}
         try:
-            exec s in pod
+            exec s in pod  # pylint: disable=exec-used
         except SyntaxError as e:
             raise SerializerSyntaxError(e.message, e.lineno)
         for k in pod.keys():
@@ -209,20 +222,29 @@ def read_pod(source, fmt=None):
     if isinstance(source, basestring):
         with open(source) as fh:
             return _read_pod(fh, fmt)
-    elif hasattr(source, 'read') and (hasattr(sourc, 'name') or fmt):
+    elif hasattr(source, 'read') and (hasattr(source, 'name') or fmt):
         return _read_pod(source, fmt)
     else:
         message = 'source must be a path or an open file handle; got {}'
         raise ValueError(message.format(type(source)))
 
+def write_pod(pod, dest, fmt=None):
+    if isinstance(dest, basestring):
+        with open(dest, 'w') as wfh:
+            return _write_pod(pod, wfh, fmt)
+    elif hasattr(dest, 'write') and (hasattr(dest, 'name') or fmt):
+        return _write_pod(pod, dest, fmt)
+    else:
+        message = 'dest must be a path or an open file handle; got {}'
+        raise ValueError(message.format(type(dest)))
+
 
 def dump(o, wfh, fmt='json', *args, **kwargs):
-    serializer = {
-                'yaml': yaml,
-                'json': json,
-                'python': python,
-                'py': python,
-            }.get(fmt)
+    serializer = {'yaml': yaml,
+                  'json': json,
+                  'python': python,
+                  'py': python,
+                  }.get(fmt)
     if serializer is None:
         raise ValueError('Unknown serialization format: "{}"'.format(fmt))
     serializer.dump(o, wfh, *args, **kwargs)
@@ -242,4 +264,20 @@ def _read_pod(fh, fmt=None):
     elif fmt == 'py':
         return python.load(fh)
     else:
-        raise ValueError('Unknown format "{}": {}'.format(fmt, path))
+        raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(fh, 'name', '<none>')))
+
+def _write_pod(pod, wfh, fmt=None):
+    if fmt is None:
+        fmt = os.path.splitext(wfh.name)[1].lower().strip('.')
+    if fmt == 'yaml':
+        return yaml.dump(pod, wfh)
+    elif fmt == 'json':
+        return json.dump(pod, wfh)
+    elif fmt == 'py':
+        raise ValueError('Serializing to Python is not supported')
+    else:
+        raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(wfh, 'name', '<none>')))
+
+def is_pod(obj):
+    return type(obj) in POD_TYPES
+
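
For reference, a quick sketch of the round trip this module provides
(assuming ``wa.utils.serializer`` is importable and ``/tmp`` is writable;
the ``REGEX:``/``DATET:`` prefixes are the encodings applied by
``WAJSONEncoder``/``WAJSONDecoder`` above)::

    import re
    from datetime import datetime
    from wa.utils.serializer import read_pod, write_pod, is_pod

    pod = {
        'name': 'example',
        'when': datetime(2017, 3, 7, 15, 17, 23),  # dumped as 'DATET:<iso>'
        'pattern': re.compile(r'cpu\d+'),          # dumped as 'REGEX:<flags>:<pattern>'
    }

    # write_pod() picks the format from the '.json' extension;
    # read_pod() does the same on the way back in.
    write_pod(pod, '/tmp/example.json')
    restored = read_pod('/tmp/example.json')

    assert restored['when'] == pod['when']
    assert restored['pattern'].pattern == pod['pattern'].pattern

    # is_pod() reports whether a value is a supported Plain Old Data type.
    assert is_pod([1, 2, 3])
    assert not is_pod(object())
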
diff --git a/wa/utils/terminalsize.py b/wa/utils/terminalsize.py
new file mode 100644
index 00000000..828ca3e6
--- /dev/null
+++ b/wa/utils/terminalsize.py
@@ -0,0 +1,93 @@
+# Adapted from
+# https://gist.github.com/jtriley/1108174
+# pylint: disable=bare-except,unpacking-non-sequence
+import os
+import shlex
+import struct
+import platform
+import subprocess
+
+
+def get_terminal_size():
+    """ getTerminalSize()
+     - get width and height of console
+     - works on linux,os x,windows,cygwin(windows)
+     originally retrieved from:
+     http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
+    """
+    current_os = platform.system()
+    tuple_xy = None
+    if current_os == 'Windows':
+        tuple_xy = _get_terminal_size_windows()
+        if tuple_xy is None:
+            # needed for Windows Python running under Cygwin's xterm
+            tuple_xy = _get_terminal_size_tput()
+    if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
+        tuple_xy = _get_terminal_size_linux()
+    if tuple_xy is None or tuple_xy == (0, 0):
+        tuple_xy = (80, 25)      # assume "standard" terminal
+    return tuple_xy
+
+
+def _get_terminal_size_windows():
+    # pylint: disable=unused-variable,redefined-outer-name,too-many-locals
+    try:
+        from ctypes import windll, create_string_buffer
+        # stdin handle is -10
+        # stdout handle is -11
+        # stderr handle is -12
+        h = windll.kernel32.GetStdHandle(-12)
+        csbi = create_string_buffer(22)
+        res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
+        if res:
+            (bufx, bufy, curx, cury, wattr,
+             left, top, right, bottom,
+             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
+            sizex = right - left + 1
+            sizey = bottom - top + 1
+            return sizex, sizey
+    except:
+        pass
+
+
+def _get_terminal_size_tput():
+    # get terminal width and height using tput
+    # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
+    try:
+        cols = int(subprocess.check_output(shlex.split('tput cols')))
+        rows = int(subprocess.check_output(shlex.split('tput lines')))
+        return (cols, rows)
+    except:
+        pass
+
+
+def _get_terminal_size_linux():
+    def ioctl_GWINSZ(fd):
+        try:
+            import fcntl
+            import termios
+            cr = struct.unpack('hh',
+                               fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+            return cr
+        except:
+            pass
+    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
+    if not cr:
+        try:
+            fd = os.open(os.ctermid(), os.O_RDONLY)
+            cr = ioctl_GWINSZ(fd)
+            os.close(fd)
+        except:
+            pass
+    if not cr:
+        try:
+            cr = (os.environ['LINES'], os.environ['COLUMNS'])
+        except:
+            return None
+    return int(cr[1]), int(cr[0])
+
+
+if __name__ == "__main__":
+    sizex, sizey = get_terminal_size()
+    print 'width =', sizex, 'height =', sizey
+
diff --git a/wa/utils/types.py b/wa/utils/types.py
index 7a2e47ea..c23d2886 100644
--- a/wa/utils/types.py
+++ b/wa/utils/types.py
@@ -15,77 +15,29 @@
 
 
 """
-Routines for doing various type conversions. These usually embody some higher-level
-semantics than are present in standard Python types (e.g. ``boolean`` will convert the
-string ``"false"`` to ``False``, where as non-empty strings are usually considered to be
-``True``).
+Routines for doing various type conversions. These usually embody some
+higher-level semantics than are present in standard Python types (e.g.
+``boolean`` will convert the string ``"false"`` to ``False``, whereas
+non-empty strings are usually considered to be ``True``).
 
-A lot of these are intened to stpecify type conversions declaratively in place like
-``Parameter``'s ``kind`` argument. These are basically "hacks" around the fact that Python
-is not the best language to use for configuration.
+A lot of these are intended to specify type conversions declaratively in
+places like ``Parameter``'s ``kind`` argument. These are basically "hacks"
+around the fact that Python is not the best language to use for configuration.
 
 """
 import os
 import re
 import math
 import shlex
-import numbers
 from bisect import insort
-from collections import defaultdict
+from collections import defaultdict, MutableMapping
+from copy import copy
+
+from devlib.utils.types import identifier, boolean, integer, numeric, caseless_string
 
 from wa.utils.misc import isiterable, to_identifier
 
 
-def identifier(text):
-    """Converts text to a valid Python identifier by replacing all
-    whitespace and punctuation."""
-    return to_identifier(text)
-
-
-def boolean(value):
-    """
-    Returns bool represented by the value. This is different from
-    calling the builtin bool() in that it will interpret string representations.
-    e.g. boolean('0') and boolean('false') will both yield False.
-
-    """
-    false_strings = ['', '0', 'n', 'no']
-    if isinstance(value, basestring):
-        value = value.lower()
-        if value in false_strings or 'false'.startswith(value):
-            return False
-    return bool(value)
-
-
-def integer(value):
-    """Handles conversions for string respresentations of binary, octal and hex."""
-    if isinstance(value, basestring):
-        return int(value, 0)
-    else:
-        return int(value)
-
-
-def numeric(value):
-    """
-    Returns the value as number (int if possible, or float otherwise), or
-    raises ``ValueError`` if the specified ``value`` does not have a straight
-    forward numeric conversion.
-
-    """
-    if isinstance(value, int):
-        return value
-    try:
-        fvalue = float(value)
-    except ValueError:
-        raise ValueError('Not numeric: {}'.format(value))
-    if not math.isnan(fvalue) and not math.isinf(fvalue):
-        ivalue = int(fvalue)
-        # yeah, yeah, I know. Whatever. This is best-effort.
-        if ivalue == fvalue:
-            return ivalue
-    return fvalue
-
-
 def list_of_strs(value):
     """
     Value must be iterable. All elements will be converted to strings.
@@ -142,7 +94,6 @@ def list_of(type_):
     """Generates a "list of" callable for the specified type. The callable
     attempts to convert all elements in the passed value to the specifed
     ``type_``, raising ``ValueError`` on error."""
-
     def __init__(self, values):
         list.__init__(self, map(type_, values))
 
@@ -204,7 +155,6 @@ def list_or(type_):
     list_type = list_of(type_)
 
     class list_or_type(list_type):
-
         def __init__(self, value):
             # pylint: disable=non-parent-init-called,super-init-not-called
             if isiterable(value):
@@ -220,6 +170,7 @@ list_or_bool = list_or(boolean)
 
 
 regex_type = type(re.compile(''))
+none_type = type(None)
 
 
 def regex(value):
@@ -234,28 +185,25 @@ def regex(value):
         return re.compile(value)
 
 
-class caseless_string(str):
+__counters = defaultdict(int)
+
+
+def reset_counter(name=None):
+    __counters[name] = 0
+
+
+def counter(name=None):
     """
-    Just like built-in Python string except case-insensitive on comparisons. However, the
-    case is preserved otherwise.
+    An auto-incrementing value (kind of like an AUTO INCREMENT field in SQL).
+    Optionally, the name of the counter to be used is specified (each counter
+    increments separately).
+
+    Counts start at 1, not 0.
 
     """
-
-    def __eq__(self, other):
-        if isinstance(other, basestring):
-            other = other.lower()
-        return self.lower() == other
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __cmp__(self, other):
-        if isinstance(basestring, other):
-            other = other.lower()
-        return cmp(self.lower(), other)
-
-    def format(self, *args, **kwargs):
-        return caseless_string(super(caseless_string, self).format(*args, **kwargs))
+    __counters[name] += 1
+    value = __counters[name]
+    return value
 
 
 class arguments(list):
@@ -375,7 +323,8 @@ class prioritylist(object):
             raise ValueError('Invalid index {}'.format(index))
         current_global_offset = 0
         priority_counts = {priority: count for (priority, count) in
-                           zip(self.priorities, [len(self.elements[p]) for p in self.priorities])}
+                           zip(self.priorities, [len(self.elements[p]) 
+                                                 for p in self.priorities])}
         for priority in self.priorities:
             if not index_range:
                 break
@@ -395,103 +344,134 @@ class prioritylist(object):
         return self.size
 
 
-class TreeNode(object):
+class toggle_set(set):
+    """
+    A set that contains items to enable or disable something.
 
-    @property
-    def is_root(self):
-        return self.parent is None
-    
-    @property
-    def is_leaf(self):
-        return not self.children
+    A prefix of ``~`` is used to denote disabling something, for example
+    the list ['apples', '~oranges', 'cherries'] enables both ``apples``
+    and ``cherries`` but disables ``oranges``.
+    """
 
-    @property
-    def parent(self):
-        return self._parent
+    @staticmethod
+    def from_pod(pod):
+        return toggle_set(pod)
 
-    @parent.setter
-    def parent(self, parent):
-        if self._parent:
-            self._parent.remove_child(self)
-        self._parent = parent
-        if self._parent:
-            self._parent.add_child(self)
+    @staticmethod
+    def merge(source, dest):
+        for item in source:
+            if item not in dest:
+                # Disable previously enabled item
+                if item.startswith('~') and item[1:] in dest:
+                    dest.remove(item[1:])
+                # Enable previously disabled item
+                if not item.startswith('~') and ('~' + item) in dest:
+                    dest.remove('~' + item)
+                dest.add(item)
+        return dest
 
-    @property
-    def children(self):
-        return [c for c in self._children]
+    def merge_with(self, other):
+        new_self = copy(self)
+        return toggle_set.merge(other, new_self)
 
-    def __init__(self):
-        self._parent = None
-        self._children = []
+    def merge_into(self, other):
+        other = copy(other)
+        return toggle_set.merge(self, other)
 
-    def add_child(self, node):
-        if node == self:
-            raise ValueError('A node cannot be its own child.')
-        if node in self._children:
-            return
-        for ancestor in self.iter_ancestors():
-            if ancestor == node:
-                raise ValueError('Can\'t add {} as a child, as it already an ancestor')
-        if node.parent and node.parent != self:
-            raise ValueError('Cannot add {}, as it already has a parent.'.format(node))
-        self._children.append(node)
-        node._parent = self
+    def values(self):
+        """
+        Returns the set of enabled items.
+        """
+        return set([item for item in self if not item.startswith('~')])
 
-    def remove_child(self, node):
-        if node not in self._children:
-            message = 'Cannot remove: {} is not a child of {}'
-            raise ValueError(message.format(node, self))
-        self._children.remove(node)
-        node._parent = None
+    def conflicts_with(self, other):
+        """
+        Checks if any items in ``other`` conflict with items already in this set.
 
-    def iter_ancestors(self, after=None, upto=None):
-        if upto == self:
-            return
-        ancestor = self
-        if after:
-            while ancestor != after:
-                ancestor = ancestor.parent
-        while ancestor and ancestor != upto:
-            yield ancestor
-            ancestor = ancestor.parent
+        Args:
+            other (list): The list to be checked against
 
-    def iter_descendants(self):
-        for child in self.children:
-            yield child
-            for grandchild in child.iter_descendants():
-                yield grandchild
+        Returns:
+            A list of items in ``other`` that conflict with items in this set
+        """
+        conflicts = []
+        for item in other:
+            if item.startswith('~') and item[1:] in self:
+                conflicts.append(item)
+            if not item.startswith('~') and ('~' + item) in self:
+                conflicts.append(item)
+        return conflicts
 
-    def iter_leaves(self):
-        for descendant in self.iter_descendants():
-            if descendant.is_leaf:
-                yield descendant
+    def to_pod(self):
+        return list(self.values())
 
-    def get_common_ancestor(self, other):
-        if self.has_ancestor(other):
-            return other
-        if other.has_ancestor(self):
-            return self
-        for my_ancestor in self.iter_ancestors():
-            for other_ancestor in other.iter_ancestors():
-                if my_ancestor == other_ancestor:
-                    return my_ancestor
 
-    def get_root(self):
-        node = self
-        while not node.is_root:
-            node = node.parent
-        return node
+class ID(str):
 
-    def has_ancestor(self, other):
-        for ancestor in self.iter_ancestors():
-            if other == ancestor:
-                return True
-        return False
+    def merge_with(self, other):
+        return '_'.join([self, other])
 
-    def has_descendant(self, other):
-        for descendant in self.iter_descendants():
-            if other == descendant:
-                return True
-        return False
+    def merge_into(self, other):
+        return '_'.join([other, self])
 
+
+class obj_dict(MutableMapping):
+    """
+    An object that behaves like a dict but each dict entry can also be accessed
+    as an attribute.
+
+    :param not_in_dict: A list of keys that can only be accessed as attributes
+
+    """
+
+    @staticmethod
+    def from_pod(pod):
+        return obj_dict(pod)
+
+    def __init__(self, values=None, not_in_dict=None):
+        self.__dict__['dict'] = dict(values or {})
+        self.__dict__['not_in_dict'] = not_in_dict if not_in_dict is not None else []
+
+    def to_pod(self):
+        return self.__dict__['dict']
+
+    def __getitem__(self, key):
+        if key in self.not_in_dict:
+            msg = '"{}" is in the list keys that can only be accessed as attributes'
+            raise KeyError(msg.format(key))
+        return self.__dict__['dict'][key]
+
+    def __setitem__(self, key, value):
+        self.__dict__['dict'][key] = value
+
+    def __delitem__(self, key):
+        del self.__dict__['dict'][key]
+
+    def __len__(self):
+        return sum(1 for _ in self)
+
+    def __iter__(self):
+        for key in self.__dict__['dict']:
+            if key not in self.__dict__['not_in_dict']:
+                yield key
+
+    def __repr__(self):
+        return repr(dict(self))
+
+    def __str__(self):
+        return str(dict(self))
+
+    def __setattr__(self, name, value):
+        self.__dict__['dict'][name] = value
+
+    def __delattr__(self, name):
+        if name in self:
+            del self.__dict__['dict'][name]
+        else:
+            raise AttributeError("No such attribute: " + name)
+
+    def __getattr__(self, name):
+        if name in self.__dict__['dict']:
+            return self.__dict__['dict'][name]
+        else:
+            raise AttributeError("No such attribute: " + name)
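
A brief illustration of the new ``toggle_set`` and ``counter`` utilities
(assuming ``wa.utils.types`` is importable as patched here)::

    from wa.utils.types import toggle_set, counter, reset_counter

    defaults = toggle_set(['apples', 'oranges', 'cherries'])
    overrides = toggle_set(['~oranges', 'plums'])

    # merge_into() applies self on top of other; '~oranges' knocks out
    # the previously enabled 'oranges'.
    merged = overrides.merge_into(defaults)
    assert merged.values() == set(['apples', 'cherries', 'plums'])

    # conflicts_with() reports items that toggle opposite ways.
    assert defaults.conflicts_with(overrides) == ['~oranges']

    # counter() keeps a separate auto-incrementing count per name,
    # starting at 1.
    reset_counter('jobs')
    assert counter('jobs') == 1
    assert counter('jobs') == 2
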
diff --git a/wa/workloads/dhrystone/__init__.py b/wa/workloads/dhrystone/__init__.py
index 69f554a0..9cc2d9ba 100644
--- a/wa/workloads/dhrystone/__init__.py
+++ b/wa/workloads/dhrystone/__init__.py
@@ -18,7 +18,7 @@
 import os
 import re
 
-from wa import Workload, Parameter, ConfigError, runmethod
+from wa import Workload, Parameter, ConfigError
 
 
 this_dir = os.path.dirname(__file__)
@@ -62,7 +62,6 @@ class Dhrystone(Workload):
                   description='The processes spawned by sysbench will be pinned to cores as specified by this parameter'),
     ]
 
-    @runmethod
     def initialize(self, context):
         host_exe = os.path.join(this_dir, 'dhrystone')
         Dhrystone.target_exe = self.target.install(host_exe)
@@ -118,7 +117,6 @@ class Dhrystone(Workload):
         context.add_metric('total DMIPS', total_dmips)
         context.add_metric('total score', total_score)
 
-    @runmethod
     def finalize(self, context):
         self.target.uninstall('dhrystone')
 
diff --git a/wlauto/core/configuration/plugin_cache.py b/wlauto/core/configuration/plugin_cache.py
index 4c02192d..fe403843 100644
--- a/wlauto/core/configuration/plugin_cache.py
+++ b/wlauto/core/configuration/plugin_cache.py
@@ -76,7 +76,8 @@ class PluginCache(object):
             msg = "Source '{}' has not been added to the plugin cache."
             raise RuntimeError(msg.format(source))
 
-        if not self.loader.has_plugin(plugin_name) and plugin_name not in GENERIC_CONFIGS:
+        if (not self.loader.has_plugin(plugin_name) and 
+                plugin_name not in GENERIC_CONFIGS):
             msg = 'configuration provided for unknown plugin "{}"'
             raise ConfigError(msg.format(plugin_name))
 

From d9458c876727f8704df09152620752ed09ac83f6 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Tue, 7 Mar 2017 15:17:23 +0000
Subject: [PATCH 7/8] Integrated new target stuff into execution so far

---
 wa/framework/configuration/core.py         |   6 +-
 wa/framework/configuration/parsers.py      |   1 +
 wa/framework/configuration/plugin_cache.py |  85 +++++++++++------
 wa/framework/execution.py                  |  28 +-----
 wa/framework/output.py                     |  10 +-
 wa/framework/target/descriptor.py          |  39 ++++++++
 wa/framework/target/info.py                |   8 +-
 wa/framework/target/manager.py             | 103 +++++++--------------
 wa/framework/target/runtime_config.py      |  37 +++++---
 9 files changed, 170 insertions(+), 147 deletions(-)

diff --git a/wa/framework/configuration/core.py b/wa/framework/configuration/core.py
index c79df8b8..8d11ceb5 100644
--- a/wa/framework/configuration/core.py
+++ b/wa/framework/configuration/core.py
@@ -638,7 +638,8 @@ class RunConfiguration(Configuration):
 
     name = "Run Configuration"
 
-    # Metadata is separated out because it is not loaded into the auto generated config file
+    # Metadata is separated out because it is not loaded into the auto
+    # generated config file
     meta_data = [
         ConfigurationPoint('run_name', kind=str,
                            description='''
@@ -917,7 +918,8 @@ class JobSpec(Configuration):
         except NotFoundError:
             global_runtime_params = {}
         for source in plugin_cache.sources:
-            runtime_parameters[source] = global_runtime_params[source]
+            if source in global_runtime_params:
+                runtime_parameters[source] = global_runtime_params[source]
 
         # Add runtime parameters from JobSpec
         for source, values in self.to_merge['runtime_parameters'].iteritems():
diff --git a/wa/framework/configuration/parsers.py b/wa/framework/configuration/parsers.py
index df6d019e..70f50857 100644
--- a/wa/framework/configuration/parsers.py
+++ b/wa/framework/configuration/parsers.py
@@ -32,6 +32,7 @@ class ConfigParser(object):
 
     def load(self, state, raw, source, wrap_exceptions=True):  # pylint: disable=too-many-branches
         try:
+            state.plugin_cache.add_source(source)
             if 'run_name' in raw:
                 msg = '"run_name" can only be specified in the config '\
                       'section of an agenda'
diff --git a/wa/framework/configuration/plugin_cache.py b/wa/framework/configuration/plugin_cache.py
index bfabb97c..d8a3f8e8 100644
--- a/wa/framework/configuration/plugin_cache.py
+++ b/wa/framework/configuration/plugin_cache.py
@@ -181,47 +181,74 @@ class PluginCache(object):
         :rtype: A fully merged and validated configuration in the form of a
                 obj_dict.
         """
-        generic_config = copy(self.plugin_configs[generic_name])
-        specific_config = copy(self.plugin_configs[specific_name])
-        cfg_points = self.get_plugin_parameters(specific_name)
+        ms = MergeState()
+        ms.generic_name = generic_name
+        ms.specific_name = specific_name
+        ms.generic_config = copy(self.plugin_configs[generic_name])
+        ms.specific_config = copy(self.plugin_configs[specific_name])
+        ms.cfg_points = self.get_plugin_parameters(specific_name)
         sources = self.sources
-        seen_specific_config = defaultdict(list)
 
         # set_value uses the 'name' attribute of the passed object in it error
         # messages, to ensure these messages make sense the name will have to be
         # changed several times during this function.
         final_config.name = specific_name
 
-        # pylint: disable=too-many-nested-blocks
         for source in sources:
             try:
-                if source in generic_config:
-                    final_config.name = generic_name
-                    for name, cfg_point in cfg_points.iteritems():
-                        if name in generic_config[source]:
-                            if name in seen_specific_config:
-                                msg = ('"{generic_name}" configuration "{config_name}" has already been '
-                                       'specified more specifically for {specific_name} in:\n\t\t{sources}')
-                                msg = msg.format(generic_name=generic_name,
-                                                 config_name=name,
-                                                 specific_name=specific_name,
-                                                 sources=", ".join(seen_specific_config[name]))
-                                raise ConfigError(msg)
-                            value = generic_config[source][name]
-                            cfg_point.set_value(final_config, value, check_mandatory=False)
-
-                if source in specific_config:
-                    final_config.name = specific_name
-                    for name, cfg_point in cfg_points.iteritems():
-                        if name in specific_config[source]:
-                            seen_specific_config[name].append(str(source))
-                            value = specific_config[source][name]
-                            cfg_point.set_value(final_config, value, check_mandatory=False)
-
+                update_config_from_source(final_config, source, ms)
             except ConfigError as e:
                 raise ConfigError('Error in "{}":\n\t{}'.format(source, str(e)))
 
         # Validate final configuration
         final_config.name = specific_name
-        for cfg_point in cfg_points.itervalues():
+        for cfg_point in ms.cfg_points.itervalues():
             cfg_point.validate(final_config)
+
+
+class MergeState(object):
+
+    def __init__(self):
+        self.generic_name = None
+        self.specific_name = None
+        self.generic_config = None
+        self.specific_config = None
+        self.cfg_points = None
+        self.seen_specific_config = defaultdict(list)
+
+
+def update_config_from_source(final_config, source, state):
+    if source in state.generic_config:
+        final_config.name = state.generic_name
+        for name, cfg_point in state.cfg_points.iteritems():
+            if name in state.generic_config[source]:
+                if name in state.seen_specific_config:
+                    msg = ('"{generic_name}" configuration "{config_name}" has '
+                           'already been specified more specifically for '
+                           '{specific_name} in:\n\t\t{sources}')
+                    seen_sources = state.seen_specific_config[name]
+                    msg = msg.format(generic_name=state.generic_name,
+                                     config_name=name,
+                                     specific_name=state.specific_name,
+                                     sources=", ".join(seen_sources))
+                    raise ConfigError(msg)
+                value = state.generic_config[source].pop(name)
+                cfg_point.set_value(final_config, value, check_mandatory=False)
+
+        if state.generic_config[source]:
+            msg = 'Unexpected values for {}: {}'
+            raise ConfigError(msg.format(state.generic_name,
+                                         state.generic_config[source]))
+
+    if source in state.specific_config:
+        final_config.name = state.specific_name
+        for name, cfg_point in state.cfg_points.iteritems():
+            if name in state.specific_config[source]:
+                state.seen_specific_config[name].append(str(source))
+                value = state.specific_config[source].pop(name)
+                cfg_point.set_value(final_config, value, check_mandatory=False)
+
+        if state.specific_config[source]:
+            msg = 'Unexpected values for {}: {}'
+            raise ConfigError(msg.format(state.specific_name,
+                                         state.specific_config[source]))
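
The precedence rule enforced by ``update_config_from_source()`` is: for
each source in turn, generic (e.g. ``device_config``) values are applied
first and plugin-specific values second, and a generic value may not
re-set something already given more specifically. A minimal stand-alone
sketch of that ordering (``StubPoint`` and the plain dicts below are
illustrative stand-ins, not WA's real types)::

    class StubPoint(object):
        def __init__(self, name):
            self.name = name

        def set_value(self, config, value, check_mandatory=True):
            config[self.name] = value

    cfg_points = {'frequency': StubPoint('frequency')}
    generic_config = {'config/user': {'frequency': 'min'}}
    specific_config = {'config/user': {'frequency': 'max'}}

    final_config = {}
    for source in ['config/user']:
        for name, point in cfg_points.items():
            if source in generic_config and name in generic_config[source]:
                point.set_value(final_config, generic_config[source][name])
            if source in specific_config and name in specific_config[source]:
                point.set_value(final_config, specific_config[source][name])

    # The specific value wins within the same source.
    assert final_config == {'frequency': 'max'}
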
diff --git a/wa/framework/execution.py b/wa/framework/execution.py
index a5c79714..699f6494 100644
--- a/wa/framework/execution.py
+++ b/wa/framework/execution.py
@@ -57,6 +57,7 @@ from wa.framework.exception import (WAError, ConfigError, TimeoutError,
 from wa.framework.plugin import Artifact
 from wa.framework.resource import ResourceResolver
 from wa.framework.target.info import TargetInfo
+from wa.framework.target.manager import TargetManager
 from wa.utils.misc import (ensure_directory_exists as _d, 
                            get_traceback, format_duration)
 from wa.utils.serializer import json
@@ -228,30 +229,6 @@ def _check_artifact_path(path, rootpath):
     return full_path
 
 
-class FakeTargetManager(object):
-    # TODO: this is a FAKE
-
-    def __init__(self, name, config):
-        self.device_name = name
-        self.device_config = config
-
-        from devlib import LocalLinuxTarget
-        self.target = LocalLinuxTarget({'unrooted': True})
-        
-    def get_target_info(self):
-        return TargetInfo(self.target)
-
-    def validate_runtime_parameters(self, params):
-        pass
-
-    def merge_runtime_parameters(self, params):
-        pass
-
-
-def init_target_manager(config):
-    return FakeTargetManager(config.device, config.device_config)
-
-
 class Executor(object):
     """
     The ``Executor``'s job is to set up the execution context and pass to a
@@ -297,7 +274,8 @@ class Executor(object):
         output.write_config(config)
 
         self.logger.info('Connecting to target')
-        target_manager = init_target_manager(config.run_config)
+        target_manager = TargetManager(config.run_config.device,
+                                       config.run_config.device_config)
         output.write_target_info(target_manager.get_target_info())
 
         self.logger.info('Initializing execution conetext')
diff --git a/wa/framework/output.py b/wa/framework/output.py
index 77d5853e..07912bb4 100644
--- a/wa/framework/output.py
+++ b/wa/framework/output.py
@@ -6,11 +6,11 @@ import sys
 import uuid
 from copy import copy
 
-from wlauto.core.configuration.configuration import JobSpec
-from wlauto.core.configuration.manager import ConfigManager
-from wlauto.core.device_manager import TargetInfo
-from wlauto.utils.misc import touch
-from wlauto.utils.serializer import write_pod, read_pod
+from wa.framework.configuration.core import JobSpec
+from wa.framework.configuration.manager import ConfigManager
+from wa.framework.target.info import TargetInfo
+from wa.utils.misc import touch
+from wa.utils.serializer import write_pod, read_pod
 
 
 logger = logging.getLogger('output')
diff --git a/wa/framework/target/descriptor.py b/wa/framework/target/descriptor.py
index 34966367..bcaba382 100644
--- a/wa/framework/target/descriptor.py
+++ b/wa/framework/target/descriptor.py
@@ -24,6 +24,33 @@ def get_target_descriptions(loader=pluginloader):
     return targets.values()
 
 
+def instantiate_target(tdesc, params):
+    target_params = {p.name: p for p in tdesc.target_params}
+    platform_params = {p.name: p for p in tdesc.platform_params}
+    conn_params = {p.name: p for p in tdesc.conn_params}
+
+    tp, pp, cp = {}, {}, {}
+
+    for name, value in params.iteritems():
+        if name in target_params:
+            tp[name] = value
+        elif name in platform_params:
+            pp[name] = value
+        elif name in conn_params:
+            cp[name] = value
+        else:
+            msg = 'Unexpected parameter for {}: {}'
+            raise ValueError(msg.format(tdesc.name, name))
+
+    tp['platform'] = (tdesc.platform or Platform)(**pp)
+    if cp:
+        tp['connection_settings'] = cp
+    if tdesc.connection:
+        tp['conn_cls'] = tdesc.connection
+
+    return tdesc.target(**tp)
+
+
 class TargetDescription(object):
 
     def __init__(self, name, source, description=None, target=None, platform=None, 
@@ -86,6 +113,18 @@ COMMON_TARGET_PARAMS = [
               Please see ``devlab`` documentation for information on the available
               modules.
               '''),
+    Parameter('load_default_modules', kind=bool, default=True,
+              description='''
+              A number of modules (e.g. for working with the cpufreq subsystem) are
+              loaded by default when a Target is instantiated. Setting this to
+              ``False`` would suppress that, ensuring that only the base Target
+              interface is initialized.
+
+              You may want to set this if there is a problem with one or more default
+              modules on your platform (e.g. your device is unrooted and cpufreq is
+              not accessible to unprivileged users), or if Target initialization is
+              taking too long for your platform.
+              '''),
 ]
 
 COMMON_PLATFORM_PARAMS = [
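
``instantiate_target()`` above routes a flat parameter dict to whichever
of the three parameter sets (target, platform, connection) each name
belongs to. A stand-alone sketch of that routing (``StubParam`` and the
parameter names are illustrative, not the real descriptors)::

    class StubParam(object):
        def __init__(self, name):
            self.name = name

    target_params = {p.name: p for p in [StubParam('working_directory')]}
    platform_params = {p.name: p for p in [StubParam('core_names')]}
    conn_params = {p.name: p for p in [StubParam('host')]}

    params = {'working_directory': '/data/local/wa',
              'core_names': ['a53', 'a57'],
              'host': '192.168.0.10'}

    tp, pp, cp = {}, {}, {}
    for name, value in params.iteritems():
        if name in target_params:
            tp[name] = value
        elif name in platform_params:
            pp[name] = value
        elif name in conn_params:
            cp[name] = value
        else:
            raise ValueError('Unexpected parameter: {}'.format(name))

    assert tp == {'working_directory': '/data/local/wa'}
    assert pp == {'core_names': ['a53', 'a57']}
    assert cp == {'host': '192.168.0.10'}
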
diff --git a/wa/framework/target/info.py b/wa/framework/target/info.py
index 4341e155..f3e40119 100644
--- a/wa/framework/target/info.py
+++ b/wa/framework/target/info.py
@@ -1,6 +1,7 @@
 from devlib import AndroidTarget
 from devlib.exception import TargetError
 from devlib.target import KernelConfig, KernelVersion, Cpuinfo
+from devlib.utils.android import AndroidProperties
 
 
 class TargetInfo(object):
@@ -21,8 +22,9 @@ class TargetInfo(object):
 
         if pod["target"] == "AndroidTarget":
             instance.screen_resolution = pod['screen_resolution']
-            instance.prop = pod['prop']
-            instance.prop = pod['android_id']
+            instance.prop = AndroidProperties('')
+            instance.prop._properties = pod['prop']
+            instance.android_id = pod['android_id']
 
         return instance
 
@@ -72,7 +74,7 @@ class TargetInfo(object):
 
         if self.target == "AndroidTarget":
             pod['screen_resolution'] = self.screen_resolution
-            pod['prop'] = self.prop
+            pod['prop'] = self.prop._properties
             pod['android_id'] = self.android_id
 
         return pod
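
With the change above, an Android target's properties are serialized as
the plain properties dict, so the pod for an Android target looks roughly
like this (values illustrative; kernel and cpuinfo fields omitted)::

    pod = {
        'target': 'AndroidTarget',
        'screen_resolution': [1080, 1920],
        'prop': {'ro.build.version.release': '7.0'},
        'android_id': '0123456789abcdef',
    }
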
diff --git a/wa/framework/target/manager.py b/wa/framework/target/manager.py
index 659516d6..4da43bd5 100644
--- a/wa/framework/target/manager.py
+++ b/wa/framework/target/manager.py
@@ -9,6 +9,8 @@ import sys
 from wa.framework import signal
 from wa.framework.exception import WorkerThreadError, ConfigError
 from wa.framework.plugin import Parameter
+from wa.framework.target.descriptor import (get_target_descriptions,
+                                            instantiate_target)
 from wa.framework.target.info import TargetInfo
 from wa.framework.target.runtime_config import (SysfileValuesRuntimeConfig,
                                                 HotplugRuntimeConfig,
@@ -41,54 +43,29 @@ class TargetManager(object):
                   """),
     ]
 
-    DEVICE_MAPPING = {'test' : {'platform_name':'generic',
-                               'target_name': 'android'},
-                      'other':  {'platform_name':'test',
-                                'target_name': 'linux'},
-                      }
-
     runtime_config_cls = [
-                            # order matters
-                            SysfileValuesRuntimeConfig,
-                            HotplugRuntimeConfig,
-                            CpufreqRuntimeConfig,
-                            CpuidleRuntimeConfig,
-                          ]
+        # order matters
+        SysfileValuesRuntimeConfig,
+        HotplugRuntimeConfig,
+        CpufreqRuntimeConfig,
+        CpuidleRuntimeConfig,
+    ]
 
     def __init__(self, name, parameters):
-        self.name = name
+        self.target_name = name
         self.target = None
         self.assistant = None
-        self.target_name = None
         self.platform_name = None
         self.parameters = parameters
         self.disconnect = parameters.get('disconnect')
         self.info = TargetInfo()
 
-        # Determine platform and target based on passed name
-        self._parse_name()
-        # Create target
-        self._get_target()
-        # Create an assistant to perform target specific configuration
-        self._get_assistant()
+        self._init_target()
+        self._init_assistant()
 
-        ### HERE FOR TESTING, WILL BE CALLED EXTERNALLY ###
-        # Connect to device and retrieve details.
-        # self.initialize()
-        # self.add_parameters()
-        # self.validate_parameters()
-        # self.set_parameters()
-
-    def initialize(self):
         self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
-        # if self.parameters:
-        # self.logger.info('Connecting to the device')
         with signal.wrap('TARGET_CONNECT'):
             self.target.connect()
-            # self.info.load(self.target)
-            # info_file = os.path.join(self.context.info_directory, 'target.json')
-            # with open(info_file, 'w') as wfh:
-            #     json.dump(self.info.to_pod(), wfh)
 
     def finalize(self):
         # self.logger.info('Disconnecting from the device')
@@ -108,10 +85,16 @@ class TargetManager(object):
                 if any(parameter in name for parameter in cfg.supported_parameters):
                     cfg.add(name, self.parameters.pop(name))
 
-    def validate_parameters(self):
+    def get_target_info(self):
+        return TargetInfo(self.target)
+
+    def validate_runtime_parameters(self, params):
         for cfg in self.runtime_configs:
             cfg.validate()
 
+    def merge_runtime_parameters(self, params):
+        pass
+
     def set_parameters(self):
         for cfg in self.runtime_configs:
             cfg.set()
@@ -120,47 +103,23 @@ class TargetManager(object):
         for cfg in self.runtime_configs:
             cfg.clear()
 
-    def _parse_name(self):
-        # Try and get platform and target
-        self.name = identifier(self.name.replace('-', '_'))
-        if '_' in self.name:
-            self.platform_name, self.target_name = self.name.split('_', 1)
-        elif self.name in self.DEVICE_MAPPING:
-            self.platform_name = self.DEVICE_MAPPING[self.name]['platform_name']
-            self.target_name = self.DEVICE_MAPPING[self.name]['target_name']
-        else:
-            raise ConfigError('Unknown Device Specified {}'.format(self.name))
+    def _init_target(self):
+        target_map = {td.name: td for td in get_target_descriptions()}
+        if self.target_name not in target_map:
+            raise ValueError('Unknown Target: {}'.format(self.target_name))
+        tdesc = target_map[self.target_name]
+        self.target = instantiate_target(tdesc, self.parameters)
+        self.target.setup()
 
-    def _get_target(self):
-        # Create a corresponding target and target-assistant
-        if self.target_name == 'android':
-            self.target = AndroidTarget()
-        elif self.target_name == 'linux':
-            self.target = LinuxTarget()  # pylint: disable=redefined-variable-type
-        elif self.target_name == 'localLinux':
-            self.target = LocalLinuxTarget()
-        else:
-            raise ConfigError('Unknown Target Specified {}'.format(self.target_name))
-
-    def _get_assistant(self):
-        # Create a corresponding target and target-assistant to help with platformy stuff?
-        if self.target_name == 'android':
+    def _init_assistant(self):
+        # Create a target-assistant to handle OS-specific functionality.
+        if self.target.os == 'android':
             self.assistant = AndroidAssistant(self.target)
-        elif self.target_name in ['linux', 'localLinux']:
+        elif self.target.os == 'linux':
             self.assistant = LinuxAssistant(self.target)  # pylint: disable=redefined-variable-type
         else:
-            raise ConfigError('Unknown Target Specified {}'.format(self.target_name))
-
-    # def validate_runtime_parameters(self, parameters):
-    #     for  name, value in parameters.iteritems():
-    #         self.add_parameter(name, value)
-    #     self.validate_parameters()
-
-    # def set_runtime_parameters(self, parameters):
-    #     # self.clear()
-    #     for  name, value in parameters.iteritems():
-    #         self.add_parameter(name, value)
-    #     self.set_parameters()
+            raise ValueError('Unknown Target OS: {}'.format(self.target.os))
 
 
 class LinuxAssistant(object):
diff --git a/wa/framework/target/runtime_config.py b/wa/framework/target/runtime_config.py
index c978165c..4c8188ee 100644
--- a/wa/framework/target/runtime_config.py
+++ b/wa/framework/target/runtime_config.py
@@ -15,8 +15,6 @@ class RuntimeConfig(Plugin):
     parameters = [
     ]
 
-# class RuntimeConfig(object):
-
     @property
     def supported_parameters(self):
         raise NotImplementedError()
@@ -25,8 +23,8 @@ class RuntimeConfig(Plugin):
     def core_names(self):
         return unique(self.target.core_names)
 
-    def __init__(self, target):
-        super(RuntimeConfig, self).__init__()
+    def __init__(self, target, **kwargs):
+        super(RuntimeConfig, self).__init__(**kwargs)
         self.target = target
 
     def initialize(self, context):
@@ -47,6 +45,9 @@ class RuntimeConfig(Plugin):
 
 class HotplugRuntimeConfig(RuntimeConfig):
 ##### NOTE: Currently if initialized with cores hotplugged, this will fail when trying to hotplug back in
+
+    name = 'rt-hotplug'
+
     @property
     def supported_parameters(self):
         params = ['cores']
@@ -93,6 +94,8 @@ class HotplugRuntimeConfig(RuntimeConfig):
 
 class SysfileValuesRuntimeConfig(RuntimeConfig):
 
+    name = 'rt-sysfiles'
+
     @property
     def supported_parameters(self):
         return ['sysfile_values']
@@ -132,6 +135,8 @@ class SysfileValuesRuntimeConfig(RuntimeConfig):
 
 class CpufreqRuntimeConfig(RuntimeConfig):
 
+    name = 'rt-cpufreq'
+
     @property
     def supported_parameters(self):
         params = ['frequency']
@@ -151,9 +156,14 @@ class CpufreqRuntimeConfig(RuntimeConfig):
         self.min_supported_freq = {}
         self.max_supported_freq = {}
 
-        for cpu in self.target.list_online_cpus():
-            self.supported_freqs[cpu] = self.target.cpufreq.list_frequencies(cpu) or []
-            self.supported_govenors[cpu] = self.target.cpufreq.list_governors(cpu) or []
+        if self.target.has('cpufreq'):
+            for cpu in self.target.list_online_cpus():
+                freqs = self.target.cpufreq.list_frequencies(cpu) or []
+                self.supported_freqs[cpu] = freqs
+                govs = self.target.cpufreq.list_governors(cpu) or []
+                self.supported_govenors[cpu] = govs
+        else:
+            self.logger.debug('Target does not support cpufreq')
 
     def add(self, name, value):
         if not self.target.has('cpufreq'):
@@ -319,6 +329,8 @@ class CpufreqRuntimeConfig(RuntimeConfig):
 
 class CpuidleRuntimeConfig(RuntimeConfig):
 
+    name = 'rt-cpuidle'
+
     @property
     def supported_parameters(self):
         params = ['idle_states']
@@ -330,12 +342,15 @@ class CpuidleRuntimeConfig(RuntimeConfig):
         self.aliases = ['ENABLE_ALL', 'DISABLE_ALL']
         self.available_states = {}
 
-        for cpu in self.target.list_online_cpus():
-            self.available_states[cpu] = self.target.cpuidle.get_states(cpu) or []
+        if self.target.has('cpuidle'):
+            for cpu in self.target.list_online_cpus():
+                self.available_states[cpu] = self.target.cpuidle.get_states(cpu) or []
+        else:
+            self.logger.debug('Target does not support cpuidle.')
 
     def add(self, name, values):
-        if not self.target.has('cpufreq'):
-            raise TargetError('Target does not support cpufreq.')
+        if not self.target.has('cpuidle'):
+            raise TargetError('Target does not support cpuidle.')
 
         prefix, _ = split_parameter_name(name, self.supported_parameters)
         cpus = uniqueDomainCpusFromPrefix(prefix, self.target)
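
The guard pattern used in both runtime configs above, in isolation: probe
for the devlib module with ``Target.has()`` before touching it, so that
initialization degrades gracefully on targets without cpufreq or cpuidle
support (``target`` is assumed to be a connected devlib ``Target``)::

    def collect_supported_frequencies(target):
        supported = {}
        if target.has('cpufreq'):
            for cpu in target.list_online_cpus():
                supported[cpu] = target.cpufreq.list_frequencies(cpu) or []
        return supported
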

From 3885a93b60bdb8de98a576fd93d32fcce06a12b9 Mon Sep 17 00:00:00 2001
From: Sergei Trofimov <sergei.trofimov@arm.com>
Date: Tue, 7 Mar 2017 15:29:49 +0000
Subject: [PATCH 8/8] Fixed multiple connects

The Target was implicitly connecting on instantiation, and was then
explicitly re-connected by the TargetManager.
---
 wa/framework/target/descriptor.py | 4 +++-
 wa/framework/target/manager.py    | 7 +++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/wa/framework/target/descriptor.py b/wa/framework/target/descriptor.py
index bcaba382..c1252289 100644
--- a/wa/framework/target/descriptor.py
+++ b/wa/framework/target/descriptor.py
@@ -24,7 +24,7 @@ def get_target_descriptions(loader=pluginloader):
     return targets.values()
 
 
-def instantiate_target(tdesc, params):
+def instantiate_target(tdesc, params, connect=None):
     target_params = {p.name: p for p in tdesc.target_params}
     platform_params = {p.name: p for p in tdesc.platform_params}
     conn_params = {p.name: p for p in tdesc.conn_params}
@@ -47,6 +47,8 @@ def instantiate_target(tdesc, params):
         tp['connection_settings'] = cp
     if tdesc.connection:
         tp['conn_cls'] = tdesc.connection
+    if connect is not None:
+        tp['connect'] = connect
 
     return tdesc.target(**tp)
 
diff --git a/wa/framework/target/manager.py b/wa/framework/target/manager.py
index 4da43bd5..545178e6 100644
--- a/wa/framework/target/manager.py
+++ b/wa/framework/target/manager.py
@@ -62,10 +62,7 @@ class TargetManager(object):
 
         self._init_target()
         self._init_assistant()
-
         self.runtime_configs = [cls(self.target) for cls in self.runtime_config_cls]
-        with signal.wrap('TARGET_CONNECT'):
-            self.target.connect()
 
     def finalize(self):
         # self.logger.info('Disconnecting from the device')
@@ -108,7 +105,9 @@ class TargetManager(object):
         if self.target_name not in target_map:
             raise ValueError('Unknown Target: {}'.format(self.target_name))
         tdesc = target_map[self.target_name]
-        self.target = instantiate_target(tdesc, self.parameters)
+        self.target = instantiate_target(tdesc, self.parameters, connect=False)
+        with signal.wrap('TARGET_CONNECT'):
+            self.target.connect()
         self.target.setup()
 
     def _init_assistant(self):