diff --git a/wlauto/instrumentation/fps/__init__.py b/wlauto/instrumentation/fps/__init__.py
index 794764fb..6316eadd 100755
--- a/wlauto/instrumentation/fps/__init__.py
+++ b/wlauto/instrumentation/fps/__init__.py
@@ -111,6 +111,18 @@ class FpsInstrument(Instrument):
                   a content crash. E.g. a value of ``0.75`` means the number of actual frames counted is a
                   quarter lower than expected, it will treated as a content crash.
                   """),
+        Parameter('dumpsys_period', kind=float, default=2, constraint=lambda x: x > 0,
+                  description="""
+                  Specifies the time period between calls to ``dumpsys SurfaceFlinger --latency`` in
+                  seconds when collecting frame data. Using a lower value improves the granularity
+                  of timings when recording actions that take a short time to complete. Note that
+                  this will produce duplicate frame data in the raw dumpsys output; however, this
+                  is filtered out in frames.csv. It may also affect the overall load on the system.
+
+                  The default value of 2 seconds corresponds to NUM_FRAME_RECORDS in
+                  android/services/surfaceflinger/FrameTracker.h (128 at the time of
+                  writing) and a frame rate of 60 fps, which is applicable to most devices.
+                  """),
     ]
 
     clear_command = 'dumpsys SurfaceFlinger --latency-clear '
@@ -135,7 +147,8 @@ class FpsInstrument(Instrument):
         if hasattr(workload, 'view'):
             self.fps_outfile = os.path.join(context.output_directory, 'fps.csv')
             self.outfile = os.path.join(context.output_directory, 'frames.csv')
-            self.collector = LatencyCollector(self.outfile, self.device, workload.view or '', self.keep_raw, self.logger)
+            self.collector = LatencyCollector(self.outfile, self.device, workload.view or '',
+                                              self.keep_raw, self.logger, self.dumpsys_period)
             self.device.execute(self.clear_command)
         else:
             self.logger.debug('Workload does not contain a view; disabling...')
@@ -153,21 +166,22 @@ class FpsInstrument(Instrument):
 
     def update_result(self, context):
         if self.is_enabled:
+            fps, frame_count, janks, not_at_vsync = float('nan'), 0, 0, 0
             data = pd.read_csv(self.outfile)
             if not data.empty:  # pylint: disable=maybe-no-member
                 fp = FpsProcessor(data)
                 per_frame_fps, metrics = fp.process(self.collector.refresh_period, self.drop_threshold)
                 fps, frame_count, janks, not_at_vsync = metrics
 
-                context.result.add_metric('FPS', fps)
-                context.result.add_metric('frame_count', frame_count)
-                context.result.add_metric('janks', janks)
-                context.result.add_metric('not_at_vsync', not_at_vsync)
-
                 if self.generate_csv:
                     per_frame_fps.to_csv(self.fps_outfile, index=False, header=True)
                     context.add_artifact('fps', path='fps.csv', kind='data')
 
+            context.result.add_metric('FPS', fps)
+            context.result.add_metric('frame_count', frame_count)
+            context.result.add_metric('janks', janks)
+            context.result.add_metric('not_at_vsync', not_at_vsync)
+
     def slow_update_result(self, context):
         result = context.result
         if self.crash_check and result.has_metric('execution_time'):
@@ -192,16 +206,17 @@ class LatencyCollector(threading.Thread):
     #       At the time of writing, this was hard-coded to 128. So at 60 fps
     #       (and there is no reason to go above that, as it matches vsync rate
     #       on pretty much all phones), there is just over 2 seconds' worth of
-    #       frames in there. Hence the sleep time of 2 seconds between dumps.
+    #       frames in there. Hence the default sleep time of 2 seconds between dumps.
     #command_template = 'while (true); do dumpsys SurfaceFlinger --latency {}; sleep 2; done'
     command_template = 'dumpsys SurfaceFlinger --latency {}'
 
-    def __init__(self, outfile, device, activities, keep_raw, logger):
+    def __init__(self, outfile, device, activities, keep_raw, logger, dumpsys_period):
         super(LatencyCollector, self).__init__()
         self.outfile = outfile
         self.device = device
         self.keep_raw = keep_raw
         self.logger = logger
+        self.dumpsys_period = dumpsys_period
         self.stop_signal = threading.Event()
         self.frames = []
         self.last_ready_time = 0
@@ -226,7 +241,7 @@ class LatencyCollector(threading.Thread):
                     for activity in self.activities:
                         if activity in view_list:
                             wfh.write(self.device.execute(self.command_template.format(activity)))
-                    time.sleep(2)
+                    time.sleep(self.dumpsys_period)
             finally:
                 wfh.close()
             # TODO: this can happen after the run during results processing
diff --git a/wlauto/result_processors/uxperf.py b/wlauto/result_processors/uxperf.py
index 07e5a683..f303df30 100644
--- a/wlauto/result_processors/uxperf.py
+++ b/wlauto/result_processors/uxperf.py
@@ -15,6 +15,7 @@
 
 import os
 import re
+import logging
 
 from collections import defaultdict
 from distutils.version import LooseVersion
@@ -86,12 +87,15 @@ class UxPerfResultProcessor(ResultProcessor):
         logfile = os.path.join(context.output_directory, 'logcat.log')
         framelog = os.path.join(context.output_directory, 'frames.csv')
 
+        self.logger.debug('Parsing logcat.log for UX_PERF markers')
         parser.parse(logfile)
 
         if self.add_timings:
+            self.logger.debug('Adding per-action timings')
             parser.add_action_timings()
 
         if self.add_frames:
+            self.logger.debug('Adding per-action frame metrics')
             parser.add_action_frames(framelog, self.drop_threshold, self.generate_csv)
 
 
@@ -116,6 +120,7 @@ class UxPerfParser(object):
     def __init__(self, context):
         self.context = context
         self.actions = defaultdict(list)
+        self.logger = logging.getLogger('UxPerfParser')
 
         # regex for matching logcat message format:
         # date time PID-TID/package priority/tag: message
@@ -146,16 +151,25 @@ class UxPerfParser(object):
         refresh_period = self._parse_refresh_peroid()
 
         for action in self.actions:
+            # default values
+            fps = float('nan')
+            frame_count, janks, not_at_vsync = 0, 0, 0
+            metrics = fps, frame_count, janks, not_at_vsync
+
             df = self._create_data_dict(action, frames)
             fp = FpsProcessor(pd.DataFrame(df), action=action)
-            per_frame_fps, metrics = fp.process(refresh_period, drop_threshold)
+            try:
+                per_frame_fps, metrics = fp.process(refresh_period, drop_threshold)
 
-            if generate_csv:
-                name = action + '_fps'
-                filename = name + '.csv'
-                fps_outfile = os.path.join(self.context.output_directory, filename)
-                per_frame_fps.to_csv(fps_outfile, index=False, header=True)
-                self.context.add_artifact(name, path=filename, kind='data')
+                if generate_csv:
+                    name = action + '_fps'
+                    filename = name + '.csv'
+                    fps_outfile = os.path.join(self.context.output_directory, filename)
+                    per_frame_fps.to_csv(fps_outfile, index=False, header=True)
+                    self.context.add_artifact(name, path=filename, kind='data')
+            except AttributeError:
+                self.logger.warning('Non-matched timestamps in dumpsys output: action={}'
+                                    .format(action))
 
             fps, frame_count, janks, not_at_vsync = metrics
             result = self.context.result
@@ -232,14 +246,16 @@ class UxPerfParser(object):
 
         return d
 
-    @staticmethod
-    def _read(log):
+    def _read(self, log):
         '''
         Opens a file a yields the lines with whitespace stripped.
         '''
-        with open(log, 'r') as rfh:
-            for line in rfh:
-                yield line.strip()
+        try:
+            with open(log, 'r') as rfh:
+                for line in rfh:
+                    yield line.strip()
+        except IOError:
+            self.logger.error('Could not open {}'.format(log))
 
     @staticmethod
     def _matched_rows(rows, timestamps):
diff --git a/wlauto/utils/fps.py b/wlauto/utils/fps.py
index 671f7871..0b3c2d97 100644
--- a/wlauto/utils/fps.py
+++ b/wlauto/utils/fps.py
@@ -16,10 +16,10 @@
 
 class FpsProcessor(object):
     """
-    Provide common object for processing surfaceFlinger output for frame
+    Provides a common object for processing SurfaceFlinger output for frame
     statistics.
 
-    This processor adds four metrics to the results:
+    This processor returns the four frame statistics below:
 
         :FPS: Frames Per Second. This is the frame rate of the workload.
         :frames: The total number of frames rendered during the execution of
@@ -72,11 +72,12 @@ class FpsProcessor(object):
             total_vsyncs = filtered_vsyncs_to_compose.sum()
             frame_count = filtered_vsyncs_to_compose.size
 
-        if total_vsyncs:
-            fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
+            if total_vsyncs:
+                fps = 1e9 * frame_count / (vsync_interval * total_vsyncs)
+
+            janks = self._calc_janks(filtered_vsyncs_to_compose)
+            not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
 
-        janks = self._calc_janks(filtered_vsyncs_to_compose)
-        not_at_vsync = self._calc_not_at_vsync(vsyncs_to_compose)
         metrics = (fps, frame_count, janks, not_at_vsync)
 
         return per_frame_fps, metrics