Mirror of https://github.com/ARM-software/workload-automation.git
Synced 2025-01-18 12:06:08 +00:00
pep8: Ignore line break before binary operator
PEP8 has switched its guidance [1] for where a line break should occur in
relation to a binary operator, so don't raise this warning for new code and
update the code base to follow the new style.

[1] https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator
This commit is contained in:
parent
fbb84eca72
commit
aa4df95a69
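
For reference: flake8 reports a line break before a binary operator as W503
and a break after one as W504, which is why the check script in the first hunk
swaps W504 for W503 in its ignore list. A minimal sketch of the two styles
(variable names are illustrative, adapted from the PEP8 document's own
example, not taken from this diff):

    # Old style: break after the operator (now flagged as W504)
    income = (gross_wages +
              taxable_interest)

    # New style: break before the operator (W503, now ignored)
    income = (gross_wages
              + taxable_interest)
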
@@ -6,7 +6,7 @@ DEFAULT_DIRS=(
 EXCLUDE=wa/tests,wa/framework/target/descriptor.py
 EXCLUDE_COMMA=
-IGNORE=E501,E265,E266,W391,E401,E402,E731,W504,W605,F401
+IGNORE=E501,E265,E266,W391,E401,E402,E731,W503,W605,F401

 if ! hash flake8 2>/dev/null; then
     echo "flake8 not found in PATH"
@@ -106,8 +106,8 @@ class CreateDatabaseSubcommand(SubCommand):
     def execute(self, state, args):  # pylint: disable=too-many-branches
         if not psycopg2:
             raise CommandError(
-                'The module psycopg2 is required for the wa ' +
-                'create database command.')
+                'The module psycopg2 is required for the wa '
+                + 'create database command.')

         if args.dbname == 'postgres':
             raise ValueError('Databasename to create cannot be postgres.')
@@ -131,8 +131,8 @@ class CreateDatabaseSubcommand(SubCommand):
         config = yaml.load(config_file)
         if 'postgres' in config and not args.force_update_config:
             raise CommandError(
-                "The entry 'postgres' already exists in the config file. " +
-                "Please specify the -F flag to force an update.")
+                "The entry 'postgres' already exists in the config file. "
+                + "Please specify the -F flag to force an update.")

         possible_connection_errors = [
             (
@@ -261,8 +261,8 @@ class CreateDatabaseSubcommand(SubCommand):
         else:
             if not self.force:
                 raise CommandError(
-                    "Database {} already exists. ".format(self.dbname) +
-                    "Please specify the -f flag to create it from afresh."
+                    "Database {} already exists. ".format(self.dbname)
+                    + "Please specify the -f flag to create it from afresh."
                 )

     def _create_database_postgres(self):
@@ -400,14 +400,14 @@ class CreateWorkloadSubcommand(SubCommand):
         self.parser.add_argument('name', metavar='NAME',
                                  help='Name of the workload to be created')
         self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
-                                 help='The location at which the workload will be created. If not specified, ' +
-                                      'this defaults to "~/.workload_automation/plugins".')
+                                 help='The location at which the workload will be created. If not specified, '
+                                      + 'this defaults to "~/.workload_automation/plugins".')
         self.parser.add_argument('-f', '--force', action='store_true',
-                                 help='Create the new workload even if a workload with the specified ' +
-                                      'name already exists.')
+                                 help='Create the new workload even if a workload with the specified '
+                                      + 'name already exists.')
         self.parser.add_argument('-k', '--kind', metavar='KIND', default='basic', choices=list(create_funcs.keys()),
-                                 help='The type of workload to be created. The available options ' +
-                                      'are: {}'.format(', '.join(list(create_funcs.keys()))))
+                                 help='The type of workload to be created. The available options '
+                                      + 'are: {}'.format(', '.join(list(create_funcs.keys()))))

     def execute(self, state, args):  # pylint: disable=R0201
         where = args.path or 'local'
@@ -430,8 +430,8 @@ class CreatePackageSubcommand(SubCommand):
         self.parser.add_argument('name', metavar='NAME',
                                  help='Name of the package to be created')
         self.parser.add_argument('-p', '--path', metavar='PATH', default=None,
-                                 help='The location at which the new package will be created. If not specified, ' +
-                                      'current working directory will be used.')
+                                 help='The location at which the new package will be created. If not specified, '
+                                      + 'current working directory will be used.')
         self.parser.add_argument('-f', '--force', action='store_true',
                                  help='Create the new package even if a file or directory with the same name '
                                       'already exists at the specified location.')
@@ -96,8 +96,8 @@ class RecordCommand(Command):
         if args.workload and args.output:
             self.logger.error("Output file cannot be specified with Workload")
             sys.exit()
-        if not args.workload and (args.setup or args.extract_results or
-                                  args.teardown or args.all):
+        if not args.workload and (args.setup or args.extract_results
+                                  or args.teardown or args.all):
             self.logger.error("Cannot specify a recording stage without a Workload")
             sys.exit()
         if args.workload and not any([args.all, args.teardown, args.extract_results, args.run, args.setup]):
@@ -84,9 +84,9 @@ class PluginCache(object):
                   'defined in a config file, move the entry content into the top level'
             raise ConfigError(msg.format((plugin_name)))

-        if (not self.loader.has_plugin(plugin_name) and
-                plugin_name not in self.targets and
-                plugin_name not in GENERIC_CONFIGS):
+        if (not self.loader.has_plugin(plugin_name)
+                and plugin_name not in self.targets
+                and plugin_name not in GENERIC_CONFIGS):
             msg = 'configuration provided for unknown plugin "{}"'
             raise ConfigError(msg.format(plugin_name))

@@ -95,8 +95,8 @@ class PluginCache(object):
             raise ConfigError(msg.format(plugin_name, repr(values), type(values)))

         for name, value in values.items():
-            if (plugin_name not in GENERIC_CONFIGS and
-                    name not in self.get_plugin_parameters(plugin_name)):
+            if (plugin_name not in GENERIC_CONFIGS
+                    and name not in self.get_plugin_parameters(plugin_name)):
                 msg = "'{}' is not a valid parameter for '{}'"
                 raise ConfigError(msg.format(name, plugin_name))

@@ -128,8 +128,8 @@ class ExecutionContext(object):
         self.run_state.status = status
         self.run_output.status = status
         self.run_output.info.end_time = datetime.utcnow()
-        self.run_output.info.duration = (self.run_output.info.end_time -
-                                         self.run_output.info.start_time)
+        self.run_output.info.duration = (self.run_output.info.end_time
+                                         - self.run_output.info.start_time)
         self.write_output()

     def finalize(self):
@@ -268,8 +268,8 @@ class RunOutput(Output, RunOutputCommon):
         self._combined_config = None
         self.jobs = []
         self.job_specs = []
-        if (not os.path.isfile(self.statefile) or
-                not os.path.isfile(self.infofile)):
+        if (not os.path.isfile(self.statefile)
+                or not os.path.isfile(self.infofile)):
             msg = '"{}" does not exist or is not a valid WA output directory.'
             raise ValueError(msg.format(self.basepath))
         self.reload()
@@ -166,8 +166,8 @@ class AndroidAssistant(object):
         else:
             parser = LogcatParser()
             for event in parser.parse(outfile):
-                if (event.tag == self._logcat_marker_tag and
-                        event.message == self._logcat_marker_msg):
+                if (event.tag == self._logcat_marker_tag
+                        and event.message == self._logcat_marker_msg):
                     return True

         return False
@@ -275,8 +275,8 @@ class LogcatPoller(threading.Thread):
             counter = self._start_marker
             for event in parser.parse(outfile):
                 message = self._logcat_marker_msg.split(':')[0]
-                if not (event.tag == self._logcat_marker_tag and
-                        event.message.split(':')[0] == message):
+                if not (event.tag == self._logcat_marker_tag
+                        and event.message.split(':')[0] == message):
                     continue

                 number = int(event.message.split(':')[1])
@@ -201,16 +201,16 @@ class DelayInstrument(Instrument):
             reading = self.target.read_int(self.temperature_file)

     def validate(self):
-        if (self.temperature_between_specs is not None and
-                self.fixed_between_specs is not None):
+        if (self.temperature_between_specs is not None
+                and self.fixed_between_specs is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified for specs.')

-        if (self.temperature_between_jobs is not None and
-                self.fixed_between_jobs is not None):
+        if (self.temperature_between_jobs is not None
+                and self.fixed_between_jobs is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified for jobs.')

-        if (self.temperature_before_start is not None and
-                self.fixed_before_start is not None):
+        if (self.temperature_before_start is not None
+                and self.fixed_before_start is not None):
             raise ConfigError('Both fixed delay and thermal threshold specified before start.')

         if not any([self.temperature_between_specs, self.fixed_between_specs,
@@ -169,9 +169,9 @@ class SysfsExtractor(Instrument):
         for paths in self.device_and_host_paths:
             after_dir = paths[self.AFTER_PATH]
             dev_dir = paths[self.DEVICE_PATH].strip('*')  # remove potential trailing '*'
-            if (not os.listdir(after_dir) and
-                    self.target.file_exists(dev_dir) and
-                    self.target.list_directory(dev_dir)):
+            if (not os.listdir(after_dir)
+                    and self.target.file_exists(dev_dir)
+                    and self.target.list_directory(dev_dir)):
                 self.logger.error('sysfs files were not pulled from the device.')
                 self.device_and_host_paths.remove(paths)  # Path is removed to skip diffing it
         for dev_dir, before_dir, after_dir, diff_dir in self.device_and_host_paths:
@@ -134,8 +134,8 @@ class CpuStatesProcessor(OutputProcessor):
             parallel_rows.append([job_id, workload, iteration] + record)
         for state in sorted(powerstate_report.state_stats):
             stats = powerstate_report.state_stats[state]
-            powerstate_rows.append([job_id, workload, iteration, state] +
-                                   ['{:.3f}'.format(s if s is not None else 0)
+            powerstate_rows.append([job_id, workload, iteration, state]
+                                   + ['{:.3f}'.format(s if s is not None else 0)
                                     for s in stats])

         outpath = output.get_path('parallel-stats.csv')
@@ -90,8 +90,8 @@ class CsvReportProcessor(OutputProcessor):

         outfile = output.get_path('results.csv')
         with csvwriter(outfile) as writer:
-            writer.writerow(['id', 'workload', 'iteration', 'metric', ] +
-                            extra_columns + ['value', 'units'])
+            writer.writerow(['id', 'workload', 'iteration', 'metric', ]
+                            + extra_columns + ['value', 'units'])

             for o in outputs:
                 if o.kind == 'job':
@@ -106,8 +106,8 @@ class CsvReportProcessor(OutputProcessor):
                         'Output of kind "{}" unrecognised by csvproc'.format(o.kind))

                 for metric in o.result.metrics:
-                    row = (header + [metric.name] +
-                           [str(metric.classifiers.get(c, ''))
-                            for c in extra_columns] +
-                           [str(metric.value), metric.units or ''])
+                    row = (header + [metric.name]
+                           + [str(metric.classifiers.get(c, ''))
+                              for c in extra_columns]
+                           + [str(metric.value), metric.units or ''])
                     writer.writerow(row)
@@ -124,8 +124,8 @@ class PostgresqlResultProcessor(OutputProcessor):

         if not psycopg2:
             raise ImportError(
-                'The psycopg2 module is required for the ' +
-                'Postgresql Output Processor: {}'.format(import_error_msg))
+                'The psycopg2 module is required for the '
+                + 'Postgresql Output Processor: {}'.format(import_error_msg))
         # N.B. Typecasters are for postgres->python and adapters the opposite
         self.connect_to_database()

@@ -515,8 +515,8 @@ class PostgresqlResultProcessor(OutputProcessor):
             self.conn = connect(dsn=dsn)
         except Psycopg2Error as e:
             raise OutputProcessorError(
-                "Database error, if the database doesn't exist, " +
-                "please use 'wa create database' to create the database: {}".format(e))
+                "Database error, if the database doesn't exist, "
+                + "please use 'wa create database' to create the database: {}".format(e))
         self.cursor = self.conn.cursor()
         self.verify_schema_versions()

@@ -95,8 +95,8 @@ def diff_sysfs_dirs(before, after, result):  # pylint: disable=R0914
                 logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
                 dfh.write('xxx ' + bline)
                 continue
-            if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
-                    (bchunks[0] == achunks[0])):
+            if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2)
+                    and (bchunks[0] == achunks[0])):
                 # if there are only two columns and the first column is the
                 # same, assume it's a "header" column and do not diff it.
                 dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
@@ -79,7 +79,7 @@ def init(verbosity=logging.INFO, color=True, indent_with=4,
     root_logger.addHandler(_console_handler)

     buffer_capacity = int(os.getenv('WA_LOG_BUFFER_CAPACITY',
                                     str(DEFAULT_INIT_BUFFER_CAPACITY)))
     _init_handler = InitHandler(buffer_capacity)
     _init_handler.setLevel(logging.DEBUG)
     root_logger.addHandler(_init_handler)
@@ -404,8 +404,8 @@ def istextfile(fileobj, blocksize=512):
     If more than 30% of the chars in the block are non-text, or there
     are NUL ('\x00') bytes in the block, assume this is a binary file.
     """
-    _text_characters = (b''.join(chr(i) for i in range(32, 127)) +
-                        b'\n\r\t\f\b')
+    _text_characters = (b''.join(chr(i) for i in range(32, 127))
+                        + b'\n\r\t\f\b')

     block = fileobj.read(blocksize)
     if b'\x00' in block:
@@ -155,8 +155,8 @@ class ReventRecording(object):
         else:  # not streaming
             if not self._events:
                 self._duration = 0
-            self._duration = (self._events[-1].time -
-                              self._events[0].time).total_seconds()
+            self._duration = (self._events[-1].time
+                              - self._events[0].time).total_seconds()
         return self._duration

     @property
@@ -343,7 +343,7 @@ def _read_pod(fh, fmt=None):
         fmt = os.path.splitext(fh.name)[1].lower().strip('.')
         if fmt == '':
             # Special case of no given file extension
-            message = ("Could not determine format " +
+            message = ("Could not determine format "
                        "from file extension for \"{}\". "
                        "Please specify it or modify the fmt parameter.")
             raise ValueError(message.format(getattr(fh, 'name', '<none>')))
@@ -88,6 +88,6 @@ class IdleWorkload(Workload):
             self.target.sleep(1)
         if self.screen_off and self.old_screen_state:
             self.target.ensure_screen_is_on()
-        elif (self.target.os == 'android' and
-              not self.screen_off and not self.old_screen_state):
+        elif (self.target.os == 'android'
+              and not self.screen_off and not self.old_screen_state):
             self.target.ensure_screen_is_off()
@@ -259,8 +259,8 @@ class Speedometer(Workload):
         while not benchmark_complete:
             if self.target_file_was_created(local_storage):
                 if (
-                    iterations % (find_period_s // sleep_period_s) == 0 or
-                    not local_storage_seen
+                    iterations % (find_period_s // sleep_period_s) == 0
+                    or not local_storage_seen
                 ):
                     # There's a chance we don't see the localstorage file immediately, and there's a
                     # chance more of them could be created later, so check for those files every ~30