mirror of
https://github.com/ARM-software/workload-automation.git
synced 2025-09-01 19:02:31 +01:00
New target description + moving target stuff under "framework"
Changing the way target descriptions work from a static mapping to something that is dynamically generated and is extensible via plugins. Also moving core target implementation stuff under "framework".
This commit is contained in:
148
wa/utils/formatter.py
Normal file
148
wa/utils/formatter.py
Normal file
@@ -0,0 +1,148 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from wa.utils.terminalsize import get_terminal_size
|
||||
|
||||
|
||||
INDENTATION_FROM_TITLE = 4
|
||||
|
||||
|
||||
class TextFormatter(object):
    """
    Abstract base for text formatters.

    Subclasses are expected to override :meth:`add_item` (feed text into the
    formatter) and :meth:`format_data` (produce the formatted output). The
    ``name`` class attribute identifies the concrete formatter.
    """

    # Identifier of the concrete formatter; set by subclasses.
    name = None
    # Accumulated items; concrete subclasses decide the representation.
    data = None

    def __init__(self):
        pass

    def add_item(self, new_data, item_title):
        """
        Add a new item to the formatter.

        :param new_data: the text to be added.
        :param item_title: a title describing the added text.
        """
        raise NotImplementedError()

    def format_data(self):
        """Return the accumulated items as a single formatted string."""
        raise NotImplementedError()
|
||||
|
||||
|
||||
class DescriptionListFormatter(TextFormatter):
    """
    Formats (title, description) pairs as an aligned description list::

        title:    first line of the description
                  subsequent lines, wrapped to the available width

    Titles are right-aligned to the longest title seen, and descriptions are
    re-flowed to fit within the configured (or detected terminal) width.
    """

    name = 'description_list_formatter'
    data = None

    def get_text_width(self):
        # Lazily fall back to the terminal width if none was given.
        if not self._text_width:
            self._text_width, _ = get_terminal_size()  # pylint: disable=unpacking-non-sequence
        return self._text_width

    def set_text_width(self, value):
        self._text_width = value

    text_width = property(get_text_width, set_text_width)

    def __init__(self, title=None, width=None):
        """
        :param title: optional heading emitted before the list.
        :param width: total output width; defaults to the terminal width.
        """
        super(DescriptionListFormatter, self).__init__()
        self.data_title = title
        self._text_width = width
        self.longest_word_length = 0
        self.data = []

    def add_item(self, new_data, item_title):
        """Record ``new_data`` under ``item_title``, tracking the longest title."""
        if len(item_title) > self.longest_word_length:
            self.longest_word_length = len(item_title)
        self.data[len(self.data):] = [(item_title, self._remove_newlines(new_data))]

    def format_data(self):
        """Return all recorded items as a single formatted string."""
        parag_indentation = self.longest_word_length + INDENTATION_FROM_TITLE
        string_formatter = '{}:<{}{} {}'.format('{', parag_indentation, '}', '{}')

        formatted_data = ''
        if self.data_title:
            formatted_data += self.data_title

        line_width = self.text_width - parag_indentation
        for title, paragraph in self.data:
            formatted_data += '\n'
            title_len = self.longest_word_length - len(title)
            title += ':'
            if title_len > 0:
                title = (' ' * title_len) + title

            parag_lines = self._break_lines(paragraph, line_width).splitlines()
            if parag_lines:
                formatted_data += string_formatter.format(title, parag_lines[0])
                for line in parag_lines[1:]:
                    formatted_data += '\n' + string_formatter.format('', line)
            else:
                formatted_data += title[:-1]

        self.text_width = None
        return formatted_data

    # Return text's paragraphs separated in a list, such that each index in the
    # list is a single text paragraph with no new lines
    def _remove_newlines(self, new_data):  # pylint: disable=R0201
        parag_list = ['']
        parag_num = 0
        prv_parag = None
        # For each paragraph separated by a new line
        for paragraph in new_data.splitlines():
            if paragraph:
                parag_list[parag_num] += ' ' + paragraph
            # if the previous line is NOT empty, then add new empty index for
            # the next paragraph
            elif prv_parag:
                # FIX: advance the index (was ``parag_num = 1``). With the old
                # code the third and later paragraphs were all merged into the
                # second slot while empty strings accumulated at the end.
                parag_num += 1
                parag_list.append('')
            prv_parag = paragraph

        # sometimes, we end up with an empty string as the last item so we
        # remove it
        if not parag_list[-1]:
            return parag_list[:-1]
        return parag_list

    def _break_lines(self, parag_list, line_width):  # pylint: disable=R0201
        formatted_paragraphs = []
        for para in parag_list:
            words = para.split()
            if words:
                formatted_text = words.pop(0)
                current_width = len(formatted_text)
                # for each word in the paragraph, line width is an accumulation
                # of word length + 1 (1 is for the space after each word).
                for word in words:
                    word = word.strip()
                    if current_width + len(word) + 1 >= line_width:
                        formatted_text += '\n' + word
                        current_width = len(word)
                    else:
                        formatted_text += ' ' + word
                        current_width += len(word) + 1
                formatted_paragraphs.append(formatted_text)
        return '\n\n'.join(formatted_paragraphs)
|
306
wa/utils/log.py
Normal file
306
wa/utils/log.py
Normal file
@@ -0,0 +1,306 @@
|
||||
# Copyright 2013-2015 ARM Limited
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
# pylint: disable=E1101
|
||||
import logging
|
||||
import string
|
||||
import threading
|
||||
import subprocess
|
||||
|
||||
import colorama
|
||||
|
||||
from wa.framework import signal
|
||||
from wa.framework.exception import WAError
|
||||
from wa.utils.misc import get_traceback
|
||||
|
||||
|
||||
COLOR_MAP = {
|
||||
logging.DEBUG: colorama.Fore.BLUE,
|
||||
logging.INFO: colorama.Fore.GREEN,
|
||||
logging.WARNING: colorama.Fore.YELLOW,
|
||||
logging.ERROR: colorama.Fore.RED,
|
||||
logging.CRITICAL: colorama.Style.BRIGHT + colorama.Fore.RED,
|
||||
}
|
||||
|
||||
RESET_COLOR = colorama.Style.RESET_ALL
|
||||
|
||||
_indent_level = 0
|
||||
_indent_width = 4
|
||||
_console_handler = None
|
||||
|
||||
|
||||
def init(verbosity=logging.INFO, color=True, indent_with=4,
         regular_fmt='%(levelname)-8s %(message)s',
         verbose_fmt='%(asctime)s %(levelname)-8s %(name)10.10s: %(message)s',
         debug=False):
    """
    Configure root logging for WA: an ErrorSignalHandler broadcasting
    error/warning signals plus a console StreamHandler.

    NOTE(review): ``verbosity`` is tested for truthiness below, so the default
    of ``logging.INFO`` (20) selects the verbose format; callers appear to be
    expected to pass a count (0 disables verbose output) -- confirm.
    """
    global _indent_width, _console_handler
    _indent_width = indent_with
    signal.log_error_func = lambda m: log_error(m, signal.logger)

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)

    # Broadcast framework signals for logged errors/warnings.
    root_logger.addHandler(ErrorSignalHandler(logging.DEBUG))

    _console_handler = logging.StreamHandler()
    formatter_cls = ColorFormatter if color else LineFormatter
    if verbosity:
        console_level, console_fmt = logging.DEBUG, verbose_fmt
    else:
        console_level, console_fmt = logging.INFO, regular_fmt
    _console_handler.setLevel(console_level)
    _console_handler.setFormatter(formatter_cls(console_fmt))
    root_logger.addHandler(_console_handler)

    logging.basicConfig(level=logging.DEBUG)
    if not debug:
        # Suppress logging's own error reporting (e.g. broken handlers)
        # unless running in debug mode.
        logging.raiseExceptions = False
|
||||
|
||||
|
||||
def set_level(level):
    """Set the logging level of the console handler installed by init()."""
    _console_handler.setLevel(level)
|
||||
|
||||
|
||||
def add_file(filepath, level=logging.DEBUG,
             fmt='%(asctime)s %(levelname)-8s %(name)s: %(message)-10.10s'):
    """
    Attach a ``FileHandler`` writing to ``filepath`` to the root logger.

    :param filepath: destination log file.
    :param level: minimum level recorded in the file.
    :param fmt: format string passed to LineFormatter.
    """
    handler = logging.FileHandler(filepath)
    handler.setLevel(level)
    handler.setFormatter(LineFormatter(fmt))
    logging.getLogger().addHandler(handler)
|
||||
|
||||
|
||||
def enable(logs):
    """Enable propagation for the specified logger (or list of loggers)."""
    if not isinstance(logs, list):
        logs = [logs]
    for log in logs:
        __enable_logger(log)
|
||||
|
||||
|
||||
def disable(logs):
    """Disable propagation for the specified logger (or list of loggers)."""
    if not isinstance(logs, list):
        logs = [logs]
    for log in logs:
        __disable_logger(log)
|
||||
|
||||
|
||||
def __enable_logger(logger):
    # Accepts either a logger name or a Logger instance; "enabling" means
    # letting its records propagate up to the root logger's handlers.
    if isinstance(logger, basestring):
        logger = logging.getLogger(logger)
    logger.propagate = True
|
||||
|
||||
|
||||
def __disable_logger(logger):
    # Accepts either a logger name or a Logger instance; "disabling" means
    # stopping its records from propagating to the root logger's handlers.
    if isinstance(logger, basestring):
        logger = logging.getLogger(logger)
    logger.propagate = False
|
||||
|
||||
|
||||
def indent():
    """Increase the global indent applied by LineFormatter to messages."""
    global _indent_level
    _indent_level += 1
|
||||
|
||||
|
||||
def dedent():
    """Decrease the global indent applied by LineFormatter to messages."""
    global _indent_level
    _indent_level -= 1
|
||||
|
||||
|
||||
def log_error(e, logger, critical=False):
    """
    Log the specified Exception as an error. The error message will be
    formatted differently depending on the nature of the exception.

    :e: the error to log. should be an instance of ``Exception``
    :logger: logger to be used.
    :critical: if ``True``, this error will be logged at ``logging.CRITICAL``
               level, otherwise it will be logged as ``logging.ERROR``.

    """
    log_func = logger.critical if critical else logger.error

    if isinstance(e, KeyboardInterrupt):
        log_func('Got CTRL-C. Aborting.')
    elif isinstance(e, WAError):
        # Framework errors carry a user-presentable message already.
        log_func(e)
    elif isinstance(e, subprocess.CalledProcessError):
        log_func(get_traceback())
        command = e.cmd
        if e.args:
            command = '{} {}'.format(command, ' '.join(e.args))
        message = 'Command \'{}\' returned non-zero exit status {}\nOUTPUT:\n{}\n'
        log_func(message.format(command, e.returncode, e.output))
    elif isinstance(e, SyntaxError):
        log_func(get_traceback())
        message = 'Syntax Error in {}, line {}, offset {}:'
        log_func(message.format(e.filename, e.lineno, e.offset))
        log_func('\t{}'.format(e.msg))
    else:
        # Unknown error: dump the traceback plus a type(message) summary.
        log_func(get_traceback())
        log_func('{}({})'.format(e.__class__.__name__, e))
|
||||
|
||||
|
||||
class ErrorSignalHandler(logging.Handler):
    """
    Emits signals for ERROR and WARNING level traces.

    """

    def emit(self, record):
        # Map record severity onto the corresponding framework signal;
        # anything below WARNING is ignored.
        level = record.levelno
        if level == logging.ERROR:
            signal.send(signal.ERROR_LOGGED, self)
        elif level == logging.WARNING:
            signal.send(signal.WARNING_LOGGED, self)
|
||||
|
||||
|
||||
class LineFormatter(logging.Formatter):
    """
    Logs each line of the message separately.

    """

    def format(self, record):
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)

        # Apply the global indent and format every line of the message
        # individually, so multi-line messages carry the prefix on each line.
        pad = ' ' * (_indent_width * _indent_level)
        attrs = record.__dict__
        formatted = []
        for line in record.message.split('\n'):
            attrs['message'] = (pad + line).strip('\r')
            formatted.append(self._fmt % attrs)

        return '\n'.join(formatted)
|
||||
|
||||
|
||||
class ColorFormatter(LineFormatter):
    """
    Formats logging records with color and prepends record info
    to each line of the message.

        BLUE for DEBUG logging level
        GREEN for INFO logging level
        YELLOW for WARNING logging level
        RED for ERROR logging level
        BOLD RED for CRITICAL logging level

    """

    def __init__(self, fmt=None, datefmt=None):
        super(ColorFormatter, self).__init__(fmt, datefmt)
        # Wrap the message part of the format in color escapes; the concrete
        # color is substituted per-record by _set_color().
        template_text = self._fmt.replace('%(message)s', RESET_COLOR + '%(message)s${color}')
        template_text = '${color}' + template_text + RESET_COLOR
        self.fmt_template = string.Template(template_text)

    def format(self, record):
        # NOTE(review): this rewrites self._fmt on every call before
        # delegating to LineFormatter.format, so a shared instance is not
        # safe for concurrent formatting -- confirm handlers serialise access.
        self._set_color(COLOR_MAP[record.levelno])
        return super(ColorFormatter, self).format(record)

    def _set_color(self, color):
        self._fmt = self.fmt_template.substitute(color=color)
|
||||
|
||||
|
||||
class BaseLogWriter(object):
    """
    File-like object class designed to be used for logging from streams.

    Each complete line (terminated by a new line character) gets logged;
    incomplete lines are buffered until the next new line arrives.

    """

    def __init__(self, name, level=logging.DEBUG):
        """
        :param name: The name of the logger that will be used.
        :param level: severity at which complete lines are logged.
        """
        self.logger = logging.getLogger(name)
        self.buffer = ''
        writers = {
            logging.DEBUG: self.logger.debug,
            logging.INFO: self.logger.info,
            logging.WARNING: self.logger.warning,
            logging.ERROR: self.logger.error,
        }
        try:
            self.do_write = writers[level]
        except KeyError:
            raise Exception('Unknown logging level: {}'.format(level))

    def flush(self):
        # Defined to match the interface expected by pexpect.
        return self

    def close(self):
        # Flush whatever is still buffered as a final DEBUG record.
        if self.buffer:
            self.logger.debug(self.buffer)
            self.buffer = ''
        return self

    def __del__(self):
        # Ensure we don't lose buffered output.
        self.close()
|
||||
|
||||
|
||||
class LogWriter(BaseLogWriter):
    """Log writer that emits complete lines and buffers partial ones."""

    def write(self, data):
        # Normalise Windows (\r\n) and old-Mac (\r) line endings to \n.
        data = data.replace('\r\n', '\n').replace('\r', '\n')
        if '\n' not in data:
            self.buffer += data
            return self
        lines = data.split('\n')
        # Prepend whatever was previously buffered to the first line; the
        # final (possibly empty) fragment becomes the new buffer.
        lines[0] = self.buffer + lines[0]
        for line in lines[:-1]:
            self.do_write(line)
        self.buffer = lines[-1]
        return self
|
||||
|
||||
|
||||
class LineLogWriter(BaseLogWriter):

    # No buffering: every write() call is logged verbatim as one record at
    # the level configured in BaseLogWriter.__init__.
    def write(self, data):
        self.do_write(data)
|
||||
|
||||
|
||||
class StreamLogger(threading.Thread):
    """
    Logs output from a stream in a thread.

    """

    def __init__(self, name, stream, level=logging.DEBUG, klass=LogWriter):
        """
        :param name: name of the logger records are emitted through.
        :param stream: file-like object to read lines from.
        :param level: severity used for the emitted records.
        :param klass: writer class used to turn lines into records.
        """
        super(StreamLogger, self).__init__()
        self.writer = klass(name, level)
        self.stream = stream
        # Don't keep the interpreter alive waiting for the stream to close.
        self.daemon = True

    def run(self):
        # Read until EOF, logging each line without its trailing newline,
        # then flush anything left buffered in the writer.
        while True:
            line = self.stream.readline()
            if not line:
                break
            self.writer.write(line.rstrip('\n'))
        self.writer.close()
|
544
wa/utils/misc.py
544
wa/utils/misc.py
@@ -24,7 +24,6 @@ import sys
|
||||
import re
|
||||
import math
|
||||
import imp
|
||||
import uuid
|
||||
import string
|
||||
import threading
|
||||
import signal
|
||||
@@ -33,154 +32,28 @@ import pkgutil
|
||||
import traceback
|
||||
import logging
|
||||
import random
|
||||
import hashlib
|
||||
from datetime import datetime, timedelta
|
||||
from operator import mul, itemgetter
|
||||
from StringIO import StringIO
|
||||
from itertools import cycle, groupby
|
||||
from itertools import cycle, groupby, chain
|
||||
from functools import partial
|
||||
from distutils.spawn import find_executable
|
||||
|
||||
import yaml
|
||||
from dateutil import tz
|
||||
|
||||
from wa.framework.version import get_wa_version
|
||||
|
||||
|
||||
# ABI --> architectures list
|
||||
ABI_MAP = {
|
||||
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh'],
|
||||
'arm64': ['arm64', 'armv8', 'arm64-v8a'],
|
||||
}
|
||||
|
||||
|
||||
def preexec_function():
    """
    Pre-exec hook for subprocesses spawned by check_output: detach them from
    SIGINT and move them into their own process group so the whole group can
    be killed on timeout.
    """
    # Ignore the SIGINT signal by setting the handler to the standard
    # signal handler SIG_IGN.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # Change process group in case we have to kill the subprocess and all of
    # its children later.
    # TODO: this is Unix-specific; would be good to find an OS-agnostic way
    # to do this in case we wanna port WA to Windows.
    os.setpgrp()
|
||||
|
||||
from devlib.utils.misc import (ABI_MAP, check_output, walk_modules,
|
||||
ensure_directory_exists, ensure_file_directory_exists,
|
||||
normalize, convert_new_lines, get_cpu_mask, unique,
|
||||
escape_quotes, escape_single_quotes, escape_double_quotes,
|
||||
isiterable, getch, as_relative, ranges_to_list,
|
||||
list_to_ranges, list_to_mask, mask_to_list, which)
|
||||
|
||||
check_output_logger = logging.getLogger('check_output')
|
||||
|
||||
|
||||
# Defined here rather than in wlauto.exceptions due to module load dependencies
class TimeoutError(Exception):
    """Raised when a subprocess command times out. This is basically a ``WAError``-derived version
    of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
    programming error (e.g. not setting long enough timers), it is often due to some failure in the
    environment, and therefore should be classed as a "user error".

    :param command: the command that timed out.
    :param output: any output collected before the command was killed.
    """

    def __init__(self, command, output):
        message = 'Timed out: {}'.format(command)
        super(TimeoutError, self).__init__(message)
        # FIX: store the message explicitly. __str__ previously relied on the
        # implicit ``BaseException.message`` attribute, which is deprecated
        # since Python 2.6 and absent in Python 3.
        self.message = message
        self.command = command
        self.output = output

    def __str__(self):
        return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
|
||||
|
||||
|
||||
def check_output(command, timeout=None, ignore=None, **kwargs):
    """This is a version of subprocess.check_output that adds a timeout parameter to kill
    the subprocess if it does not return within the specified time.

    :param command: command to run, as for ``subprocess.Popen``.
    :param timeout: seconds to wait before SIGKILL-ing the subprocess's
                    process group; ``None`` waits indefinitely.
    :param ignore: exit code, list of exit codes, or ``'all'``, which should
                   not raise ``CalledProcessError``.
    :raises TimeoutError: if the command was killed by the timeout callback.
    :raises subprocess.CalledProcessError: on non-ignored non-zero exit.
    :returns: ``(stdout, stderr)`` of the command.
    """
    # pylint: disable=too-many-branches
    if ignore is None:
        ignore = []
    elif isinstance(ignore, int):
        ignore = [ignore]
    elif not isinstance(ignore, list) and ignore != 'all':
        message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
        raise ValueError(message.format(ignore))
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')

    def callback(pid):
        try:
            check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
            # Kill the entire process group (see preexec_function), so the
            # command's children die with it.
            os.killpg(pid, signal.SIGKILL)
        except OSError:
            pass  # process may have already terminated.

    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               preexec_fn=preexec_function, **kwargs)

    if timeout:
        timer = threading.Timer(timeout, callback, [process.pid, ])
        timer.start()

    try:
        output, error = process.communicate()
    finally:
        # Always cancel the timer, even if communicate() raised.
        if timeout:
            timer.cancel()

    retcode = process.poll()
    if retcode:
        if retcode == -9:  # killed, assume due to timeout callback
            raise TimeoutError(command, output='\n'.join([output, error]))
        elif ignore != 'all' and retcode not in ignore:
            raise subprocess.CalledProcessError(retcode, command, output='\n'.join([output, error]))
    return output, error
|
||||
|
||||
|
||||
def init_argument_parser(parser):
    """Add the command-line options common to all WA scripts (config,
    verbosity, debug and version) to ``parser`` and return it."""
    parser.add_argument('-c', '--config', help='specify an additional config.py')
    parser.add_argument('-v', '--verbose', action='count',
                        help='The scripts will produce verbose output.')
    parser.add_argument('--debug', action='store_true',
                        help='Enable debug mode. Note: this implies --verbose.')
    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(get_wa_version()))
    return parser
|
||||
|
||||
|
||||
def walk_modules(path):
    """
    Given a path to a Python package, iterate over all the modules and
    sub-packages in that package.

    :param path: dotted module path, e.g. ``'wa.utils'``.
    :raises ImportError: annotated with a ``path`` attribute naming the
                         module that failed to import.
    """
    try:
        root_mod = __import__(path, {}, {}, [''])
    except ImportError as e:
        e.path = path
        raise e
    yield root_mod
    if not hasattr(root_mod, '__path__'):  # module, not package
        return
    for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
        submod_path = '.'.join([path, name])
        try:
            if ispkg:
                # Recurse into sub-packages.
                for submod in walk_modules(submod_path):
                    yield submod
            else:
                yield __import__(submod_path, {}, {}, [''])
        except ImportError as e:
            e.path = submod_path
            raise e
|
||||
|
||||
|
||||
def ensure_directory_exists(dirpath):
    """A filter for directory paths to ensure they exist."""
    if not os.path.isdir(dirpath):
        try:
            os.makedirs(dirpath)
        except OSError as e:
            # FIX: a concurrent process may create the directory between the
            # isdir() check and makedirs() (TOCTOU); only propagate genuine
            # failures. (errno imported locally to avoid touching the
            # module's import block.)
            import errno
            if e.errno != errno.EEXIST or not os.path.isdir(dirpath):
                raise
    return dirpath
|
||||
|
||||
|
||||
def ensure_file_directory_exists(filepath):
    """
    A filter for file paths to ensure the directory of the
    file exists and the file can be created there. The file
    itself is *not* going to be created if it doesn't already
    exist.

    """
    dirname = os.path.dirname(filepath)
    ensure_directory_exists(dirname)
    return filepath
|
||||
|
||||
|
||||
def diff_tokens(before_token, after_token):
|
||||
"""
|
||||
Creates a diff of two tokens.
|
||||
@@ -269,22 +142,18 @@ def get_traceback(exc=None):
|
||||
return sio.getvalue()
|
||||
|
||||
|
||||
def normalize(value, dict_type=dict):
    """Normalize values. Recursively normalizes dict keys to be lower case,
    no surrounding whitespace, underscore-delimited strings."""
    if isinstance(value, dict):
        result = dict_type()
        for key, val in value.iteritems():
            if isinstance(key, basestring):
                key = key.strip().lower().replace(' ', '_')
            result[key] = normalize(val, dict_type)
        return result
    if isinstance(value, list):
        return [normalize(item, dict_type) for item in value]
    if isinstance(value, tuple):
        return tuple(normalize(item, dict_type) for item in value)
    # Scalars pass through untouched.
    return value
|
||||
def _check_remove_item(the_list, item):
    """Helper function for merge_lists that implements checking whether an item
    should be removed from the list and doing so if needed. Returns ``True`` if
    the item has been removed and ``False`` otherwise."""
    # Only string items prefixed with '~' denote removals.
    if not isinstance(item, basestring) or not item.startswith('~'):
        return False
    actual_item = item[1:]
    if actual_item in the_list:
        the_list.remove(actual_item)
    return True
|
||||
|
||||
|
||||
VALUE_REGEX = re.compile(r'(\d+(?:\.\d+)?)\s*(\w*)')
|
||||
@@ -338,50 +207,6 @@ def capitalize(text):
|
||||
return text[0].upper() + text[1:].lower()
|
||||
|
||||
|
||||
def convert_new_lines(text):
    """ Convert new lines to a common format. """
    # Normalise Windows (\r\n) first so the remaining \r's are bare
    # old-Mac separators.
    text = text.replace('\r\n', '\n')
    return text.replace('\r', '\n')
|
||||
|
||||
|
||||
def escape_quotes(text):
    """Escape quotes, and escaped quotes, in the specified text."""
    # Double up pre-escaped quotes first, then escape the bare ones.
    escaped = re.sub(r'\\("|\')', r'\\\\\1', text)
    return escaped.replace('\'', '\\\'').replace('\"', '\\\"')
|
||||
|
||||
|
||||
def escape_single_quotes(text):
    """Escape single quotes, and escaped single quotes, in the specified text."""
    # Double up pre-escaped quotes, then shell-escape bare single quotes.
    escaped = re.sub(r'\\("|\')', r'\\\\\1', text)
    return escaped.replace('\'', '\'\\\'\'')
|
||||
|
||||
|
||||
def escape_double_quotes(text):
    """Escape double quotes, and escaped double quotes, in the specified text."""
    # Double up pre-escaped quotes, then escape bare double quotes.
    escaped = re.sub(r'\\("|\')', r'\\\\\1', text)
    return escaped.replace('\"', '\\\"')
|
||||
|
||||
|
||||
def getch(count=1):
    """Read ``count`` characters from standard input."""
    if os.name == 'nt':
        import msvcrt  # pylint: disable=F0401
        return ''.join([msvcrt.getch() for _ in xrange(count)])
    else:  # assume Unix
        import tty  # NOQA
        import termios  # NOQA
        fd = sys.stdin.fileno()
        # Switch the terminal to raw mode so characters are delivered
        # immediately (no line buffering), restoring the settings afterwards.
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(count)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
|
||||
|
||||
|
||||
def isiterable(obj):
    """Returns ``True`` if the specified object is iterable and
    *is not a string type*, ``False`` otherwise."""
    if isinstance(obj, basestring):
        return False
    return hasattr(obj, '__iter__')
|
||||
|
||||
|
||||
def utc_to_local(dt):
    """Convert naive datetime to local time zone, assuming UTC.

    :param dt: a naive ``datetime`` whose wall-clock time is in UTC.
    :returns: an aware ``datetime`` in the local time zone.
    """
    return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
|
||||
@@ -392,21 +217,6 @@ def local_to_utc(dt):
|
||||
return dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
|
||||
|
||||
|
||||
def as_relative(path):
    """Convert path to relative by stripping away the leading '/' on UNIX or
    the equivalent on other platforms."""
    # Drop the drive letter (no-op on UNIX), then any leading separators.
    _, tail = os.path.splitdrive(path)
    return tail.lstrip(os.sep)
|
||||
|
||||
|
||||
def get_cpu_mask(cores):
    """Return a string with the hex for the cpu mask for the specified core numbers."""
    # OR-ing bits is equivalent to summing each distinct bit exactly once.
    return '0x{0:x}'.format(sum(1 << core for core in set(cores)))
|
||||
|
||||
|
||||
def load_class(classpath):
|
||||
"""Loads the specified Python class. ``classpath`` must be a fully-qualified
|
||||
class name (i.e. namspaced under module/package)."""
|
||||
@@ -468,29 +278,7 @@ def enum_metaclass(enum_param, return_name=False, start=0):
|
||||
return __EnumMeta
|
||||
|
||||
|
||||
def which(name):
    """Platform-independent version of UNIX which utility."""
    if os.name != 'nt':  # assume UNIX-like
        try:
            # check_output returns (stdout, stderr).
            return check_output(['which', name])[0].strip()  # pylint: disable=E1103
        except subprocess.CalledProcessError:
            return None
    # Windows: search PATH ourselves, trying each PATHEXT extension.
    paths = os.getenv('PATH').split(os.pathsep)
    exts = os.getenv('PATHEXT').split(os.pathsep)
    for path in paths:
        testpath = os.path.join(path, name)
        if os.path.isfile(testpath):
            return testpath
        for ext in exts:
            testpathext = testpath + ext
            if os.path.isfile(testpathext):
                return testpathext
    return None
|
||||
|
||||
|
||||
_bash_color_regex = re.compile('\x1b\\[[0-9;]+m')
|
||||
_bash_color_regex = re.compile('\x1b\[[0-9;]+m')
|
||||
|
||||
|
||||
def strip_bash_colors(text):
|
||||
@@ -536,6 +324,18 @@ def get_random_string(length):
|
||||
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
|
||||
|
||||
|
||||
class LoadSyntaxError(Exception):
    """Raised when parsing a config structure from a file fails; carries the
    offending file path and line number."""

    def __init__(self, message, filepath, lineno):
        super(LoadSyntaxError, self).__init__(message)
        # FIX: store the message explicitly. __str__ previously relied on the
        # implicit ``BaseException.message`` attribute, which is deprecated
        # since Python 2.6 and absent in Python 3.
        self.message = message
        self.filepath = filepath
        self.lineno = lineno

    def __str__(self):
        message = 'Syntax Error in {}, line {}:\n\t{}'
        return message.format(self.filepath, self.lineno, self.message)
|
||||
|
||||
|
||||
RAND_MOD_NAME_LEN = 30
|
||||
BAD_CHARS = string.punctuation + string.whitespace
|
||||
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
@@ -544,23 +344,63 @@ TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
|
||||
def to_identifier(text):
|
||||
"""Converts text to a valid Python identifier by replacing all
|
||||
whitespace and punctuation."""
|
||||
result = re.sub('_+', '_', text.translate(TRANS_TABLE))
|
||||
if result and result[0] in string.digits:
|
||||
result = '_' + result
|
||||
return result
|
||||
return re.sub('_+', '_', text.translate(TRANS_TABLE))
|
||||
|
||||
|
||||
def unique(alist):
|
||||
def load_struct_from_python(filepath=None, text=None):
|
||||
"""Parses a config structure from a .py file. The structure should be composed
|
||||
of basic Python types (strings, ints, lists, dicts, etc.)."""
|
||||
if not (filepath or text) or (filepath and text):
|
||||
raise ValueError('Exactly one of filepath or text must be specified.')
|
||||
try:
|
||||
if filepath:
|
||||
modname = to_identifier(filepath)
|
||||
mod = imp.load_source(modname, filepath)
|
||||
else:
|
||||
modname = get_random_string(RAND_MOD_NAME_LEN)
|
||||
while modname in sys.modules: # highly unlikely, but...
|
||||
modname = get_random_string(RAND_MOD_NAME_LEN)
|
||||
mod = imp.new_module(modname)
|
||||
exec text in mod.__dict__ # pylint: disable=exec-used
|
||||
return dict((k, v)
|
||||
for k, v in mod.__dict__.iteritems()
|
||||
if not k.startswith('_'))
|
||||
except SyntaxError as e:
|
||||
raise LoadSyntaxError(e.message, filepath, e.lineno)
|
||||
|
||||
|
||||
def load_struct_from_yaml(filepath=None, text=None):
    """Parses a config structure from a .yaml file. The structure should be composed
    of basic Python types (strings, ints, lists, dicts, etc.).

    Exactly one of ``filepath`` or ``text`` must be supplied.

    :raises ValueError: if both or neither source is given.
    :raises LoadSyntaxError: if the YAML does not parse.
    """
    if not (filepath or text) or (filepath and text):
        raise ValueError('Exactly one of filepath or text must be specified.')
    try:
        # NOTE(review): yaml.load without SafeLoader can construct arbitrary
        # Python objects; consider yaml.safe_load for untrusted input.
        if filepath:
            with open(filepath) as fh:
                return yaml.load(fh)
        else:
            return yaml.load(text)
    except yaml.YAMLError as e:
        # YAML errors do not always carry a location; extract one if present.
        lineno = None
        if hasattr(e, 'problem_mark'):
            lineno = e.problem_mark.line  # pylint: disable=no-member
        raise LoadSyntaxError(e.message, filepath=filepath, lineno=lineno)
|
||||
|
||||
|
||||
def load_struct_from_file(filepath):
|
||||
"""
|
||||
Returns a list containing only unique elements from the input list (but preserves
|
||||
order, unlike sets).
|
||||
Attempts to parse a Python structure consisting of basic types from the specified file.
|
||||
Raises a ``ValueError`` if the specified file is of unkown format; ``LoadSyntaxError`` if
|
||||
there is an issue parsing the file.
|
||||
|
||||
"""
|
||||
result = []
|
||||
for item in alist:
|
||||
if item not in result:
|
||||
result.append(item)
|
||||
return result
|
||||
extn = os.path.splitext(filepath)[1].lower()
|
||||
if (extn == '.py') or (extn == '.pyc') or (extn == '.pyo'):
|
||||
return load_struct_from_python(filepath)
|
||||
elif extn == '.yaml':
|
||||
return load_struct_from_yaml(filepath)
|
||||
else:
|
||||
raise ValueError('Unknown format "{}": {}'.format(extn, filepath))
|
||||
|
||||
|
||||
def open_file(filepath):
|
||||
@@ -576,68 +416,170 @@ def open_file(filepath):
|
||||
return subprocess.call(['xdg-open', filepath])
|
||||
|
||||
|
||||
def ranges_to_list(ranges_string):
|
||||
"""Converts a sysfs-style ranges string, e.g. ``"0,2-4"``, into a list ,e.g ``[0,2,3,4]``"""
|
||||
values = []
|
||||
for rg in ranges_string.split(','):
|
||||
if '-' in rg:
|
||||
first, last = map(int, rg.split('-'))
|
||||
values.extend(xrange(first, last + 1))
|
||||
else:
|
||||
values.append(int(rg))
|
||||
return values
|
||||
def sha256(path, chunk=2048):
    """Calculates SHA256 hexdigest of the file at the specified path."""
    digest = hashlib.sha256()
    with open(path, 'rb') as fh:
        # Feed the file through in fixed-size chunks until EOF.
        for block in iter(lambda: fh.read(chunk), b''):
            digest.update(block)
    return digest.hexdigest()
|
||||
|
||||
|
||||
def list_to_ranges(values):
    """Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
    # Consecutive runs share a constant (index - value), so group on that.
    # FIX: tuple-parameter lambdas (``lambda (i, x): ...``) are Python-2-only
    # syntax (PEP 3113); index the pair explicitly and materialise each group
    # as a list (map() is lazy on Python 3) so indexing below works.
    range_groups = []
    for _, group_iter in groupby(enumerate(values), lambda pair: pair[0] - pair[1]):
        range_groups.append([value for _, value in group_iter])
    range_strings = []
    for group in range_groups:
        if len(group) == 1:
            range_strings.append(str(group[0]))
        else:
            range_strings.append('{}-{}'.format(group[0], group[-1]))
    return ','.join(range_strings)
|
||||
def urljoin(*parts):
|
||||
return '/'.join(p.rstrip('/') for p in parts)
|
||||
|
||||
|
||||
def list_to_mask(values, base=0x0):
|
||||
"""Converts the specified list of integer values into
|
||||
a bit mask for those values. Optinally, the list can be
|
||||
applied to an existing mask."""
|
||||
for v in values:
|
||||
base |= (1 << v)
|
||||
return base
|
||||
|
||||
|
||||
def mask_to_list(mask):
|
||||
"""Converts the specfied integer bitmask into a list of
|
||||
indexes of bits that are set in the mask."""
|
||||
size = len(bin(mask)) - 2 # because of "0b"
|
||||
return [size - i - 1 for i in xrange(size)
|
||||
if mask & (1 << size - i - 1)]
|
||||
|
||||
|
||||
class Namespace(dict):
|
||||
# From: http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/
|
||||
def istextfile(fileobj, blocksize=512):
|
||||
""" Uses heuristics to guess whether the given file is text or binary,
|
||||
by reading a single block of bytes from the file.
|
||||
If more than 30% of the chars in the block are non-text, or there
|
||||
are NUL ('\x00') bytes in the block, assume this is a binary file.
|
||||
"""
|
||||
A dict-like object that allows treating keys and attributes
|
||||
interchangeably (this means that keys are restricted to strings
|
||||
that are valid Python identifiers).
|
||||
_text_characters = (b''.join(chr(i) for i in range(32, 127)) +
|
||||
b'\n\r\t\f\b')
|
||||
|
||||
block = fileobj.read(blocksize)
|
||||
if b'\x00' in block:
|
||||
# Files with null bytes are binary
|
||||
return False
|
||||
elif not block:
|
||||
# An empty file is considered a valid text file
|
||||
return True
|
||||
|
||||
# Use translate's 'deletechars' argument to efficiently remove all
|
||||
# occurrences of _text_characters from the block
|
||||
nontext = block.translate(None, _text_characters)
|
||||
return float(len(nontext)) / len(block) <= 0.30
|
||||
|
||||
|
||||
def categorize(v):
|
||||
if hasattr(v, 'merge_with') and hasattr(v, 'merge_into'):
|
||||
return 'o'
|
||||
elif hasattr(v, 'iteritems'):
|
||||
return 'm'
|
||||
elif isiterable(v):
|
||||
return 's'
|
||||
elif v is None:
|
||||
return 'n'
|
||||
else:
|
||||
return 'c'
|
||||
|
||||
|
||||
def merge_config_values(base, other):
|
||||
"""
|
||||
This is used to merge two objects, typically when setting the value of a
|
||||
``ConfigurationPoint``. First, both objects are categorized into
|
||||
|
||||
c: A scalar value. Basically, most objects. These values
|
||||
are treated as atomic, and not mergeable.
|
||||
s: A sequence. Anything iterable that is not a dict or
|
||||
a string (strings are considered scalars).
|
||||
m: A key-value mapping. ``dict`` and its derivatives.
|
||||
n: ``None``.
|
||||
o: A mergeable object; this is an object that implements both
|
||||
``merge_with`` and ``merge_into`` methods.
|
||||
|
||||
The merge rules based on the two categories are then as follows:
|
||||
|
||||
(c1, c2) --> c2
|
||||
(s1, s2) --> s1 . s2
|
||||
(m1, m2) --> m1 . m2
|
||||
(c, s) --> [c] . s
|
||||
(s, c) --> s . [c]
|
||||
(s, m) --> s . [m]
|
||||
(m, s) --> [m] . s
|
||||
(m, c) --> ERROR
|
||||
(c, m) --> ERROR
|
||||
(o, X) --> o.merge_with(X)
|
||||
(X, o) --> o.merge_into(X)
|
||||
(X, n) --> X
|
||||
(n, X) --> X
|
||||
|
||||
where:
|
||||
|
||||
'.' means concatenation (for maps, contcationation of (k, v) streams
|
||||
then converted back into a map). If the types of the two objects
|
||||
differ, the type of ``other`` is used for the result.
|
||||
'X' means "any category"
|
||||
'[]' used to indicate a literal sequence (not necessarily a ``list``).
|
||||
when this is concatenated with an actual sequence, that sequencies
|
||||
type is used.
|
||||
|
||||
notes:
|
||||
|
||||
- When a mapping is combined with a sequence, that mapping is
|
||||
treated as a scalar value.
|
||||
- When combining two mergeable objects, they're combined using
|
||||
``o1.merge_with(o2)`` (_not_ using o2.merge_into(o1)).
|
||||
- Combining anything with ``None`` yields that value, irrespective
|
||||
of the order. So a ``None`` value is eqivalent to the corresponding
|
||||
item being omitted.
|
||||
- When both values are scalars, merging is equivalent to overwriting.
|
||||
- There is no recursion (e.g. if map values are lists, they will not
|
||||
be merged; ``other`` will overwrite ``base`` values). If complicated
|
||||
merging semantics (such as recursion) are required, they should be
|
||||
implemented within custom mergeable types (i.e. those that implement
|
||||
``merge_with`` and ``merge_into``).
|
||||
|
||||
While this can be used as a generic "combine any two arbitry objects"
|
||||
function, the semantics have been selected specifically for merging
|
||||
configuration point values.
|
||||
|
||||
"""
|
||||
cat_base = categorize(base)
|
||||
cat_other = categorize(other)
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return self[name]
|
||||
except KeyError:
|
||||
raise AttributeError(name)
|
||||
if cat_base == 'n':
|
||||
return other
|
||||
elif cat_other == 'n':
|
||||
return base
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
self[name] = value
|
||||
if cat_base == 'o':
|
||||
return base.merge_with(other)
|
||||
elif cat_other == 'o':
|
||||
return other.merge_into(base)
|
||||
|
||||
def __setitem__(self, name, value):
|
||||
if to_identifier(name) != name:
|
||||
message = 'Key must be a valid identifier; got "{}"'
|
||||
raise ValueError(message.format(name))
|
||||
dict.__setitem__(self, name, value)
|
||||
if cat_base == 'm':
|
||||
if cat_other == 's':
|
||||
return merge_sequencies([base], other)
|
||||
elif cat_other == 'm':
|
||||
return merge_maps(base, other)
|
||||
else:
|
||||
message = 'merge error ({}, {}): "{}" and "{}"'
|
||||
raise ValueError(message.format(cat_base, cat_other, base, other))
|
||||
elif cat_base == 's':
|
||||
if cat_other == 's':
|
||||
return merge_sequencies(base, other)
|
||||
else:
|
||||
return merge_sequencies(base, [other])
|
||||
else: # cat_base == 'c'
|
||||
if cat_other == 's':
|
||||
return merge_sequencies([base], other)
|
||||
elif cat_other == 'm':
|
||||
message = 'merge error ({}, {}): "{}" and "{}"'
|
||||
raise ValueError(message.format(cat_base, cat_other, base, other))
|
||||
else:
|
||||
return other
|
||||
|
||||
|
||||
def merge_sequencies(s1, s2):
|
||||
return type(s2)(unique(chain(s1, s2)))
|
||||
|
||||
|
||||
def merge_maps(m1, m2):
|
||||
return type(m2)(chain(m1.iteritems(), m2.iteritems()))
|
||||
|
||||
|
||||
def merge_dicts_simple(base, other):
|
||||
result = base.copy()
|
||||
for key, value in (base or {}).iteritems():
|
||||
result[key] = merge_config_values(result.get(key), value)
|
||||
return result
|
||||
|
||||
|
||||
def touch(path):
|
||||
with open(path, 'w'):
|
||||
pass
|
||||
|
@@ -1,13 +1,13 @@
|
||||
"""
|
||||
This module contains wrappers for Python serialization modules for
|
||||
common formats that make it easier to serialize/deserialize WA
|
||||
Plain Old Data structures (serilizable WA classes implement
|
||||
``to_pod()``/``from_pod()`` methods for converting between POD
|
||||
Plain Old Data structures (serilizable WA classes implement
|
||||
``to_pod()``/``from_pod()`` methods for converting between POD
|
||||
structures and Python class instances).
|
||||
|
||||
The modifications to standard serilization procedures are:
|
||||
|
||||
- mappings are deserialized as ``OrderedDict``\ 's are than standard
|
||||
- mappings are deserialized as ``OrderedDict``\ 's rather than standard
|
||||
Python ``dict``\ 's. This allows for cleaner syntax in certain parts
|
||||
of WA configuration (e.g. values to be written to files can be specified
|
||||
as a dict, and they will be written in the order specified in the config).
|
||||
@@ -16,7 +16,7 @@ The modifications to standard serilization procedures are:
|
||||
in the POD config.
|
||||
|
||||
This module exports the "wrapped" versions of serialization libraries,
|
||||
and this should be imported and used instead of importing the libraries
|
||||
and this should be imported and used instead of importing the libraries
|
||||
directly. i.e. ::
|
||||
|
||||
from wa.utils.serializer import yaml
|
||||
@@ -27,7 +27,7 @@ instead of ::
|
||||
import yaml
|
||||
pod = yaml.load(fh)
|
||||
|
||||
It's also possible to suse the serializer directly::
|
||||
It's also possible to use the serializer directly::
|
||||
|
||||
from wa.utils import serializer
|
||||
pod = serializer.load(fh)
|
||||
@@ -35,13 +35,14 @@ It's also possible to suse the serializer directly::
|
||||
This can also be used to ``dump()`` POD structures. By default,
|
||||
``dump()`` will produce JSON, but ``fmt`` parameter may be used to
|
||||
specify an alternative format (``yaml`` or ``python``). ``load()`` will
|
||||
use the file extension to guess the format, but ``fmt`` may also be used
|
||||
use the file plugin to guess the format, but ``fmt`` may also be used
|
||||
to specify it explicitly.
|
||||
|
||||
"""
|
||||
# pylint: disable=unused-argument
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import json as _json
|
||||
from collections import OrderedDict
|
||||
from datetime import datetime
|
||||
@@ -50,8 +51,8 @@ import yaml as _yaml
|
||||
import dateutil.parser
|
||||
|
||||
from wa.framework.exception import SerializerSyntaxError
|
||||
from wa.utils.types import regex_type
|
||||
from wa.utils.misc import isiterable
|
||||
from wa.utils.types import regex_type, none_type
|
||||
|
||||
|
||||
__all__ = [
|
||||
@@ -60,16 +61,29 @@ __all__ = [
|
||||
'read_pod',
|
||||
'dump',
|
||||
'load',
|
||||
'is_pod',
|
||||
'POD_TYPES',
|
||||
]
|
||||
|
||||
|
||||
POD_TYPES = [
|
||||
list,
|
||||
tuple,
|
||||
dict,
|
||||
set,
|
||||
str,
|
||||
unicode,
|
||||
int,
|
||||
float,
|
||||
bool,
|
||||
datetime,
|
||||
regex_type,
|
||||
none_type,
|
||||
]
|
||||
|
||||
class WAJSONEncoder(_json.JSONEncoder):
|
||||
|
||||
def default(self, obj):
|
||||
if hasattr(obj, 'to_pod'):
|
||||
return obj.to_pod()
|
||||
elif isinstance(obj, regex_type):
|
||||
def default(self, obj): # pylint: disable=method-hidden
|
||||
if isinstance(obj, regex_type):
|
||||
return 'REGEX:{}:{}'.format(obj.flags, obj.pattern)
|
||||
elif isinstance(obj, datetime):
|
||||
return 'DATET:{}'.format(obj.isoformat())
|
||||
@@ -79,8 +93,8 @@ class WAJSONEncoder(_json.JSONEncoder):
|
||||
|
||||
class WAJSONDecoder(_json.JSONDecoder):
|
||||
|
||||
def decode(self, s):
|
||||
d = _json.JSONDecoder.decode(self, s)
|
||||
def decode(self, s, **kwargs):
|
||||
d = _json.JSONDecoder.decode(self, s, **kwargs)
|
||||
|
||||
def try_parse_object(v):
|
||||
if isinstance(v, basestring) and v.startswith('REGEX:'):
|
||||
@@ -112,7 +126,6 @@ class json(object):
|
||||
def dump(o, wfh, indent=4, *args, **kwargs):
|
||||
return _json.dump(o, wfh, cls=WAJSONEncoder, indent=indent, *args, **kwargs)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def load(fh, *args, **kwargs):
|
||||
try:
|
||||
@@ -176,7 +189,7 @@ class yaml(object):
|
||||
except _yaml.YAMLError as e:
|
||||
lineno = None
|
||||
if hasattr(e, 'problem_mark'):
|
||||
lineno = e.problem_mark.line
|
||||
lineno = e.problem_mark.line # pylint: disable=no-member
|
||||
raise SerializerSyntaxError(e.message, lineno)
|
||||
|
||||
loads = load
|
||||
@@ -196,7 +209,7 @@ class python(object):
|
||||
def loads(s, *args, **kwargs):
|
||||
pod = {}
|
||||
try:
|
||||
exec s in pod
|
||||
exec s in pod # pylint: disable=exec-used
|
||||
except SyntaxError as e:
|
||||
raise SerializerSyntaxError(e.message, e.lineno)
|
||||
for k in pod.keys():
|
||||
@@ -209,20 +222,29 @@ def read_pod(source, fmt=None):
|
||||
if isinstance(source, basestring):
|
||||
with open(source) as fh:
|
||||
return _read_pod(fh, fmt)
|
||||
elif hasattr(source, 'read') and (hasattr(sourc, 'name') or fmt):
|
||||
elif hasattr(source, 'read') and (hasattr(source, 'name') or fmt):
|
||||
return _read_pod(source, fmt)
|
||||
else:
|
||||
message = 'source must be a path or an open file handle; got {}'
|
||||
raise ValueError(message.format(type(source)))
|
||||
|
||||
def write_pod(pod, dest, fmt=None):
|
||||
if isinstance(dest, basestring):
|
||||
with open(dest, 'w') as wfh:
|
||||
return _write_pod(pod, wfh, fmt)
|
||||
elif hasattr(dest, 'write') and (hasattr(dest, 'name') or fmt):
|
||||
return _write_pod(pod, dest, fmt)
|
||||
else:
|
||||
message = 'dest must be a path or an open file handle; got {}'
|
||||
raise ValueError(message.format(type(dest)))
|
||||
|
||||
|
||||
def dump(o, wfh, fmt='json', *args, **kwargs):
|
||||
serializer = {
|
||||
'yaml': yaml,
|
||||
'json': json,
|
||||
'python': python,
|
||||
'py': python,
|
||||
}.get(fmt)
|
||||
serializer = {'yaml': yaml,
|
||||
'json': json,
|
||||
'python': python,
|
||||
'py': python,
|
||||
}.get(fmt)
|
||||
if serializer is None:
|
||||
raise ValueError('Unknown serialization format: "{}"'.format(fmt))
|
||||
serializer.dump(o, wfh, *args, **kwargs)
|
||||
@@ -242,4 +264,20 @@ def _read_pod(fh, fmt=None):
|
||||
elif fmt == 'py':
|
||||
return python.load(fh)
|
||||
else:
|
||||
raise ValueError('Unknown format "{}": {}'.format(fmt, path))
|
||||
raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(fh, 'name', '<none>')))
|
||||
|
||||
def _write_pod(pod, wfh, fmt=None):
|
||||
if fmt is None:
|
||||
fmt = os.path.splitext(wfh.name)[1].lower().strip('.')
|
||||
if fmt == 'yaml':
|
||||
return yaml.dump(pod, wfh)
|
||||
elif fmt == 'json':
|
||||
return json.dump(pod, wfh)
|
||||
elif fmt == 'py':
|
||||
raise ValueError('Serializing to Python is not supported')
|
||||
else:
|
||||
raise ValueError('Unknown format "{}": {}'.format(fmt, getattr(wfh, 'name', '<none>')))
|
||||
|
||||
def is_pod(obj):
|
||||
return type(obj) in POD_TYPES
|
||||
|
||||
|
93
wa/utils/terminalsize.py
Normal file
93
wa/utils/terminalsize.py
Normal file
@@ -0,0 +1,93 @@
|
||||
# Adapted from
|
||||
# https://gist.github.com/jtriley/1108174
|
||||
# pylint: disable=bare-except,unpacking-non-sequence
|
||||
import os
|
||||
import shlex
|
||||
import struct
|
||||
import platform
|
||||
import subprocess
|
||||
|
||||
|
||||
def get_terminal_size():
|
||||
""" getTerminalSize()
|
||||
- get width and height of console
|
||||
- works on linux,os x,windows,cygwin(windows)
|
||||
originally retrieved from:
|
||||
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
|
||||
"""
|
||||
current_os = platform.system()
|
||||
tuple_xy = None
|
||||
if current_os == 'Windows':
|
||||
tuple_xy = _get_terminal_size_windows()
|
||||
if tuple_xy is None:
|
||||
# needed for window's python in cygwin's xterm
|
||||
tuple_xy = _get_terminal_size_tput()
|
||||
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
|
||||
tuple_xy = _get_terminal_size_linux()
|
||||
if tuple_xy is None or tuple_xy == (0, 0):
|
||||
tuple_xy = (80, 25) # assume "standard" terminal
|
||||
return tuple_xy
|
||||
|
||||
|
||||
def _get_terminal_size_windows():
|
||||
# pylint: disable=unused-variable,redefined-outer-name,too-many-locals
|
||||
try:
|
||||
from ctypes import windll, create_string_buffer
|
||||
# stdin handle is -10
|
||||
# stdout handle is -11
|
||||
# stderr handle is -12
|
||||
h = windll.kernel32.GetStdHandle(-12)
|
||||
csbi = create_string_buffer(22)
|
||||
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
|
||||
if res:
|
||||
(bufx, bufy, curx, cury, wattr,
|
||||
left, top, right, bottom,
|
||||
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
|
||||
sizex = right - left + 1
|
||||
sizey = bottom - top + 1
|
||||
return sizex, sizey
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def _get_terminal_size_tput():
|
||||
# get terminal width
|
||||
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
|
||||
try:
|
||||
cols = int(subprocess.check_call(shlex.split('tput cols')))
|
||||
rows = int(subprocess.check_call(shlex.split('tput lines')))
|
||||
return (cols, rows)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def _get_terminal_size_linux():
|
||||
def ioctl_GWINSZ(fd):
|
||||
try:
|
||||
import fcntl
|
||||
import termios
|
||||
cr = struct.unpack('hh',
|
||||
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
|
||||
return cr
|
||||
except:
|
||||
pass
|
||||
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
|
||||
if not cr:
|
||||
try:
|
||||
fd = os.open(os.ctermid(), os.O_RDONLY)
|
||||
cr = ioctl_GWINSZ(fd)
|
||||
os.close(fd)
|
||||
except:
|
||||
pass
|
||||
if not cr:
|
||||
try:
|
||||
cr = (os.environ['LINES'], os.environ['COLUMNS'])
|
||||
except:
|
||||
return None
|
||||
return int(cr[1]), int(cr[0])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sizex, sizey = get_terminal_size()
|
||||
print 'width =', sizex, 'height =', sizey
|
||||
|
@@ -15,77 +15,29 @@
|
||||
|
||||
|
||||
"""
|
||||
Routines for doing various type conversions. These usually embody some higher-level
|
||||
semantics than are present in standard Python types (e.g. ``boolean`` will convert the
|
||||
string ``"false"`` to ``False``, where as non-empty strings are usually considered to be
|
||||
``True``).
|
||||
Routines for doing various type conversions. These usually embody some
|
||||
higher-level semantics than are present in standard Python types (e.g.
|
||||
``boolean`` will convert the string ``"false"`` to ``False``, where as
|
||||
non-empty strings are usually considered to be ``True``).
|
||||
|
||||
A lot of these are intened to stpecify type conversions declaratively in place like
|
||||
``Parameter``'s ``kind`` argument. These are basically "hacks" around the fact that Python
|
||||
is not the best language to use for configuration.
|
||||
A lot of these are intened to stpecify type conversions declaratively in place
|
||||
like ``Parameter``'s ``kind`` argument. These are basically "hacks" around the
|
||||
fact that Python is not the best language to use for configuration.
|
||||
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import math
|
||||
import shlex
|
||||
import numbers
|
||||
from bisect import insort
|
||||
from collections import defaultdict
|
||||
from collections import defaultdict, MutableMapping
|
||||
from copy import copy
|
||||
|
||||
from devlib.utils.types import identifier, boolean, integer, numeric, caseless_string
|
||||
|
||||
from wa.utils.misc import isiterable, to_identifier
|
||||
|
||||
|
||||
def identifier(text):
|
||||
"""Converts text to a valid Python identifier by replacing all
|
||||
whitespace and punctuation."""
|
||||
return to_identifier(text)
|
||||
|
||||
|
||||
def boolean(value):
|
||||
"""
|
||||
Returns bool represented by the value. This is different from
|
||||
calling the builtin bool() in that it will interpret string representations.
|
||||
e.g. boolean('0') and boolean('false') will both yield False.
|
||||
|
||||
"""
|
||||
false_strings = ['', '0', 'n', 'no']
|
||||
if isinstance(value, basestring):
|
||||
value = value.lower()
|
||||
if value in false_strings or 'false'.startswith(value):
|
||||
return False
|
||||
return bool(value)
|
||||
|
||||
|
||||
def integer(value):
|
||||
"""Handles conversions for string respresentations of binary, octal and hex."""
|
||||
if isinstance(value, basestring):
|
||||
return int(value, 0)
|
||||
else:
|
||||
return int(value)
|
||||
|
||||
|
||||
def numeric(value):
|
||||
"""
|
||||
Returns the value as number (int if possible, or float otherwise), or
|
||||
raises ``ValueError`` if the specified ``value`` does not have a straight
|
||||
forward numeric conversion.
|
||||
|
||||
"""
|
||||
if isinstance(value, int):
|
||||
return value
|
||||
try:
|
||||
fvalue = float(value)
|
||||
except ValueError:
|
||||
raise ValueError('Not numeric: {}'.format(value))
|
||||
if not math.isnan(fvalue) and not math.isinf(fvalue):
|
||||
ivalue = int(fvalue)
|
||||
# yeah, yeah, I know. Whatever. This is best-effort.
|
||||
if ivalue == fvalue:
|
||||
return ivalue
|
||||
return fvalue
|
||||
|
||||
|
||||
def list_of_strs(value):
|
||||
"""
|
||||
Value must be iterable. All elements will be converted to strings.
|
||||
@@ -142,7 +94,6 @@ def list_of(type_):
|
||||
"""Generates a "list of" callable for the specified type. The callable
|
||||
attempts to convert all elements in the passed value to the specifed
|
||||
``type_``, raising ``ValueError`` on error."""
|
||||
|
||||
def __init__(self, values):
|
||||
list.__init__(self, map(type_, values))
|
||||
|
||||
@@ -204,7 +155,6 @@ def list_or(type_):
|
||||
list_type = list_of(type_)
|
||||
|
||||
class list_or_type(list_type):
|
||||
|
||||
def __init__(self, value):
|
||||
# pylint: disable=non-parent-init-called,super-init-not-called
|
||||
if isiterable(value):
|
||||
@@ -220,6 +170,7 @@ list_or_bool = list_or(boolean)
|
||||
|
||||
|
||||
regex_type = type(re.compile(''))
|
||||
none_type = type(None)
|
||||
|
||||
|
||||
def regex(value):
|
||||
@@ -234,28 +185,25 @@ def regex(value):
|
||||
return re.compile(value)
|
||||
|
||||
|
||||
class caseless_string(str):
|
||||
__counters = defaultdict(int)
|
||||
|
||||
|
||||
def reset_counter(name=None):
|
||||
__counters[name] = 0
|
||||
|
||||
|
||||
def counter(name=None):
|
||||
"""
|
||||
Just like built-in Python string except case-insensitive on comparisons. However, the
|
||||
case is preserved otherwise.
|
||||
An auto incremeting value (kind of like an AUTO INCREMENT field in SQL).
|
||||
Optionally, the name of the counter to be used is specified (each counter
|
||||
increments separately).
|
||||
|
||||
Counts start at 1, not 0.
|
||||
|
||||
"""
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, basestring):
|
||||
other = other.lower()
|
||||
return self.lower() == other
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def __cmp__(self, other):
|
||||
if isinstance(basestring, other):
|
||||
other = other.lower()
|
||||
return cmp(self.lower(), other)
|
||||
|
||||
def format(self, *args, **kwargs):
|
||||
return caseless_string(super(caseless_string, self).format(*args, **kwargs))
|
||||
__counters[name] += 1
|
||||
value = __counters[name]
|
||||
return value
|
||||
|
||||
|
||||
class arguments(list):
|
||||
@@ -375,7 +323,8 @@ class prioritylist(object):
|
||||
raise ValueError('Invalid index {}'.format(index))
|
||||
current_global_offset = 0
|
||||
priority_counts = {priority: count for (priority, count) in
|
||||
zip(self.priorities, [len(self.elements[p]) for p in self.priorities])}
|
||||
zip(self.priorities, [len(self.elements[p])
|
||||
for p in self.priorities])}
|
||||
for priority in self.priorities:
|
||||
if not index_range:
|
||||
break
|
||||
@@ -395,103 +344,134 @@ class prioritylist(object):
|
||||
return self.size
|
||||
|
||||
|
||||
class TreeNode(object):
|
||||
class toggle_set(set):
|
||||
"""
|
||||
A list that contains items to enable or disable something.
|
||||
|
||||
@property
|
||||
def is_root(self):
|
||||
return self.parent is None
|
||||
|
||||
@property
|
||||
def is_leaf(self):
|
||||
return not self.children
|
||||
A prefix of ``~`` is used to denote disabling something, for example
|
||||
the list ['apples', '~oranges', 'cherries'] enables both ``apples``
|
||||
and ``cherries`` but disables ``oranges``.
|
||||
"""
|
||||
|
||||
@property
|
||||
def parent(self):
|
||||
return self._parent
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
return toggle_set(pod)
|
||||
|
||||
@parent.setter
|
||||
def parent(self, parent):
|
||||
if self._parent:
|
||||
self._parent.remove_child(self)
|
||||
self._parent = parent
|
||||
if self._parent:
|
||||
self._parent.add_child(self)
|
||||
@staticmethod
|
||||
def merge(source, dest):
|
||||
for item in source:
|
||||
if item not in dest:
|
||||
#Disable previously enabled item
|
||||
if item.startswith('~') and item[1:] in dest:
|
||||
dest.remove(item[1:])
|
||||
#Enable previously disabled item
|
||||
if not item.startswith('~') and ('~' + item) in dest:
|
||||
dest.remove('~' + item)
|
||||
dest.add(item)
|
||||
return dest
|
||||
|
||||
@property
|
||||
def children(self):
|
||||
return [c for c in self._children]
|
||||
def merge_with(self, other):
|
||||
new_self = copy(self)
|
||||
return toggle_set.merge(other, new_self)
|
||||
|
||||
def __init__(self):
|
||||
self._parent = None
|
||||
self._children = []
|
||||
def merge_into(self, other):
|
||||
other = copy(other)
|
||||
return toggle_set.merge(self, other)
|
||||
|
||||
def add_child(self, node):
|
||||
if node == self:
|
||||
raise ValueError('A node cannot be its own child.')
|
||||
if node in self._children:
|
||||
return
|
||||
for ancestor in self.iter_ancestors():
|
||||
if ancestor == node:
|
||||
raise ValueError('Can\'t add {} as a child, as it already an ancestor')
|
||||
if node.parent and node.parent != self:
|
||||
raise ValueError('Cannot add {}, as it already has a parent.'.format(node))
|
||||
self._children.append(node)
|
||||
node._parent = self
|
||||
def values(self):
|
||||
"""
|
||||
returns a list of enabled items.
|
||||
"""
|
||||
return set([item for item in self if not item.startswith('~')])
|
||||
|
||||
def remove_child(self, node):
|
||||
if node not in self._children:
|
||||
message = 'Cannot remove: {} is not a child of {}'
|
||||
raise ValueError(message.format(node, self))
|
||||
self._children.remove(node)
|
||||
node._parent = None
|
||||
def conflicts_with(self, other):
|
||||
"""
|
||||
Checks if any items in ``other`` conflict with items already in this list.
|
||||
|
||||
def iter_ancestors(self, after=None, upto=None):
|
||||
if upto == self:
|
||||
return
|
||||
ancestor = self
|
||||
if after:
|
||||
while ancestor != after:
|
||||
ancestor = ancestor.parent
|
||||
while ancestor and ancestor != upto:
|
||||
yield ancestor
|
||||
ancestor = ancestor.parent
|
||||
Args:
|
||||
other (list): The list to be checked against
|
||||
|
||||
def iter_descendants(self):
|
||||
for child in self.children:
|
||||
yield child
|
||||
for grandchild in child.iter_descendants():
|
||||
yield grandchild
|
||||
Returns:
|
||||
A list of items in ``other`` that conflict with items in this list
|
||||
"""
|
||||
conflicts = []
|
||||
for item in other:
|
||||
if item.startswith('~') and item[1:] in self:
|
||||
conflicts.append(item)
|
||||
if not item.startswith('~') and ('~' + item) in self:
|
||||
conflicts.append(item)
|
||||
return conflicts
|
||||
|
||||
def iter_leaves(self):
|
||||
for descendant in self.iter_descendants():
|
||||
if descendant.is_leaf:
|
||||
yield descendant
|
||||
def to_pod(self):
|
||||
return list(self.values())
|
||||
|
||||
def get_common_ancestor(self, other):
|
||||
if self.has_ancestor(other):
|
||||
return other
|
||||
if other.has_ancestor(self):
|
||||
return self
|
||||
for my_ancestor in self.iter_ancestors():
|
||||
for other_ancestor in other.iter_ancestors():
|
||||
if my_ancestor == other_ancestor:
|
||||
return my_ancestor
|
||||
|
||||
def get_root(self):
|
||||
node = self
|
||||
while not node.is_root:
|
||||
node = node.parent
|
||||
return node
|
||||
class ID(str):
|
||||
|
||||
def has_ancestor(self, other):
|
||||
for ancestor in self.iter_ancestors():
|
||||
if other == ancestor:
|
||||
return True
|
||||
return False
|
||||
def merge_with(self, other):
|
||||
return '_'.join(self, other)
|
||||
|
||||
def has_descendant(self, other):
|
||||
for descendant in self.iter_descendants():
|
||||
if other == descendant:
|
||||
return True
|
||||
return False
|
||||
def merge_into(self, other):
|
||||
return '_'.join(other, self)
|
||||
|
||||
|
||||
class obj_dict(MutableMapping):
|
||||
"""
|
||||
An object that behaves like a dict but each dict entry can also be accesed
|
||||
as an attribute.
|
||||
|
||||
:param not_in_dict: A list of keys that can only be accessed as attributes
|
||||
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def from_pod(pod):
|
||||
return obj_dict(pod)
|
||||
|
||||
def __init__(self, values=None, not_in_dict=None):
|
||||
self.__dict__['dict'] = dict(values or {})
|
||||
self.__dict__['not_in_dict'] = not_in_dict if not_in_dict is not None else []
|
||||
|
||||
def to_pod(self):
|
||||
return self.__dict__['dict']
|
||||
|
||||
def __getitem__(self, key):
|
||||
if key in self.not_in_dict:
|
||||
msg = '"{}" is in the list keys that can only be accessed as attributes'
|
||||
raise KeyError(msg.format(key))
|
||||
return self.__dict__['dict'][key]
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
self.__dict__['dict'][key] = value
|
||||
|
||||
def __delitem__(self, key):
|
||||
del self.__dict__['dict'][key]
|
||||
|
||||
def __len__(self):
|
||||
return sum(1 for _ in self)
|
||||
|
||||
def __iter__(self):
|
||||
for key in self.__dict__['dict']:
|
||||
if key not in self.__dict__['not_in_dict']:
|
||||
yield key
|
||||
|
||||
def __repr__(self):
|
||||
return repr(dict(self))
|
||||
|
||||
def __str__(self):
|
||||
return str(dict(self))
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
self.__dict__['dict'][name] = value
|
||||
|
||||
def __delattr__(self, name):
|
||||
if name in self:
|
||||
del self.__dict__['dict'][name]
|
||||
else:
|
||||
raise AttributeError("No such attribute: " + name)
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name in self.__dict__['dict']:
|
||||
return self.__dict__['dict'][name]
|
||||
else:
|
||||
raise AttributeError("No such attribute: " + name)
|
||||
|
Reference in New Issue
Block a user