cleanup pylint errors and py2/3 issues

j 2016-06-08 15:32:46 +02:00
commit 77f8876fca
20 changed files with 232 additions and 197 deletions


@@ -11,9 +11,9 @@ def minify(source, comment=''):
         pass
     # python2 performance with unicode string is terrible
     if PY2:
-        if isinstance(source, unicode):
+        if isinstance(source, unicode):  # pylint: disable=undefined-variable
             source = source.encode('utf-8')
-        if isinstance(comment, unicode):
+        if isinstance(comment, unicode):  # pylint: disable=undefined-variable
             comment = comment.encode('utf-8')
     tokens = tokenize(source)
     length = len(tokens)
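Both pylint disables are needed because the unicode built-in exists only on
Python 2, so pylint running under Python 3 reports the name as undefined even
though the "if PY2:" guard keeps those lines from ever executing there. A
minimal sketch of the same pattern in isolation (the to_bytes helper and the
sys.version_info check are illustrative assumptions, not code from this file):

    import sys

    PY2 = sys.version_info[0] == 2

    def to_bytes(text):
        # unicode is undefined on Python 3; short-circuiting on PY2 means the
        # isinstance() call is only evaluated on Python 2, so the inline
        # disable silences a static warning, not a real runtime error.
        if PY2 and isinstance(text, unicode):  # pylint: disable=undefined-variable
            return text.encode('utf-8')
        return text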
@@ -30,20 +30,20 @@ def minify(source, comment=''):
             # numbers or strings or unary operators or grouping operators
             # with a single newline, otherwise remove it
             if prevToken and nextToken\
-                and (prevToken['type'] in ['identifier', 'number', 'string']\
-                or prevToken['value'] in ['++', '--', ')', ']', '}'])\
-                and (nextToken['type'] in ['identifier', 'number', 'string']\
-                or nextToken['value'] in ['+', '-', '++', '--', '~', '!', '(', '[', '{']):
+                and (prevToken['type'] in ['identifier', 'number', 'string']
+                     or prevToken['value'] in ['++', '--', ')', ']', '}']) \
+                and (nextToken['type'] in ['identifier', 'number', 'string']
+                     or nextToken['value'] in ['+', '-', '++', '--', '~', '!', '(', '[', '{']):
                 minified += '\n'
         elif token['type'] == 'whitespace':
             # replace whitespace between two tokens that are identifiers or
             # numbers, or between a token that ends with "+" or "-" and one that
             # begins with "+" or "-", with a single space, otherwise remove it
-            if prevToken and nextToken\
-                and ((prevToken['type'] in ['identifier', 'number']\
-                and nextToken['type'] in ['identifier', 'number'])
-                or (prevToken['value'] in ['+', '-', '++', '--']
-                and nextToken['value'] in ['+', '-', '++', '--'])):
+            if prevToken and nextToken \
+               and ((prevToken['type'] in ['identifier', 'number'] and
+                     nextToken['type'] in ['identifier', 'number']) or
+                    (prevToken['value'] in ['+', '-', '++', '--'] and
+                     nextToken['value'] in ['+', '-', '++', '--'])):
                 minified += ' '
         elif token['type'] != 'comment':
             # remove comments and leave all other tokens untouched
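Both rewritten conditions trade bare backslash continuations for line breaks
inside parentheses, the continuation style PEP 8 recommends: a backslash
followed by any trailing whitespace is a syntax error, while a break inside
brackets is always safe. One way to drop the remaining backslashes entirely is
a single outer pair of parentheses; a sketch under that assumption (the
needs_space helper name is hypothetical, not part of this file):

    def needs_space(prevToken, nextToken):
        # Same condition as the whitespace branch above, wrapped in one outer
        # pair of parentheses so no backslash continuation is needed at all.
        return bool(prevToken and nextToken
                    and ((prevToken['type'] in ['identifier', 'number'] and
                          nextToken['type'] in ['identifier', 'number']) or
                         (prevToken['value'] in ['+', '-', '++', '--'] and
                          nextToken['value'] in ['+', '-', '++', '--'])))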
@@ -178,7 +178,7 @@ def tokenize(source):
                 'value': value
             })
             if type == 'comment':
-                lines = value.split('\n');
+                lines = value.split('\n')
                 column = len(lines[-1])
                 line += len(lines) - 1
             elif type == 'linebreak':
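The semicolon removed in the last hunk is what pylint reports as W0301
(unnecessary-semicolon): a trailing semicolon is legal Python but does
nothing. For illustration:

    value = 'first\nsecond'
    lines = value.split('\n');  # flagged by pylint as W0301 unnecessary-semicolon
    lines = value.split('\n')   # identical behaviour, lint-clean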