diff --git a/ox/file.py b/ox/file.py
index b99ceef..081cda3 100644
--- a/ox/file.py
+++ b/ox/file.py
@@ -294,7 +294,7 @@ def copy_file(source, target, verbose=False):
 def read_file(file, verbose=False):
     if verbose:
         print('reading', file)
-    f = open(file)
+    f = open(file, 'rb')
     data = f.read()
     f.close()
     return data
@@ -309,8 +309,10 @@ def read_json(file, verbose=False):
 def write_file(file, data, verbose=False):
     if verbose:
         print('writing', file)
+    if not isinstance(data, bytes):
+        data = data.encode('utf-8')
     write_path(file)
-    f = open(file, 'w')
+    f = open(file, 'wb')
     f.write(data)
     f.close()
     return len(data)
diff --git a/ox/js.py b/ox/js.py
index 7a300e7..13ead66 100644
--- a/ox/js.py
+++ b/ox/js.py
@@ -1,6 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
+import sys
 from ox.utils import json
 
 
@@ -8,6 +9,12 @@ def minify(source, comment=''):
     # see https://github.com/douglascrockford/JSMin/blob/master/README
     def get_next_non_whitespace_token():
         pass
+    # python2 performance with unicode string is terrible
+    if sys.version[0] == '2':
+        if isinstance(source, unicode):
+            source = source.encode('utf-8')
+        if isinstance(comment, unicode):
+            comment = comment.encode('utf-8')
     tokens = tokenize(source)
     length = len(tokens)
     minified = '/*' + comment + '*/' if comment else ''