From ec252440d978ac32488bed22790ace44944a4d20 Mon Sep 17 00:00:00 2001
From: j <0x006A@0x2620.org>
Date: Tue, 30 Sep 2014 21:27:26 +0200
Subject: [PATCH] from __future__ import print_function
---
ox/django/shortcuts.py | 3 ++-
ox/web/allmovie.py | 5 +++--
ox/web/amazon.py | 3 ++-
ox/web/apple.py | 11 ++++++-----
ox/web/arsenalberlin.py | 5 +++--
ox/web/auth.py | 3 ++-
ox/web/criterion.py | 3 ++-
ox/web/epguides.py | 3 ++-
ox/web/impawards.py | 3 ++-
ox/web/itunes.py | 9 +++++----
ox/web/lyricsfly.py | 4 +++-
ox/web/movieposterdb.py | 5 +++--
ox/web/spiegel.py | 23 ++++++++++++-----------
ox/web/ubu.py | 3 ++-
14 files changed, 49 insertions(+), 34 deletions(-)
diff --git a/ox/django/shortcuts.py b/ox/django/shortcuts.py
index 9f3c02a..2e6a4fb 100644
--- a/ox/django/shortcuts.py
+++ b/ox/django/shortcuts.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import datetime
from django.utils import datetime_safe
from django.http import HttpResponse, Http404
@@ -34,7 +35,7 @@ def render_to_json_response(dictionary, content_type="text/json", status=200):
content_type = "text/javascript"
indent = 2
if getattr(settings, 'JSON_DEBUG', False):
- print json.dumps(dictionary, indent=2, default=_to_json, ensure_ascii=False).encode('utf-8')
+ print(json.dumps(dictionary, indent=2, default=_to_json, ensure_ascii=False).encode('utf-8'))
return HttpResponse(json.dumps(dictionary, indent=indent, default=_to_json,
ensure_ascii=False).encode('utf-8'), content_type=content_type, status=status)
diff --git a/ox/web/allmovie.py b/ox/web/allmovie.py
index 3839cf7..fdb7a46 100644
--- a/ox/web/allmovie.py
+++ b/ox/web/allmovie.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import re
from ox import strip_tags, find_re
@@ -80,6 +81,6 @@ def parse_text(html, title):
return strip_tags(find_re(html, '%s.*?
(.*?) | ' % title)).strip()
if __name__ == '__main__':
- print get_data('129689')
- # print get_data('177524')
+ print(get_data('129689'))
+ # print(get_data('177524'))
diff --git a/ox/web/amazon.py b/ox/web/amazon.py
index 59f65f0..920fe89 100644
--- a/ox/web/amazon.py
+++ b/ox/web/amazon.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import re
from six.moves.urllib.parse import quote
@@ -34,7 +35,7 @@ def get_data(id):
r['authors'] = []
doc = lxml.html.document_fromstring(data)
for e in doc.xpath("//span[contains(@class, 'author')]"):
- print e
+ print(e)
for secondary in e.xpath(".//span[contains(@class, 'a-color-secondary')]"):
if 'Author' in secondary.text:
author = e.xpath(".//span[contains(@class, 'a-size-medium')]")
diff --git a/ox/web/apple.py b/ox/web/apple.py
index 2725aac..57093a2 100644
--- a/ox/web/apple.py
+++ b/ox/web/apple.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import json
import re
@@ -60,8 +61,8 @@ def get_movie_data(title, director):
return data
if __name__ == '__main__':
- print get_movie_data('Alphaville', 'Jean-Luc Godard')
- print get_movie_data('Sin City', 'Roberto Rodriguez')
- print get_movie_data('Breathless', 'Jean-Luc Godard')
- print get_movie_data('Capitalism: A Love Story', 'Michael Moore')
- print get_movie_data('Film Socialisme', 'Jean-Luc Godard')
+ print(get_movie_data('Alphaville', 'Jean-Luc Godard'))
+ print(get_movie_data('Sin City', 'Roberto Rodriguez'))
+ print(get_movie_data('Breathless', 'Jean-Luc Godard'))
+ print(get_movie_data('Capitalism: A Love Story', 'Michael Moore'))
+ print(get_movie_data('Film Socialisme', 'Jean-Luc Godard'))
diff --git a/ox/web/arsenalberlin.py b/ox/web/arsenalberlin.py
index 647c821..e5a0dd2 100644
--- a/ox/web/arsenalberlin.py
+++ b/ox/web/arsenalberlin.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import json
import os
import re
@@ -60,11 +61,11 @@ def backup(filename):
if info:
data[i] = info
if len(data) % 10 == 0:
- print 'save', filename, len(data)
+ print('save', filename, len(data))
with open(filename, 'w') as f:
json.dump(data, f)
else:
- print 'ignore', i
+ print('ignore', i)
with open(filename, 'w') as f:
json.dump(data, f)
return data
diff --git a/ox/web/auth.py b/ox/web/auth.py
index b43196a..e610959 100644
--- a/ox/web/auth.py
+++ b/ox/web/auth.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2009
+from __future__ import print_function
import os
from ox.utils import json
@@ -15,7 +16,7 @@ def get(key):
auth = json.loads(data)
if key in auth:
return auth[key]
- print "please add key %s to json file '%s'" % (key, user_auth)
+ print("please add key %s to json file '%s'" % (key, user_auth))
raise Exception,"no key %s found" % key
def update(key, value):
diff --git a/ox/web/criterion.py b/ox/web/criterion.py
index 3d8ec07..93636d7 100644
--- a/ox/web/criterion.py
+++ b/ox/web/criterion.py
@@ -1,5 +1,6 @@
# -*- coding: UTF-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import re
import ox.cache
@@ -97,4 +98,4 @@ def get_ids(page=None):
return sorted(set(ids), key=int)
if __name__ == '__main__':
- print get_ids()
+ print(get_ids())
diff --git a/ox/web/epguides.py b/ox/web/epguides.py
index c5a7fcb..bb0e551 100644
--- a/ox/web/epguides.py
+++ b/ox/web/epguides.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import re
import time
@@ -44,6 +45,6 @@ def get_show_data(url):
'title':episode[5],
}
except:
- print "oxweb.epguides failed,", url
+ print("oxweb.epguides failed,", url)
return r
diff --git a/ox/web/impawards.py b/ox/web/impawards.py
index 9323fee..855fbb1 100644
--- a/ox/web/impawards.py
+++ b/ox/web/impawards.py
@@ -1,5 +1,6 @@
# vi:si:et:sw=4:sts=4:ts=4
# encoding: utf-8
+from __future__ import print_function
import re
from ox.cache import read_url
@@ -297,4 +298,4 @@ _id_map = {
if __name__ == '__main__':
ids = get_ids()
- print sorted(ids), len(ids)
+ print(sorted(ids), len(ids))
diff --git a/ox/web/itunes.py b/ox/web/itunes.py
index db8c7da..9d775a1 100644
--- a/ox/web/itunes.py
+++ b/ox/web/itunes.py
@@ -1,5 +1,6 @@
# vi:si:et:sw=4:sts=4:ts=4
# encoding: utf-8
+from __future__ import print_function
import re
import urllib
@@ -176,12 +177,12 @@ class ItunesMovie:
if __name__ == '__main__':
from ox.utils import json
data = ItunesAlbum(title = 'So Red the Rose', artist = 'Arcadia').get_data()
- print json.dumps(data, sort_keys = True, indent = 4)
+ print(json.dumps(data, sort_keys = True, indent = 4))
data = ItunesMovie(title = 'The Matrix', director = 'Wachowski').get_data()
- print json.dumps(data, sort_keys = True, indent = 4)
+ print(json.dumps(data, sort_keys = True, indent = 4))
for v in data['relatedMovies']:
data = ItunesMovie(id = v['id']).get_data()
- print json.dumps(data, sort_keys = True, indent = 4)
+ print(json.dumps(data, sort_keys = True, indent = 4))
data = ItunesMovie(id='272960052').get_data()
- print json.dumps(data, sort_keys = True, indent = 4)
+ print(json.dumps(data, sort_keys = True, indent = 4))
diff --git a/ox/web/lyricsfly.py b/ox/web/lyricsfly.py
index 12d821a..b69cda9 100644
--- a/ox/web/lyricsfly.py
+++ b/ox/web/lyricsfly.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
+
from ox.cache import read_url
from ox.html import decode_html
from ox.text import find_re
@@ -18,4 +20,4 @@ def get_lyrics(title, artist):
return lyrics
if __name__ == '__main__':
- print getLyrics('Election Day', 'Arcadia')
+ print(get_lyrics('Election Day', 'Arcadia'))
diff --git a/ox/web/movieposterdb.py b/ox/web/movieposterdb.py
index d3294c3..eb90910 100644
--- a/ox/web/movieposterdb.py
+++ b/ox/web/movieposterdb.py
@@ -1,5 +1,6 @@
# -*- coding: UTF-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import re
@@ -40,5 +41,5 @@ def get_url(id):
return "http://www.movieposterdb.com/movie/%s/" % id
if __name__ == '__main__':
- print get_data('0060304')
- print get_data('0133093')
+ print(get_data('0060304'))
+ print(get_data('0133093'))
diff --git a/ox/web/spiegel.py b/ox/web/spiegel.py
index 390dde8..8f20b39 100644
--- a/ox/web/spiegel.py
+++ b/ox/web/spiegel.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
from datetime import datetime
import re
import time
@@ -58,12 +59,12 @@ def get_news(year, month, day):
if new['url'][:1] == '/':
new['url'] = 'http://www.spiegel.de' + new['url']
news.append(new)
- # print '%s, %s' % (new['section'], dateString)
+ # print('%s, %s' % (new['section'], dateString))
'''
elif dateString[:10] == date and not description:
- print dateString + ' - no description'
+ print(dateString + ' - no description')
elif dateString[:10] == date and not imageUrl:
- print dateString + ' - no image'
+ print(dateString + ' - no image')
'''
return news
@@ -140,7 +141,7 @@ def archive_issues():
else:
wMax = 53
for w in range(wMax, 0, -1):
- print 'get_issue(%d, %d)' % (y, w)
+ print('get_issue(%d, %d)' % (y, w))
issue = get_issue(y, w)
if issue:
dirname = '%s/%d/%02d' % (archivePath, y, w)
@@ -185,7 +186,7 @@ def archive_issues():
p['min'] = issue['pages']
if issue['pages'] > p['max']:
p['max'] = issue['pages']
- print p['min'], p['sum'] / p['num'], p['max']
+ print(p['min'], p['sum'] / p['num'], p['max'])
def archive_news():
@@ -218,7 +219,7 @@ def archive_news():
else:
dMax = days[m]
for d in range(dMax, 0, -1):
- print 'getNews(%d, %d, %d)' % (y, m, d)
+ print('getNews(%d, %d, %d)' % (y, m, d))
news = getNews(y, m ,d)
for new in news:
dirname = archivePath + '/' + new['date'][0:4] + '/' + new['date'][5:7] + new['date'][8:10] + '/' + new['date'][11:13] + new['date'][14:16]
@@ -260,19 +261,19 @@ def archive_news():
if strings[0] != new['title1'] or strings[1] != new['title2']:
colon.append('%s %s %s: %s' % (new['date'], new['title'], new['title1'], new['title2']))
for key in sorted(count):
- print '%6d %-24s %s' % (count[key]['count'], key, count[key]['string'])
+ print('%6d %-24s %s' % (count[key]['count'], key, count[key]['string']))
for value in colon:
- print value
+ print(value)
if __name__ == '__main__':
# spiegel = Spiegel(2008, 8)
- # print spiegel.getContents()
+ # print(spiegel.getContents())
# news = News(2001, 9, 10)
# output(news.getNews())
'''
x = []
for d in range(10, 30):
- print '2/%d' % d
+ print('2/%d' % d)
news = getNews(2008, 2, d)
for new in news:
strings = new['url'].split('/')
@@ -281,7 +282,7 @@ if __name__ == '__main__':
string += '/' + format_subsection(strings[4])
if not string in x:
x.append(string)
- print x
+ print(x)
'''
# archive_issues()
archive_news()
diff --git a/ox/web/ubu.py b/ox/web/ubu.py
index 50a25b3..7286234 100644
--- a/ox/web/ubu.py
+++ b/ox/web/ubu.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
+from __future__ import print_function
import re
from ox import find_re, strip_tags, decode_html
@@ -31,7 +32,7 @@ def get_data(url):
del m['video']
m['title'] = strip_tags(decode_html(title)).strip()
if not 'url' in m:
- print url, 'missing'
+ print(url, 'missing')
if 'title' in m:
m['title'] = re.sub('(.*?) \(\d{4}\)$', '\\1', m['title'])