From 37dfed314305253730fe1cb1d5954f35cd0a011b Mon Sep 17 00:00:00 2001
From: j <0x006A@0x2620.org>
Date: Thu, 2 Oct 2014 10:28:22 +0200
Subject: [PATCH] more python3 cleanups

---
 ox/cache.py            |  4 +++-
 ox/net.py              |  5 ++---
 ox/web/imdb.py         |  1 -
 ox/web/itunes.py       |  2 +-
 ox/web/metacritic.py   |  3 ++-
 ox/web/mininova.py     |  2 +-
 ox/web/thepiratebay.py |  5 ++---
 ox/web/twitter.py      |  2 +-
 ox/web/vimeo.py        | 26 ++++++++------------------
 9 files changed, 20 insertions(+), 30 deletions(-)

diff --git a/ox/cache.py b/ox/cache.py
index 5b92452..4137a36 100644
--- a/ox/cache.py
+++ b/ox/cache.py
@@ -10,6 +10,7 @@ import os
 from six import BytesIO
 import time
 from six.moves import urllib
+from six import PY2
 import sqlite3
 
 from .utils import json
@@ -24,6 +25,7 @@ COMPRESS_TYPES = (
     'text/html',
     'text/plain',
     'text/xml',
+    'application/json',
     'application/xhtml+xml',
     'application/x-javascript',
     'application/javascript',
@@ -203,7 +205,7 @@ class SQLiteCache(Cache):
             elif value == 'data':
                 if row[1] == 1:
                     r = zlib.decompress(r)
-                else:
+                elif PY2:
                     r = str(r)
             break
 
diff --git a/ox/net.py b/ox/net.py
index a598d2f..0912139 100644
--- a/ox/net.py
+++ b/ox/net.py
@@ -104,9 +104,8 @@ def save_url(url, filename, overwrite=False):
         if not os.path.exists(dirname):
             os.makedirs(dirname)
     data = read_url(url)
-    f = open(filename, 'w')
-    f.write(data)
-    f.close()
+    with open(filename, 'wb') as f:
+        f.write(data)
 
 def oshash(url):
     def get_size(url):
diff --git a/ox/web/imdb.py b/ox/web/imdb.py
index af65154..6103fdb 100644
--- a/ox/web/imdb.py
+++ b/ox/web/imdb.py
@@ -9,7 +9,6 @@ import unicodedata
 
 from six.moves import urllib
 from six import string_types
-
 from .. import find_re, strip_tags, decode_html
 from .. import cache
 
diff --git a/ox/web/itunes.py b/ox/web/itunes.py
index 9d775a1..886ff08 100644
--- a/ox/web/itunes.py
+++ b/ox/web/itunes.py
@@ -2,7 +2,7 @@
 # encoding: utf-8
 from __future__ import print_function
 import re
-import urllib
+from six.moves import urllib
 
 from ox.cache import read_url
 from ox.html import decode_html, strip_tags
diff --git a/ox/web/metacritic.py b/ox/web/metacritic.py
index e59504a..63b713e 100644
--- a/ox/web/metacritic.py
+++ b/ox/web/metacritic.py
@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
 import re
-from urllib import quote
+
+from six.moves.urllib.parse import quote
 
 from lxml.html import document_fromstring
 from ox.cache import read_url
diff --git a/ox/web/mininova.py b/ox/web/mininova.py
index eb78cab..18ba586 100644
--- a/ox/web/mininova.py
+++ b/ox/web/mininova.py
@@ -3,7 +3,7 @@ from datetime import datetime
 import re
 import socket
-from urllib import quote
+from six.moves.urllib.parse import quote
 
 from ox.cache import read_url
 from ox import find_re, cache, strip_tags, decode_html, get_torrent_info, int_value, normalize_newlines
 
diff --git a/ox/web/thepiratebay.py b/ox/web/thepiratebay.py
index b751384..cc0992e 100644
--- a/ox/web/thepiratebay.py
+++ b/ox/web/thepiratebay.py
@@ -2,9 +2,8 @@
 # vi:si:et:sw=4:sts=4:ts=4
 from datetime import datetime
 import re
-import socket
-from urllib import quote, urlencode
-from urllib2 import URLError
+
+from six.moves.urllib.parse import quote
 
 from ox import find_re, cache, strip_tags, decode_html, get_torrent_info, normalize_newlines
 from ox.normalize import normalize_imdbid
diff --git a/ox/web/twitter.py b/ox/web/twitter.py
index 45eff00..039c4a4 100644
--- a/ox/web/twitter.py
+++ b/ox/web/twitter.py
@@ -2,7 +2,7 @@
 # vi:si:et:sw=4:sts=4:ts=4
 import re
 from datetime import datetime
-from urllib import quote
+from six.moves.urllib.parse import quote
 
 import lxml.html
 import ox
diff --git a/ox/web/vimeo.py b/ox/web/vimeo.py
index f51216f..70783ca 100644
--- a/ox/web/vimeo.py
+++ b/ox/web/vimeo.py
@@ -1,27 +1,17 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
-import re
-from StringIO import StringIO
-import xml.etree.ElementTree as ET
+
+import json
 
 from ox.cache import read_url
-from ox import find_string, find_re
 
 
 def get_data(id):
-    url = 'http://www.vimeo.com/moogaloop/load/clip:%s' %id
-    xml = read_url(url)
-    tree = ET.parse(StringIO(xml))
-    request_signature = tree.find('request_signature').text
-    request_signature_expires = tree.find('request_signature_expires').text
-
-    data = {}
-    video_url = "http://www.vimeo.com/moogaloop/play/clip:%s/%s/%s/?q=" % \
-        (id, request_signature, request_signature_expires)
-    data['video_sd'] = video_url + 'sd'
-    data['video_hd'] = video_url + 'hd'
-    video = tree.find('video')
-    for key in ('caption', 'width', 'height', 'duration', 'thumbnail'):
-        data[key] = video.find(key).text
+    url = 'http://vimeo.com/api/v2/video/%s.json' % id
+    data = json.loads(read_url(url).decode('utf-8'))[0]
+
+    url = 'http://player.vimeo.com/video/%s/config?autoplay=0&byline=0&bypass_privacy=1&context=clip.main&default_to_hd=1&portrait=0' % id
+    info = json.loads(read_url(url).decode('utf-8'))
+    data['video'] = info['request']['files']['h264']
     return data
 
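A minimal usage sketch (not part of the patch) of the rewritten ox.web.vimeo.get_data() under Python 3. It assumes the patched ox package is importable and that the Vimeo endpoints referenced in the hunk above still respond; the video id is a placeholder.

# Sketch only: exercise the rewritten get_data() from ox/web/vimeo.py.
# Assumes the patched ox package is on sys.path; '12345' is a placeholder id.
from ox.web import vimeo

data = vimeo.get_data('12345')
print(sorted(data.keys()))   # metadata fields returned by Vimeo's v2 API
print(data['video'])         # h264 stream info taken from the player config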