more python3 cleanups

j committed 2014-10-02 10:28:22 +02:00
parent 4b8aad5b38
commit 37dfed3143
9 changed files with 20 additions and 30 deletions


@@ -10,6 +10,7 @@ import os
 from six import BytesIO
 import time
 from six.moves import urllib
+from six import PY2
 import sqlite3
 from .utils import json
@@ -24,6 +25,7 @@ COMPRESS_TYPES = (
     'text/html',
     'text/plain',
     'text/xml',
+    'application/json',
     'application/xhtml+xml',
     'application/x-javascript',
     'application/javascript',
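
Adding 'application/json' means JSON responses now get compressed before they are cached, like the other text-based content types. A minimal sketch of the idea (maybe_compress and the content-type handling are illustrative, not the module's actual code):

    import zlib

    COMPRESS_TYPES = (
        'text/html', 'text/plain', 'text/xml', 'application/json',
        'application/xhtml+xml', 'application/x-javascript',
        'application/javascript',
    )

    def maybe_compress(data, content_type):
        # data is bytes; only text-like content types are stored compressed
        if content_type.split(';')[0].strip() in COMPRESS_TYPES:
            return zlib.compress(data), 1   # 1 = compressed flag, cf. row[1] == 1 below
        return data, 0
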
@@ -203,7 +205,7 @@ class SQLiteCache(Cache):
             elif value == 'data':
                 if row[1] == 1:
                     r = zlib.decompress(r)
-                else:
+                elif PY2:
                     r = str(r)
             break
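
The change from else: to elif PY2: reflects how sqlite3 hands back BLOB columns: a buffer object on Python 2 (which needs str() to become a plain byte string) and bytes on Python 3 (where str() would produce a "b'...'" repr instead). A minimal sketch, with blob_to_bytes as a hypothetical helper:

    import sqlite3
    from six import PY2

    def blob_to_bytes(value):
        # buffer -> byte string on Python 2; already bytes on Python 3
        return str(value) if PY2 else value

    con = sqlite3.connect(':memory:')
    con.execute('CREATE TABLE cache (data BLOB)')
    con.execute('INSERT INTO cache VALUES (?)', (sqlite3.Binary(b'payload'),))
    raw = blob_to_bytes(con.execute('SELECT data FROM cache').fetchone()[0])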


@@ -104,9 +104,8 @@ def save_url(url, filename, overwrite=False):
     if not os.path.exists(dirname):
         os.makedirs(dirname)
     data = read_url(url)
-    f = open(filename, 'w')
-    f.write(data)
-    f.close()
+    with open(filename, 'wb') as f:
+        f.write(data)
 
 def oshash(url):
     def get_size(url):
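
read_url() returns bytes under Python 3, so the file now has to be opened in binary mode; the old open(filename, 'w') would raise a TypeError there, and the with block also guarantees the file gets closed. The pattern in isolation (save_bytes is a made-up name for illustration):

    import os

    def save_bytes(data, filename):
        dirname = os.path.dirname(filename)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        with open(filename, 'wb') as f:   # 'wb' because data is bytes
            f.write(data)

    save_bytes(b'<html></html>', '/tmp/cache/page.html')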


@@ -9,7 +9,6 @@ import unicodedata
 from six.moves import urllib
 from six import string_types
 from .. import find_re, strip_tags, decode_html
 from .. import cache


@@ -2,7 +2,7 @@
 # encoding: utf-8
 from __future__ import print_function
 import re
-import urllib
+from six.moves import urllib
 from ox.cache import read_url
 from ox.html import decode_html, strip_tags
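
Several of the files below make the same switch: the Python-2-only urllib and urllib2 imports are replaced with six.moves, which resolves to urllib/urllib2 on Python 2 and to urllib.request/urllib.parse on Python 3. A small illustration (the URL is just a placeholder):

    from six.moves import urllib
    from six.moves.urllib.parse import quote

    url = 'http://example.com/title/%s' % quote('El Topo')
    req = urllib.request.Request(url, headers={'User-Agent': 'ox'})
    # data = urllib.request.urlopen(req).read()   # bytes on both Python 2 and 3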


@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
 import re
-from urllib import quote
+from six.moves.urllib.parse import quote
 from lxml.html import document_fromstring
 from ox.cache import read_url


@@ -3,7 +3,7 @@
 from datetime import datetime
 import re
 import socket
-from urllib import quote
+from six.moves.urllib.parse import quote
 from ox.cache import read_url
 from ox import find_re, cache, strip_tags, decode_html, get_torrent_info, int_value, normalize_newlines


@@ -2,9 +2,8 @@
 # vi:si:et:sw=4:sts=4:ts=4
 from datetime import datetime
 import re
-import socket
-from urllib import quote, urlencode
-from urllib2 import URLError
+from six.moves.urllib.parse import quote
 from ox import find_re, cache, strip_tags, decode_html, get_torrent_info, normalize_newlines
 from ox.normalize import normalize_imdbid


@@ -2,7 +2,7 @@
 # vi:si:et:sw=4:sts=4:ts=4
 import re
 from datetime import datetime
-from urllib import quote
+from six.moves.urllib.parse import quote
 import lxml.html
 import ox


@@ -1,27 +1,17 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
-import re
-from StringIO import StringIO
-import xml.etree.ElementTree as ET
+import json
 from ox.cache import read_url
-from ox import find_string, find_re
 
 def get_data(id):
-    url = 'http://www.vimeo.com/moogaloop/load/clip:%s' %id
-    xml = read_url(url)
-    tree = ET.parse(StringIO(xml))
-    request_signature = tree.find('request_signature').text
-    request_signature_expires = tree.find('request_signature_expires').text
-    data = {}
-    video_url = "http://www.vimeo.com/moogaloop/play/clip:%s/%s/%s/?q=" % \
-        (id, request_signature, request_signature_expires)
-    data['video_sd'] = video_url + 'sd'
-    data['video_hd'] = video_url + 'hd'
-    video = tree.find('video')
-    for key in ('caption', 'width', 'height', 'duration', 'thumbnail'):
-        data[key] = video.find(key).text
+    url = 'http://vimeo.com/api/v2/video/%s.json' % id
+    data = json.loads(read_url(url).decode('utf-8'))[0]
+    url = 'http://player.vimeo.com/video/%s/config?autoplay=0&byline=0&bypass_privacy=1&context=clip.main&default_to_hd=1&portrait=0' % id
+    info = json.loads(read_url(url).decode('utf-8'))
+    data['video'] = info['request']['files']['h264']
     return data
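
The vimeo scraper now reads two JSON endpoints (the public v2 API for metadata and the player config for stream info) instead of parsing the old moogaloop XML. Hypothetical usage; the id is arbitrary and the exact keys depend on what those endpoints return:

    data = get_data('31179423')
    print(data.get('title'))    # metadata from the v2 API response
    print(data['video'])        # h264 stream info taken from the player config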