get rid of all urllib2 calls
parent 1f14f6db55
commit dcc23ba2a4
4 changed files with 11 additions and 17 deletions
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
-import cookielib
-import urllib2
-from StringIO import StringIO
+from six import StringIO, PY2
+from six.moves import urllib
+from six.moves import http_cookiejar as cookielib
 
 from celery.utils import get_full_cls_name
 from celery.backends import default_backend
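For reference, the six.moves aliases introduced above resolve to urllib2/cookielib on Python 2 and to urllib.request/http.cookiejar on Python 3, so the call sites below can stay identical. A minimal sketch of the pattern the new imports enable (not part of the commit):

    # Minimal sketch of the import pattern used in this commit; runs on Python 2 and 3.
    from six import PY2
    from six.moves import urllib                        # urllib2/urllib on py2, urllib.* on py3
    from six.moves import http_cookiejar as cookielib   # cookielib on py2, http.cookiejar on py3

    cj = cookielib.CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    print('python 2' if PY2 else 'python 3', type(opener))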
@@ -49,15 +49,15 @@ def api_proxy(request):
     cj = SessionCookieJar()
     if 'cj' in request.session:
         cj.load(request.session['cj'])
-    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
+    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
     opener.addheaders = [
         ('User-Agent', request.META.get('HTTP_USER_AGENT'))
     ]
     form = ox.MultiPartForm()
     for key in request.POST:
         form.add_field(key, request.POST[key])
-    r = urllib2.Request(url)
-    body = str(form)
+    r = urllib.request.Request(url)
+    body = form.body()
     r.add_header('Content-type', form.get_content_type())
     r.add_header('Content-length', len(body))
     r.add_data(body)
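One caveat about the hunk above: Request.add_data() no longer exists on Python 3 (it was removed in 3.4), so r.add_data(body) is still Python 2 only; passing data= to the Request constructor is the portable spelling. A hedged sketch of that variant, assuming ox.MultiPartForm behaves as used above and with a placeholder URL and field name:

    # Sketch only; ox.MultiPartForm is assumed to work as in the hunk above,
    # and http://example.com/api plus the 'action' field are placeholders.
    import ox
    from six.moves import urllib

    form = ox.MultiPartForm()
    form.add_field('action', 'status')           # hypothetical field
    body = form.body()                            # byte string, see MultiPartForm.body()

    r = urllib.request.Request('http://example.com/api', data=body)  # portable: no add_data()
    r.add_header('Content-Type', form.get_content_type())
    r.add_header('Content-Length', str(len(body)))
    response = urllib.request.build_opener().open(r)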
@@ -68,7 +68,7 @@ class MultiPartForm(object):
         return body
 
     def body(self):
-        """Return a string representing the form data, including attached files."""
+        """Return a byte string representing the form data, including attached files."""
         # Build a list of lists, each containing "lines" of the
         # request. Each part is separated by a boundary string.
         # Once the list is built, return a string where each
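The docstring change reflects that the assembled payload has to be bytes: Content-Length counts bytes, and attached files may be binary. A minimal illustration of the idea, not the project's MultiPartForm implementation:

    # Sketch of a multipart body built as bytes; boundary and field names are made up.
    boundary = 'XXBOUNDARYXX'
    parts = [
        '--' + boundary,
        'Content-Disposition: form-data; name="key"',
        '',
        'value',
        '--' + boundary + '--',
        '',
    ]
    body = b'\r\n'.join(p.encode('utf-8') for p in parts)
    print(len(body))   # length in bytes, usable as the Content-Length header value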
@@ -7,12 +7,6 @@ from ox import find_re, strip_tags
 
 
 def get_url(id=None, imdb=None):
-    #this would also wor but does not cache:
-    '''
-    from urllib2 import urlopen
-    u = urlopen(url)
-    return u.url
-    '''
     if imdb:
         url = "http://www.rottentomatoes.com/alias?type=imdbid&s=%s" % imdb
         data = read_url(url)
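The deleted block described resolving the alias redirect directly with urlopen(), which works but is not cached; the code keeps read_url() for that reason. For completeness, the six.moves spelling of that uncached variant (a sketch, not part of the commit; resolve_rt_url is a made-up name):

    # Sketch of the uncached lookup the deleted comment described; not used by the code.
    from six.moves import urllib

    def resolve_rt_url(imdb_id):
        url = "http://www.rottentomatoes.com/alias?type=imdbid&s=%s" % imdb_id
        u = urllib.request.urlopen(url)
        return u.url   # final URL after the redirect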
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
-from urllib import quote, unquote_plus
-import urllib2
-import cookielib
+from six.moves.urllib.parse import quote, unquote_plus
+from six.moves import urllib
+from six.moves import http_cookiejar as cookielib
 import re
 from xml.dom.minidom import parseString
 import json
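six.moves.urllib.parse supplies the quoting helpers that lived in urllib on Python 2 and live in urllib.parse on Python 3, so quote and unquote_plus keep working unchanged. A quick check (not part of the commit):

    # Quick check of the relocated quoting helpers; runs on Python 2 and 3.
    from six.moves.urllib.parse import quote, unquote_plus

    assert quote('a b/c') == 'a%20b/c'          # spaces percent-encoded, '/' kept by default
    assert unquote_plus('a+b%2Fc') == 'a b/c'   # '+' decoded to space, %2F to '/'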
@@ -167,7 +167,7 @@ def download_webm(id, filename):
     stream_type = 'video/webm'
     url = "http://www.youtube.com/watch?v=%s" % id
     cj = cookielib.CookieJar()
-    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
+    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
     opener.addheaders = [
         ('User-Agent',
             'Mozilla/5.0 (X11; Linux i686; rv:2.0) Gecko/20100101 Firefox/4.0'),
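The opener built in download_webm is then used exactly as before; a short sketch of the 2/3-compatible fetch it enables (fetch_watch_page is a name made up for the sketch, not project code):

    # Sketch of using the cookie-aware opener from the hunk above.
    from six.moves import urllib
    from six.moves import http_cookiejar as cookielib

    def fetch_watch_page(video_id):
        url = "http://www.youtube.com/watch?v=%s" % video_id
        cj = cookielib.CookieJar()
        opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
        opener.addheaders = [
            ('User-Agent',
                'Mozilla/5.0 (X11; Linux i686; rv:2.0) Gecko/20100101 Firefox/4.0'),
        ]
        return opener.open(url).read()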