openmedialibrary_platform/Shared/lib/python3.4/site-packages/ox/net.py

# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2008
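"""HTTP helpers: open and read URLs, inspect response headers, detect text
encodings, save downloads to disk, and hash remote files via range requests."""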
from __future__ import with_statement, print_function

import os
import gzip
import re
import struct

from six import BytesIO
from six.moves import urllib
from chardet.universaldetector import UniversalDetector

DEBUG = False
# Default headers for HTTP requests.
DEFAULT_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Accept-Encoding': 'gzip'
}

def status(url, data=None, headers=DEFAULT_HEADERS):
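    """Return the HTTP status code for url (the error code if an HTTPError is raised)."""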
    try:
        f = open_url(url, data, headers)
        s = f.code
    except urllib.error.HTTPError as e:
        s = e.code
    return s

def exists(url, data=None, headers=DEFAULT_HEADERS):
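    """Return True if url responds with a status code in the 2xx or 3xx range."""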
    s = status(url, data, headers)
    if 200 <= s < 400:
        return True
    return False

def get_headers(url, data=None, headers=DEFAULT_HEADERS):
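    """Return the response (or error) headers for url as a dict, with the status code under 'Status'."""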
    try:
        f = open_url(url, data, headers)
        f.headers['Status'] = "%s" % f.code
        headers = f.headers
        f.close()
    except urllib.error.HTTPError as e:
        e.headers['Status'] = "%s" % e.code
        headers = e.headers
    return dict(headers)

def open_url(url, data=None, headers=DEFAULT_HEADERS):
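    """Open url with the given data and headers and return the urllib response object."""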
    if isinstance(url, bytes):
        url = url.decode('utf-8')
    url = url.replace(' ', '%20')
    req = urllib.request.Request(url, data, headers)
    return urllib.request.urlopen(req)

def read_url(url, data=None, headers=DEFAULT_HEADERS, return_headers=False, unicode=False):
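    """Read url and return the response body.

    Gzip-encoded responses are decompressed. If unicode is True the body is
    decoded to text using the declared or detected charset (falling back to
    latin-1); if return_headers is True a (headers, body) tuple is returned.
    """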
    if DEBUG:
        print('ox.net.read_url', url)
    f = open_url(url, data, headers)
    result = f.read()
    f.close()
    if f.headers.get('content-encoding', None) == 'gzip':
        result = gzip.GzipFile(fileobj=BytesIO(result)).read()
    if unicode:
        ctype = f.headers.get('content-type', '').lower()
        if 'charset' in ctype:
            encoding = ctype.split('charset=')[-1]
        else:
            encoding = detect_encoding(result)
        if not encoding:
            encoding = 'latin-1'
        result = result.decode(encoding)
    if return_headers:
        f.headers['Status'] = "%s" % f.code
        headers = {}
        for key in f.headers:
            headers[key.lower()] = f.headers[key]
        return headers, result
    return result

def detect_encoding(data):
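    """Guess the character encoding of data (bytes): look for an HTML meta
    charset declaration first, then fall back to chardet's UniversalDetector."""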
    data_lower = data.lower().decode('utf-8', 'ignore')
    charset = re.compile('content="text/html; charset=(.*?)"').findall(data_lower)
    if not charset:
        charset = re.compile('meta charset="(.*?)"').findall(data_lower)
    if charset:
        return charset[0].lower()
    detector = UniversalDetector()
    p = 0
    l = len(data)
    s = 1024
    while p < l:
        detector.feed(data[p:p+s])
        if detector.done:
            break
        p += s
    detector.close()
    return detector.result['encoding']

def save_url(url, filename, overwrite=False):
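    """Download url and write the body to filename, creating parent directories
    as needed; an existing file is kept unless overwrite is True."""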
    if not os.path.exists(filename) or overwrite:
        dirname = os.path.dirname(filename)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        data = read_url(url)
        # read_url returns bytes, so write in binary mode
        with open(filename, 'wb') as f:
            f.write(data)

def oshash(url):
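    """Compute an OpenSubtitles-style hash for a remote file using HTTP HEAD and
    range requests: the file size plus the first and last 64 KiB summed as 64-bit
    integers, truncated to 64 bits. Returns a 16-digit hex string, or the string
    'IOError' on failure."""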
    def get_size(url):
        req = urllib.request.Request(url, headers=DEFAULT_HEADERS.copy())
        req.get_method = lambda: 'HEAD'
        u = urllib.request.urlopen(req)
        if u.code != 200 or 'Content-Length' not in u.headers:
            raise IOError
        return int(u.headers['Content-Length'])

    def get_range(url, start, end):
        headers = DEFAULT_HEADERS.copy()
        headers['Range'] = 'bytes=%s-%s' % (start, end)
        req = urllib.request.Request(url, headers=headers)
        u = urllib.request.urlopen(req)
        return u.read()

    try:
        longlongformat = 'q'  # native signed 64-bit integer
        bytesize = struct.calcsize(longlongformat)

        filesize = get_size(url)
        hash = filesize
        head = get_range(url, 0, min(filesize, 65536))
        if filesize >= 65536:  # >= so tail is defined for files of exactly 64 KiB
            tail = get_range(url, filesize - 65536, filesize)
        if filesize < 65536:
            for offset in range(0, filesize, bytesize):
                buffer = head[offset:offset + bytesize]
                (l_value,) = struct.unpack(longlongformat, buffer)
                hash += l_value
                hash = hash & 0xFFFFFFFFFFFFFFFF  # cut off 64bit overflow
        else:
            for offset in range(0, 65536, bytesize):
                buffer = head[offset:offset + bytesize]
                (l_value,) = struct.unpack(longlongformat, buffer)
                hash += l_value
                hash = hash & 0xFFFFFFFFFFFFFFFF  # cut off 64bit overflow
            for offset in range(0, 65536, bytesize):
                buffer = tail[offset:offset + bytesize]
                (l_value,) = struct.unpack(longlongformat, buffer)
                hash += l_value
                hash = hash & 0xFFFFFFFFFFFFFFFF
        returnedhash = "%016x" % hash
        return returnedhash
    except IOError:
        return "IOError"