python-ox/ox/net.py

# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2008
import os
import gzip
import StringIO
import struct
import urllib
import urllib2
from chardet.universaldetector import UniversalDetector
DEBUG = False
# Default headers for HTTP requests.
DEFAULT_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en-us,en;q=0.5',
    'Accept-Encoding': 'gzip'
}

def status(url, data=None, headers=DEFAULT_HEADERS):
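    """Return the HTTP status code received when requesting url."""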
    try:
        f = open_url(url, data, headers)
        s = f.code
    except urllib2.HTTPError, e:
        s = e.code
    return s

def exists(url, data=None, headers=DEFAULT_HEADERS):
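    """Return True if url responds with a 2xx or 3xx status code."""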
    s = status(url, data, headers)
    if s >= 200 and s < 400:
        return True
    return False

def headers(url, data=None, headers=DEFAULT_HEADERS):
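    """Return the response headers for url as a dict, with the status code added as 'Status'."""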
    try:
        f = open_url(url, data, headers)
        f.headers['Status'] = "%s" % f.code
        headers = f.headers
        f.close()
    except urllib2.HTTPError, e:
        e.headers['Status'] = "%s" % e.code
        headers = e.headers
    return dict(headers)

def open_url(url, data=None, headers=DEFAULT_HEADERS):
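    """Open url (optionally POSTing data) with the given headers and return the response object."""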
    url = url.replace(' ', '%20')
    req = urllib2.Request(url, data, headers)
    return urllib2.urlopen(req)

def read_url(url, data=None, headers=DEFAULT_HEADERS, return_headers=False, unicode=False):
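    """Fetch url and return the response body.

    Gzip-compressed responses are decompressed. If unicode is True, the body
    is decoded using the detected encoding (falling back to latin-1). If
    return_headers is True, a (headers, body) tuple is returned instead.
    """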
    if DEBUG:
        print 'ox.net.read_url', url
    f = open_url(url, data, headers)
    result = f.read()
    f.close()
    if f.headers.get('content-encoding', None) == 'gzip':
        result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
    if unicode:
        encoding = detect_encoding(result)
        if not encoding:
            encoding = 'latin-1'
        result = result.decode(encoding)
    if return_headers:
        f.headers['Status'] = "%s" % f.code
        return dict(f.headers), result
    return result

def detect_encoding(data):
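    """Guess the character encoding of data: use an explicit charset declaration if present, otherwise chardet."""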
    if 'content="text/html; charset=utf-8"' in data:
        return 'utf-8'
    elif 'content="text/html; charset=iso-8859-1"' in data:
        return 'iso-8859-1'
    detector = UniversalDetector()
    for line in data.split('\n'):
        detector.feed(line)
        if detector.done:
            break
    detector.close()
    return detector.result['encoding']

def save_url(url, filename, overwrite=False):
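    """Download url to filename, creating parent directories as needed; an existing file is only replaced if overwrite is True."""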
    if not os.path.exists(filename) or overwrite:
        dirname = os.path.dirname(filename)
        # only create the directory if filename actually has a directory part
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        data = read_url(url)
        # write as binary, the downloaded data is a raw byte string
        f = open(filename, 'wb')
        f.write(data)
        f.close()

def oshash(url):
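    """Compute the 64-bit oshash of the file at url.

    Only the file size and the first and last 64 KiB are fetched, using HTTP
    HEAD and Range requests.
    """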
    def get_size(url):
        req = urllib2.Request(url, headers=DEFAULT_HEADERS.copy())
        req.get_method = lambda: 'HEAD'
        u = urllib2.urlopen(req)
        if u.code != 200 or not 'Content-Length' in u.headers:
            raise IOError
        return int(u.headers['Content-Length'])

    def get_range(url, start, end):
        headers = DEFAULT_HEADERS.copy()
        headers['Range'] = 'bytes=%s-%s' % (start, end)
        req = urllib2.Request(url, headers=headers)
        u = urllib2.urlopen(req)
        return u.read()

    try:
        longlongformat = 'q'  # long long
        bytesize = struct.calcsize(longlongformat)
        filesize = get_size(url)
        hash = filesize
        head = get_range(url, 0, min(filesize, 65536))
        if filesize >= 65536:
            tail = get_range(url, filesize - 65536, filesize)
        if filesize < 65536:
            for offset in range(0, filesize, bytesize):
                buffer = head[offset:offset + bytesize]
                (l_value,) = struct.unpack(longlongformat, buffer)
                hash += l_value
                hash = hash & 0xFFFFFFFFFFFFFFFF  # cut off 64-bit overflow
        else:
            for offset in range(0, 65536, bytesize):
                buffer = head[offset:offset + bytesize]
                (l_value,) = struct.unpack(longlongformat, buffer)
                hash += l_value
                hash = hash & 0xFFFFFFFFFFFFFFFF  # cut off 64-bit overflow
            for offset in range(0, 65536, bytesize):
                buffer = tail[offset:offset + bytesize]
                (l_value,) = struct.unpack(longlongformat, buffer)
                hash += l_value
                hash = hash & 0xFFFFFFFFFFFFFFFF
        returnedhash = "%016x" % hash
        return returnedhash
    except IOError:
        return "IOError"