import json
import re
from urllib.parse import unquote

import lxml.html
import stdnum.isbn

from ox import decode_html, strip_tags, find_re, fix_bad_unicode
from ox.cache import read_url


def info(key, value):
    if key not in ('isbn',):
        raise IOError('unknown key %s' % key)
    # Normalize to ISBN-10, which Amazon uses as the ASIN for books.
    if len(value) == 13:
        value = stdnum.isbn.to_isbn10(value)
    if len(value) != 10:
        raise IOError('invalid isbn %s' % value)
    url = 'http://www.amazon.com/dp/' + value
    data = read_url(url).decode()
    doc = lxml.html.document_fromstring(data)
    info = {}
    if '<title>404 - Document Not Found</title>' in data:
        return info
    if 'To discuss automated access to Amazon data please' in data:
        # Amazon served its bot-detection page instead of the product page.
        return info
    for l in doc.xpath('//link[@rel="canonical" and @href]'):
        info['asin'] = [l.get('href').rpartition('/')[-1]]
        break
    info['title'] = strip_tags(decode_html(doc.xpath('//span[@id="productTitle"]')[0].text))
    # Strip series suffixes like ' (Penguin Classics)' from the title.
    info['title'] = re.sub(r' \([^\)]+? Classics\)', '', info['title'])
    info['title'] = re.sub(r' \([^\)]+? Collection\)', '', info['title'])
    d = re.compile('encodedDescription\' : "(.*?)",').findall(data)
    if d:
        info['description'] = strip_tags(decode_html(unquote(d[0])))
        info['description'] = fix_bad_unicode(info['description'])
    else:
        info['description'] = ''
    # Parse the product-details list into key/value pairs.
    content = doc.xpath('//div[@class="content"]')[0]
    content_info = {}
    for li in content.xpath('.//li'):
        v = li.text_content()
        if ': ' in v:
            k, v = v.split(': ', 1)
            content_info[k.strip()] = v.strip()
    if 'Language' in content_info:
        info['language'] = content_info['Language']
    if 'Publisher' in content_info:
        # 'Publisher; Edition (Month Day, Year)' -> publisher, edition, date
        if ' (' in content_info['Publisher']:
            info['date'] = find_re(content_info['Publisher'].split(' (')[-1], r'\d{4}')
        info['publisher'] = content_info['Publisher'].split(' (')[0]
        if '; ' in info['publisher']:
            info['publisher'], info['edition'] = info['publisher'].split('; ', 1)

    if 'ISBN-13' in content_info:
        info['isbn'] = content_info['ISBN-13'].replace('-', '')
    elif 'ISBN-10' in content_info:
        info['isbn'] = stdnum.isbn.to_isbn13(content_info['ISBN-10'])

    # Byline: newer pages use 'a-size-medium' spans, older ones
    # 'author notFaded' spans with the role qualifier on its own line.
    a = doc.xpath('//span[@class="a-size-medium"]')
    if a:
        for span in a:
            r = span.getchildren()[0].text.strip()
            role = get_role(r)
            if role not in info:
                info[role] = []
            info[role].append(span.text.strip())
    else:
        for span in doc.xpath('//span[@class="author notFaded"]'):
            author = [x.strip() for x in span.text_content().strip().split('\n') if x.strip()]
            role = get_role(author[-1])
            if role not in info:
                info[role] = []
            info[role].append(author[0])

    # Pick the largest cover from the data-a-dynamic-image JSON blob
    # (each value is a [width, height] pair) and strip the size suffix
    # from the URL to get the full-resolution image.
    covers = re.compile('data-a-dynamic-image="({.+?})"').findall(data)
    if covers:
        covers = json.loads(decode_html(covers[0]))
        last = [0, 0]
        for url in covers:
            if covers[url] > last:
                last = covers[url]
                info['cover'] = re.sub(r'(\._SX.+?_\.)', '.', url)
    return info
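
# Usage sketch (needs network access, and assumes Amazon's current markup
# still matches the selectors above; the ISBN is a placeholder):
#
#   book = info('isbn', '9781234567897')
#   print(book.get('title'), book.get('author'))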


def get_price(asin, currency='EUR'):
    # EUR prices are scraped from amazon.de, everything else from amazon.com.
    if currency == 'EUR':
        url = 'http://www.amazon.de/dp/' + asin
    else:
        url = 'http://www.amazon.com/dp/' + asin
    data = read_url(url).decode()
    doc = lxml.html.document_fromstring(data)
    for price in doc.xpath("//span[contains(@class, 'a-color-price')]"):
        price = price.text_content().strip()
        if currency == 'EUR':
            # 'EUR 1.234,56' -> '1234.56'
            price = price.replace('EUR ', '').replace('.', '').replace(',', '.')
        else:
            # '$1,234.56' -> '1234.56'
            price = price.replace('$', '').replace(',', '').strip()
        price = float(price)
        return price
    # No price element found on the page.
    return None
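
# Usage sketch (hypothetical ASIN; returns None if no price element is
# found or the markup has changed):
#
#   get_price('0123456789')          # price from amazon.de, in EUR
#   get_price('0123456789', 'USD')   # same listing on amazon.com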


def get_role(value):
    # Map a byline qualifier such as '(Translator)' or '(Editor)' to a
    # role name, defaulting to 'author'.
    if 'Translator' in value:
        role = 'translator'
    elif 'Editor' in value:
        role = 'editor'
    else:
        role = 'author'
    return role
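
# For reference, as implemented above:
#   get_role('Jane Doe (Translator)')  -> 'translator'
#   get_role('(Editor)')               -> 'editor'
#   get_role('Jane Doe')               -> 'author'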