2014-05-17 14:26:59 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
# vi:si:et:sw=4:sts=4:ts=4
|
|
|
|
from __future__ import division
|
|
|
|
|
2014-05-14 09:57:11 +00:00
|
|
|
from ox.cache import read_url
|
|
|
|
import re
|
|
|
|
import lxml.html
|
|
|
|
|
2014-05-17 14:26:59 +00:00
|
|
|
import logging
|
|
|
|
logger = logging.getLogger('meta.abebooks')
|
|
|
|
|
2014-05-18 23:24:04 +00:00
|
|
|
base = 'http://www.abebooks.com'
|
|
|
|
|
2014-05-14 09:57:11 +00:00
|
|
|
def get_ids(key, value):
    """Check whether AbeBooks knows the given identifier.

    Only ISBN lookups are supported. Returns a list of (key, value)
    tuples — at most one entry, present when the AbeBooks search page
    contains at least one book-detail link for the ISBN.
    """
    ids = []
    if key in ('isbn10', 'isbn13'):
        # BUG FIX: the original interpolated the builtin `id` function
        # here instead of the `value` parameter, so the search URL was
        # never a valid ISBN query. Use the looked-up value.
        url = '%s/servlet/SearchResults?isbn=%s&sts=t' % (base, value)
        data = read_url(url)
        # A detail-page link in the result HTML means the ISBN is known.
        urls = re.compile('href="(/servlet/BookDetailsPL[^"]+)"').findall(data)
        if urls:
            ids.append((key, value))
    if ids:
        logger.debug('get_ids %s %s => %s', key, value, ids)
    return ids
|
|
|
|
|
|
|
|
def lookup(id):
    """Scrape book metadata for an ISBN from its AbeBooks detail page.

    Searches AbeBooks for the ISBN, follows the first detail link and
    collects every element whose id starts with 'biblio', returning a
    dict of field name -> text content.
    """
    logger.debug('lookup %s', id)
    data = {}
    # Rename AbeBooks field ids to our metadata keys.
    keys = {
        'pubdate': 'date'
    }
    search_url = '%s/servlet/SearchResults?isbn=%s&sts=t' % (base, id)
    page = read_url(search_url)
    matches = re.compile('href="(/servlet/BookDetailsPL[^"]+)"').findall(page)
    if not matches:
        return data
    detail_url = '%s%s' % (base, matches[0])
    doc = lxml.html.document_fromstring(read_url(detail_url))
    # Fields that describe the particular listing, not the book itself.
    skip = ('bookcondition', 'binding', 'edition-amz')
    for node in doc.xpath("//*[contains(@id, 'biblio')]"):
        field = node.attrib['id'].replace('biblio-', '')
        text = node.text_content()
        if text and field not in skip:
            data[keys.get(field, field)] = text
    return data
|