cleanup imports
commit 12440acd96
parent 1e0f93bbf2
22 changed files with 119 additions and 122 deletions
@@ -2,8 +2,9 @@
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division

from ox.cache import read_url
import re

from ox.cache import read_url
import lxml.html

import logging
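The hunk above touches a module that fetches pages with ox.cache.read_url (the ox library's caching HTTP fetch) and parses them with lxml.html. As a rough illustration of how those two imports are typically combined, here is a minimal sketch; it is not part of the diff, and the URL handling and selector are placeholders rather than code from this repository:

    import lxml.html
    from ox.cache import read_url

    def list_links(url):
        # read_url returns the (cached) response body for the given URL
        data = read_url(url)
        doc = lxml.html.document_fromstring(data)
        # collect the text of every link on the page (selector is illustrative)
        return [a.text_content().strip() for a in doc.xpath('//a')]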
@@ -2,18 +2,19 @@
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division

import ox
from ox.cache import read_url
import ox
import re
import xml.etree.ElementTree as ET

from utils import normalize_isbn
from marc_countries import COUNTRIES
from dewey import get_classification
from marc_countries import COUNTRIES
from utils import normalize_isbn

import logging
logger = logging.getLogger('meta.loc')


def get_ids(key, value):
    ids = []
    if key == 'isbn':
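This hunk comes from the Library of Congress module (its logger is named 'meta.loc') and pulls in xml.etree.ElementTree alongside the local marc_countries, dewey and utils helpers. As a reminder of the namespace-aware ElementTree parsing such XML lookups usually need, here is a tiny self-contained sketch; the XML snippet and namespace are invented, and whether meta.loc parses namespaced records exactly this way is an assumption:

    import xml.etree.ElementTree as ET

    xml = '<r xmlns:m="http://example.com/m"><m:title>Example</m:title></r>'
    root = ET.fromstring(xml)
    ns = {'m': 'http://example.com/m'}
    print(root.find('m:title', ns).text)   # prints: Example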
@@ -1,6 +1,11 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division

import re

from ox.cache import read_url
from ox import find_re, strip_tags, decode_html
import re
import stdnum.isbn

from utils import find_isbns
@@ -8,6 +13,7 @@ from utils import find_isbns
import logging
logger = logging.getLogger('meta.lookupbyisbn')


base = 'http://www.lookupbyisbn.com'

def get_ids(key, value):
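The two lookupbyisbn hunks import the ox helpers find_re, strip_tags and decode_html next to the lookupbyisbn.com base URL. The sketch below shows what those helpers do when screen-scraping a page, assuming their usual ox semantics (find_re returns the first regex group, or an empty string on no match); the HTML snippet is invented for illustration:

    from ox import find_re, strip_tags, decode_html

    html = '<h1>Example &amp; Sons</h1> <span class="year">1999</span>'
    title = decode_html(strip_tags(find_re(html, r'<h1>(.*?)</h1>')))
    year = find_re(html, r'class="year">(\d{4})')
    # title == 'Example & Sons', year == '1999'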
@@ -2,19 +2,20 @@
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division

from datetime import datetime
from urllib import urlencode
import json
from datetime import datetime

from ox.cache import read_url

from marc_countries import COUNTRIES
from dewey import get_classification
from marc_countries import COUNTRIES
from utils import normalize_isbn

import logging
logger = logging.getLogger('meta.openlibrary')


KEYS = {
    'authors': 'author',
    'covers': 'cover',
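The openlibrary hunk combines urllib's urlencode, json and the cached read_url, and begins a KEYS table that maps Open Library field names to local ones ('authors' to 'author', 'covers' to 'cover'). A hedged sketch of an ISBN lookup in that style, using Open Library's public books API; the exact request this module builds may differ:

    import json
    from urllib import urlencode        # Python 2, as in the module above
    from ox.cache import read_url

    def lookup_isbn(isbn):
        # bibkeys/format/jscmd are the documented Open Library query parameters
        params = urlencode({'bibkeys': 'ISBN:%s' % isbn, 'format': 'json', 'jscmd': 'data'})
        return json.loads(read_url('https://openlibrary.org/api/books?' + params))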
@@ -1,6 +1,11 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division

import re
import stdnum.isbn


def normalize_isbn(value):
    return ''.join([s for s in value if s.isdigit() or s == 'X'])
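The utils hunk shows normalize_isbn in full: it keeps digits and the ISBN-10 check character 'X' and drops everything else, for example:

    normalize_isbn('0-306-40615-2')            # '0306406152'
    normalize_isbn('ISBN 978-3-16-148410-0')   # '9783161484100'

Note that a lowercase 'x' check digit would be dropped, since only an uppercase 'X' is kept. The stdnum.isbn import added alongside it presumably covers stricter checks; for instance stdnum.isbn.is_valid('0306406152') is True.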
@@ -2,16 +2,19 @@
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import division

from ox.cache import read_url
import lxml.html
import re
import hashlib
from utils import normalize_isbn

from ox.cache import read_url
import lxml.html
import stdnum.isbn

from .utils import normalize_isbn

import logging
logger = logging.getLogger('meta.worldcat')


base_url = 'http://www.worldcat.org'

def get_ids(key, value):
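The hunks consistently end each import block the same way: import logging plus a module-level logger named after the module ('meta.loc', 'meta.lookupbyisbn', 'meta.openlibrary', 'meta.worldcat'). Those loggers only produce output once the application configures logging, for example:

    import logging

    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)s %(levelname)s %(message)s')
    logging.getLogger('meta.worldcat').debug('looking up %s', '0306406152')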