use metadata.opf or metadata if available
This commit is contained in:
parent
a33f1b18de
commit
95085bde8c
3 changed files with 64 additions and 8 deletions
|
@ -40,10 +40,10 @@ def remove_missing():
|
||||||
if dirty:
|
if dirty:
|
||||||
db.session.commit()
|
db.session.commit()
|
||||||
|
|
||||||
def add_file(id, f, prefix):
|
def add_file(id, f, prefix, from_=None):
|
||||||
user = state.user()
|
user = state.user()
|
||||||
path = f[len(prefix):]
|
path = f[len(prefix):]
|
||||||
data = media.metadata(f)
|
data = media.metadata(f, from_)
|
||||||
file = File.get_or_create(id, data, path)
|
file = File.get_or_create(id, data, path)
|
||||||
item = file.item
|
item = file.item
|
||||||
if 'primaryid' in file.info:
|
if 'primaryid' in file.info:
|
||||||
|
@ -88,7 +88,7 @@ def run_scan():
|
||||||
id = media.get_id(f)
|
id = media.get_id(f)
|
||||||
file = File.get(id)
|
file = File.get(id)
|
||||||
if not file:
|
if not file:
|
||||||
file = add_file(id, f, prefix)
|
file = add_file(id, f, prefix, f)
|
||||||
added += 1
|
added += 1
|
||||||
trigger_event('change', {})
|
trigger_event('change', {})
|
||||||
|
|
||||||
|
@ -168,7 +168,7 @@ def run_import(options=None):
|
||||||
shutil.move(f_import, f)
|
shutil.move(f_import, f)
|
||||||
else:
|
else:
|
||||||
shutil.copy(f_import, f)
|
shutil.copy(f_import, f)
|
||||||
file = add_file(id, f, prefix_books)
|
file = add_file(id, f, prefix_books, f_import)
|
||||||
file.move()
|
file.move()
|
||||||
item = file.item
|
item = file.item
|
||||||
if listname:
|
if listname:
|
||||||
|
|
|
@ -11,6 +11,7 @@ import ox
|
||||||
import pdf
|
import pdf
|
||||||
import epub
|
import epub
|
||||||
import txt
|
import txt
|
||||||
|
import opf
|
||||||
|
|
||||||
def get_id(f=None, data=None):
|
def get_id(f=None, data=None):
|
||||||
if data:
|
if data:
|
||||||
|
@ -19,11 +20,12 @@ def get_id(f=None, data=None):
|
||||||
return base64.b32encode(ox.sha1sum(f, cached=True).decode('hex'))
|
return base64.b32encode(ox.sha1sum(f, cached=True).decode('hex'))
|
||||||
|
|
||||||
|
|
||||||
def metadata(f):
|
def metadata(f, from_=None):
|
||||||
ext = f.split('.')[-1]
|
ext = f.split('.')[-1]
|
||||||
data = {}
|
data = {}
|
||||||
data['extension'] = ext
|
data['extension'] = ext
|
||||||
data['size'] = os.stat(f).st_size
|
data['size'] = os.stat(f).st_size
|
||||||
|
|
||||||
if ext == 'pdf':
|
if ext == 'pdf':
|
||||||
info = pdf.info(f)
|
info = pdf.info(f)
|
||||||
elif ext == 'epub':
|
elif ext == 'epub':
|
||||||
|
@ -31,9 +33,15 @@ def metadata(f):
|
||||||
elif ext == 'txt':
|
elif ext == 'txt':
|
||||||
info = txt.info(f)
|
info = txt.info(f)
|
||||||
|
|
||||||
|
opf_info = {}
|
||||||
|
metadata_opf = os.path.join(os.path.dirname(from_ or f), 'metadata.opf')
|
||||||
|
if os.path.exists(metadata_opf):
|
||||||
|
opf_info = opf.info(metadata_opf)
|
||||||
|
|
||||||
for key in (
|
for key in (
|
||||||
'title', 'author', 'date', 'publisher', 'isbn',
|
'title', 'author', 'date', 'publisher',
|
||||||
'textsize', 'pages'
|
'language', 'textsize', 'pages',
|
||||||
|
'isbn', 'asin'
|
||||||
):
|
):
|
||||||
if key in info:
|
if key in info:
|
||||||
value = info[key]
|
value = info[key]
|
||||||
|
@ -44,9 +52,12 @@ def metadata(f):
|
||||||
value = None
|
value = None
|
||||||
if value:
|
if value:
|
||||||
data[key] = info[key]
|
data[key] = info[key]
|
||||||
|
if key in opf_info:
|
||||||
|
data[key] = opf_info[key]
|
||||||
if 'isbn' in data:
|
if 'isbn' in data:
|
||||||
data['primaryid'] = ['isbn', data['isbn'][0]]
|
data['primaryid'] = ['isbn', data['isbn'][0]]
|
||||||
|
elif 'asin' in data:
|
||||||
|
data['primaryid'] = ['asin', data['asin'][0]]
|
||||||
if 'author' in data:
|
if 'author' in data:
|
||||||
if isinstance(data['author'], basestring):
|
if isinstance(data['author'], basestring):
|
||||||
data['author'] = data['author'].split('; ')
|
data['author'] = data['author'].split('; ')
|
||||||
|
|
45
oml/media/opf.py
Normal file
45
oml/media/opf.py
Normal file
|
@ -0,0 +1,45 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# vi:si:et:sw=4:sts=4:ts=4
|
||||||
|
from __future__ import division
|
||||||
|
|
||||||
|
import xml.etree.ElementTree as ET
|
||||||
|
|
||||||
|
import stdnum.isbn
|
||||||
|
|
||||||
|
from utils import normalize_isbn
|
||||||
|
from ox import strip_tags
|
||||||
|
import ox.iso
|
||||||
|
|
||||||
|
def info(opf):
    """Parse a calibre-style metadata.opf file and return a metadata dict.

    opf: path to a metadata.opf XML file (OPF 2.0 package metadata).

    Returns a dict whose keys mirror the OPF/Dublin Core element names
    (with 'creator' mapped to 'author'); 'isbn' and 'asin' are lists
    collected from <identifier> elements, other values are tag-stripped
    text. 'date' is truncated to YYYY-MM-DD and 'language' is converted
    from an ISO code to a language name.
    """
    data = {}
    with open(opf) as fd:
        # keep the parameter name intact; bind the parsed tree separately
        # instead of shadow-reassigning `opf`
        tree = ET.fromstring(fd.read())
    ns = '{http://www.idpf.org/2007/opf}'
    metadata = tree.findall(ns + 'metadata')[0]
    # list(elem) iterates direct children and, unlike the deprecated
    # getchildren(), works on old and new ElementTree alike
    for e in list(metadata):
        if e.text:
            key = e.tag.split('}')[-1]
            # map OPF/Dublin Core names to the keys used elsewhere
            key = {
                'creator': 'author',
            }.get(key, key)
            value = e.text
            if key == 'identifier':
                # identifiers may be ISBNs or (scheme=AMAZON) ASINs;
                # collect each kind into a list, ignore anything else
                isbn = normalize_isbn(value)
                if stdnum.isbn.is_valid(isbn):
                    data.setdefault('isbn', []).append(isbn)
                if e.attrib.get(ns + 'scheme') == 'AMAZON':
                    data.setdefault('asin', []).append(value)
            else:
                data[key] = strip_tags(e.text)
    # dates may carry a time part; keep only YYYY-MM-DD
    if 'date' in data and len(data['date']) > 10:
        data['date'] = data['date'][:10]
    if 'language' in data:
        # convert ISO language code to a language name
        data['language'] = ox.iso.codeToLang(data['language'])
    return data
|
Loading…
Reference in a new issue