# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4

import os
import re
import xml.etree.ElementTree as ET
import zipfile
from io import BytesIO
from urllib.parse import unquote

import lxml.html
from PIL import Image

from ox import strip_tags, decode_html, normalize_name

from utils import find_isbns, get_language, to_isbn13

import logging
logging.getLogger('PIL').setLevel(logging.ERROR)
logger = logging.getLogger(__name__)


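# Width/height ratio of an image blob, or -1 if PIL cannot parse it,
# so callers can reject unreadable or implausible cover candidates.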
def get_ratio(data):
    try:
        img = Image.open(BytesIO(data))
        return img.size[0] / img.size[1]
    except Exception:
        return -1


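# Zip entries always use forward slashes; normalize OS-specific paths so
# manifest hrefs can be matched against archive filenames on any platform.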
def normpath(path):
    return '/'.join(os.path.normpath(path).split(os.sep))


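# Extract cover art from an epub. Strategies, in order: a file whose name
# contains 'cover', the cover id declared in the OPF metadata, the largest
# image listed in the OPF manifest, and finally the first <img>/<image>
# reference found in an HTML page. Returns raw image bytes or None.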
def cover(path):
    logger.debug('cover %s', path)
    data = None
    try:
        z = zipfile.ZipFile(path)
    except zipfile.BadZipFile:
        logger.debug('invalid epub file %s', path)
        return data

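    # Helper: read a candidate file and sanity-check its aspect ratio;
    # extremely wide or tall images are almost certainly not covers.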
    def use(filename):
        logger.debug('using %s', filename)
        try:
            data = z.read(filename)
        except Exception:
            return None
        r = get_ratio(data)
        if r < 0.3 or r > 2:
            return None
        return data

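    # Strategy 1: any image whose filename mentions 'cover' (skipping the
    # logo that Calibre adds to converted books).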
    files = []
    for f in z.filelist:
        if f.filename == 'calibre-logo.png':
            continue
        if 'cover' in f.filename.lower() and f.filename.split('.')[-1] in ('jpg', 'jpeg', 'png'):
            return use(f.filename)
        files.append(f.filename)
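    # Strategy 2: parse the OPF package document and look for the manifest
    # item that the <meta name="cover"> entry points at.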
    opf = [f for f in files if f.endswith('opf')]
    if opf:
        #logger.debug('opf: %s', z.read(opf[0]).decode())
        info = ET.fromstring(z.read(opf[0]))
        metadata = info.find('{http://www.idpf.org/2007/opf}metadata')
        manifest = info.find('{http://www.idpf.org/2007/opf}manifest')
        if metadata is not None and manifest is not None:
            for e in metadata:
                if e.tag == '{http://www.idpf.org/2007/opf}meta' and e.attrib.get('name') == 'cover':
                    cover_id = e.attrib['content']
                    for item in manifest:
                        if item.attrib.get('id') == cover_id:
                            filename = unquote(item.attrib['href'])
                            filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                            if filename in files:
                                return use(filename)
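        # Strategy 3: no declared cover; fall back to the largest image in
        # the manifest (the cover is usually the biggest image in the book).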
        if manifest is not None:
            images = [e for e in manifest if 'image' in e.attrib.get('media-type', '')]
            if images:
                image_data = []
                for e in images:
                    filename = unquote(e.attrib['href'])
                    filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                    if filename in files:
                        image_data.append(filename)
                if image_data:
                    image_data.sort(key=lambda name: z.getinfo(name).file_size)
                    return use(image_data[-1])
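            # Strategy 4: scan HTML pages for the first <img> or SVG <image>
            # reference and use that file if it exists in the archive.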
            for e in manifest:
                if 'html' in e.attrib.get('media-type', ''):
                    filename = unquote(e.attrib['href'])
                    filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                    html = z.read(filename).decode('utf-8', 'ignore')
                    img = re.compile('<img.*?src="(.*?)"').findall(html)
                    # svg image
                    img += re.compile('<image.*?href="(.*?)"').findall(html)
                    if img:
                        img = unquote(img[0])
                        img = normpath(os.path.join(os.path.dirname(filename), img))
                        if img in files:
                            return use(img)
    return data


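# Read epub metadata: Dublin Core fields from the OPF package document, a
# table of contents from toc.ncx (or the OPF guide as fallback), text size,
# and an ISBN extracted from the text if the metadata lacks one.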
def info(epub):
    data = {}
    try:
        z = zipfile.ZipFile(epub)
    except zipfile.BadZipFile:
        logger.debug('invalid epub file %s', epub)
        return data
    files = [f.filename for f in z.filelist]
    opf = [f for f in files if f.endswith('opf')]
    if opf:
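        # Dublin Core elements live in the OPF <metadata> block; map
        # 'creator' to 'author' and normalize identifiers to ISBN-13.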
        info = ET.fromstring(z.read(opf[0]))
        metadata = info.find('{http://www.idpf.org/2007/opf}metadata')
        if metadata is not None:
            for e in metadata:
                if e.text and e.text.strip() and e.text not in ('unknown', 'none'):
                    key = e.tag.split('}')[-1]
                    key = {
                        'creator': 'author',
                    }.get(key, key)
                    value = e.text.strip()
                    if key == 'identifier':
                        value = to_isbn13(value)
                        if value:
                            data['isbn'] = value
                    elif key == 'author':
                        # 'Last, First' with two single-word parts is one
                        # reversed name, not a list of two authors
                        data[key] = value.split(', ')
                        if len(data[key]) == 2 and max(len(d.split(' ')) for d in data[key]) == 1:
                            data[key] = [normalize_name(', '.join(data[key]))]
                    else:
                        data[key] = value
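        # Prefer the NCX navigation map for the table of contents.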
        toc = [f for f in files if 'toc.ncx' in f]
        if toc:
            try:
                _toc = ET.fromstring(z.read(toc[0]))
                nav_map = _toc.find('{http://www.daisy.org/z3986/2005/ncx/}navMap')
            except Exception:
                logger.debug('failed to parse toc', exc_info=True)
                nav_map = None
            if nav_map is not None:
                contents = []
                for point in nav_map.findall('{http://www.daisy.org/z3986/2005/ncx/}navPoint'):
                    label = point.find('{http://www.daisy.org/z3986/2005/ncx/}navLabel')
                    if label is not None and len(label):
                        txt = label[0].text
                        if txt:
                            contents.append(txt)
                if contents:
                    data['tableofcontents'] = '\n'.join(contents).strip()
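        # Fall back to the OPF guide's toc reference: read the HTML page it
        # points at and collect the link texts.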
        if 'tableofcontents' not in data:
            guide = info.find('{http://www.idpf.org/2007/opf}guide')
            if guide is not None:
                for ref in guide.findall('{http://www.idpf.org/2007/opf}reference'):
                    if ref.attrib.get('type') == 'toc':
                        filename = unquote(ref.attrib['href']).split('#')[0]
                        filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                        if filename in files:
                            toc = z.read(filename)
                            if toc:
                                doc = lxml.html.document_fromstring(toc)
                                data['tableofcontents'] = '\n'.join([a.text_content() for a in doc.xpath('//a')]).strip()
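    # Post-processing: strip html from the description, record text size,
    # and fill in isbn / date / language from the extracted text if needed.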
    if 'description' in data:
        data['description'] = strip_tags(decode_html(data['description']))
    text = extract_text(epub)
    data['textsize'] = len(text)
    if 'isbn' not in data:
        isbn = extract_isbn(text)
        if isbn:
            data['isbn'] = isbn
    if 'date' in data and 'T' in data['date']:
        # strip the time part of ISO timestamps, keep just YYYY-MM-DD
        data['date'] = data['date'].split('T')[0]
    if 'language' in data and isinstance(data['language'], str):
        data['language'] = get_language(data['language'])
    return data


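# Concatenate the text of all HTML/XML content files, skipping macOS
# resource forks ('._*') and container metadata in META-INF.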
def extract_text(path):
    data = ''
    z = zipfile.ZipFile(path)
    for f in z.filelist:
        if '/._' in f.filename or f.filename.startswith('._'):
            continue
        if 'META-INF' in f.filename:
            continue
        if f.filename.split('.')[-1] in ('html', 'xml', 'htm'):
            data += z.read(f.filename).decode('utf-8', 'ignore')
    return data


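# First ISBN found in the text, or None if there is none.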
def extract_isbn(data):
    isbns = find_isbns(data)
    if isbns:
        return isbns[0]
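

# Minimal usage sketch (an assumption, not part of the original module):
# run this file against an epub to print its metadata and save the cover.
# It presumes 'ox' and the local 'utils' module (find_isbns, get_language,
# to_isbn13) are importable.
if __name__ == '__main__':
    import json
    import sys

    path = sys.argv[1]
    print(json.dumps(info(path), indent=2, ensure_ascii=False))
    image = cover(path)
    if image:
        # cover() returns raw bytes; the data may actually be PNG
        with open('cover.jpg', 'wb') as f:
            f.write(image)
        print('saved cover.jpg (%d bytes)' % len(image))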