2014-05-04 17:26:43 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
2014-09-02 22:32:44 +00:00
|
|
|
|
2014-05-04 17:26:43 +00:00
|
|
|
|
2014-05-19 18:12:02 +00:00
|
|
|
import os
|
2014-05-04 17:26:43 +00:00
|
|
|
import xml.etree.ElementTree as ET
|
|
|
|
import zipfile
|
2014-05-19 18:12:02 +00:00
|
|
|
import re
|
2015-11-16 15:02:45 +00:00
|
|
|
from urllib.parse import unquote
|
2016-01-11 13:43:54 +00:00
|
|
|
import lxml.html
|
2016-01-13 11:11:28 +00:00
|
|
|
from io import BytesIO
|
|
|
|
|
|
|
|
from PIL import Image
|
2014-05-04 17:26:43 +00:00
|
|
|
|
2016-02-03 09:15:09 +00:00
|
|
|
from ox import strip_tags, decode_html, normalize_name
|
2014-05-04 17:26:43 +00:00
|
|
|
|
2016-01-11 13:43:54 +00:00
|
|
|
from utils import find_isbns, get_language, to_isbn13
|
2014-05-04 17:26:43 +00:00
|
|
|
|
2014-05-19 18:12:02 +00:00
|
|
|
import logging
|
2016-01-23 13:26:13 +00:00
|
|
|
# PIL logs verbosely at lower levels; only surface real errors from it.
logging.getLogger('PIL').setLevel(logging.ERROR)

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
|
2014-05-19 18:12:02 +00:00
|
|
|
|
2016-01-16 03:49:22 +00:00
|
|
|
|
2016-01-13 11:11:28 +00:00
|
|
|
def get_ratio(data):
    """Return the width/height ratio of the image encoded in *data*.

    Returns -1 if the bytes cannot be decoded as an image (or the ratio
    cannot be computed); callers use the -1 sentinel to reject candidates.
    """
    try:
        img = Image.open(BytesIO(data))
        width, height = img.size
        return width / height
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; decode errors still yield the sentinel
        return -1
|
|
|
|
|
2016-01-31 17:28:53 +00:00
|
|
|
def normpath(path):
    """Normalize *path* and force forward slashes.

    Zip archive member names always use '/', so OS-specific separators
    introduced by os.path functions are converted back.
    """
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
|
|
|
|
|
2014-05-04 17:26:43 +00:00
|
|
|
def cover(path):
    """Extract the cover image data from the epub at *path*.

    Candidates are tried in order:
      1. any archive member whose name contains 'cover' with a jpg/png suffix
      2. the manifest item referenced by the OPF <meta name="cover"> element
      3. the largest image listed in the OPF manifest
      4. the first <img>/<image> reference found in an HTML manifest item

    Returns the raw image bytes, or None if no plausible cover is found.
    """
    logger.debug('cover %s', path)
    data = None
    try:
        z = zipfile.ZipFile(path)
    except zipfile.BadZipFile:
        logger.debug('invalid epub file %s', path)
        return data

    def use(filename):
        # Read a candidate and reject images with implausible aspect
        # ratios (banners, spacers, etc.) — covers are roughly portrait.
        logger.debug('using %s', filename)
        try:
            data = z.read(filename)
        except Exception:
            # narrowed from a bare `except:`; a missing/corrupt member
            # simply disqualifies this candidate
            return None
        r = get_ratio(data)
        if r < 0.3 or r > 2:
            return None
        return data

    files = []
    for f in z.filelist:
        # calibre adds its logo to converted epubs; never use it as a cover
        if f.filename == 'calibre-logo.png':
            continue
        if 'cover' in f.filename.lower() and f.filename.split('.')[-1] in ('jpg', 'jpeg', 'png'):
            return use(f.filename)
        files.append(f.filename)
    opf = [f for f in files if f.endswith('opf')]
    if opf:
        info = ET.fromstring(z.read(opf[0]))
        metadata = info.findall('{http://www.idpf.org/2007/opf}metadata')
        metadata = metadata[0] if metadata else None
        manifest = info.findall('{http://www.idpf.org/2007/opf}manifest')
        manifest = manifest[0] if manifest else None
        # explicit None checks: truth-testing an Element is deprecated
        if metadata is not None and manifest is not None:
            cover_id = None
            for e in list(metadata):
                if e.tag == '{http://www.idpf.org/2007/opf}meta' and e.attrib.get('name') == 'cover':
                    cover_id = e.attrib['content']
            # previously this raised NameError when no <meta name="cover">
            # element was present; only consult the manifest if one was found
            if cover_id is not None:
                for e in list(manifest):
                    if e.attrib['id'] == cover_id:
                        filename = unquote(e.attrib['href'])
                        # hrefs are relative to the OPF file's directory
                        filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                        if filename in files:
                            return use(filename)
        if manifest is not None:
            # .get: 'media-type' is required by spec but not guaranteed in the wild
            images = [e for e in list(manifest) if 'image' in e.attrib.get('media-type', '')]
            if images:
                image_data = []
                for e in images:
                    filename = unquote(e.attrib['href'])
                    filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                    if filename in files:
                        image_data.append(filename)
                if image_data:
                    # heuristic: the largest image is most likely the cover
                    image_data.sort(key=lambda name: z.getinfo(name).file_size)
                    return use(image_data[-1])
            for e in list(manifest):
                if 'html' in e.attrib.get('media-type', ''):
                    filename = unquote(e.attrib['href'])
                    filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                    # guard against manifest entries that point outside the archive
                    if filename not in files:
                        continue
                    html = z.read(filename).decode('utf-8', 'ignore')
                    img = re.compile('<img.*?src="(.*?)"').findall(html)
                    # svg image
                    img += re.compile('<image.*?href="(.*?)"').findall(html)
                    if img:
                        # image srcs are relative to the HTML document itself
                        img = unquote(img[0])
                        img = normpath(os.path.join(os.path.dirname(filename), img))
                        if img in files:
                            return use(img)
    return data
|
|
|
|
|
|
|
|
def info(epub):
    """Return a metadata dict for the epub at *epub* (path or file object).

    Keys may include title, author (list), isbn, date, language,
    description, tableofcontents and textsize, depending on what the
    OPF metadata, toc.ncx and content documents provide.
    Returns an empty dict for invalid zip files.
    """
    data = {}
    try:
        z = zipfile.ZipFile(epub)
    except zipfile.BadZipFile:
        logger.debug('invalid epub file %s', epub)
        return data
    files = [f.filename for f in z.filelist]
    opf = [f for f in files if f.endswith('opf')]
    if opf:
        info = ET.fromstring(z.read(opf[0]))
        metadata = info.findall('{http://www.idpf.org/2007/opf}metadata')
        if metadata:
            metadata = metadata[0]
            for e in list(metadata):
                # skip empty/placeholder values some tools write
                if e.text and e.text.strip() and e.text not in ('unknown', 'none'):
                    key = e.tag.split('}')[-1]
                    # map Dublin Core element names to our keys
                    key = {
                        'creator': 'author',
                    }.get(key, key)
                    value = e.text.strip()
                    if key == 'identifier':
                        # only keep identifiers that normalize to an ISBN-13
                        value = to_isbn13(value)
                        if value:
                            data['isbn'] = value
                    elif key == 'author':
                        data[key] = value.split(', ')
                        # "Last, First" split into two single words is one
                        # reversed name, not two authors — re-join and normalize
                        if len(data[key]) == 2 and max(len(d.split(' ')) for d in data[key]) == 1:
                            data[key] = [normalize_name(', '.join(data[key]))]
                    else:
                        data[key] = value
        toc = [f for f in files if 'toc.ncx' in f]
        if toc:
            try:
                _toc = ET.fromstring(z.read(toc[0]))
                nav_map = _toc.find('{http://www.daisy.org/z3986/2005/ncx/}navMap')
            except Exception:
                # narrowed from a bare `except:`; a broken toc is non-fatal
                logger.debug('failed to parse toc', exc_info=True)
                nav_map = None
            # explicit None check: truth-testing an Element is deprecated
            if nav_map is not None:
                contents = []
                for point in nav_map.findall('{http://www.daisy.org/z3986/2005/ncx/}navPoint'):
                    label = point.find('{http://www.daisy.org/z3986/2005/ncx/}navLabel')
                    # navLabel must exist and have a child (its <text> element)
                    if label is not None and len(label):
                        txt = list(label)[0].text
                        if txt:
                            contents.append(txt)
                if contents:
                    data['tableofcontents'] = '\n'.join(contents).strip()
        if 'tableofcontents' not in data:
            # fall back to the OPF guide's toc reference
            guide = info.find('{http://www.idpf.org/2007/opf}guide')
            if guide is not None:
                for ref in guide.findall('{http://www.idpf.org/2007/opf}reference'):
                    if ref.attrib.get('type') == 'toc':
                        filename = unquote(ref.attrib['href']).split('#')[0]
                        filename = normpath(os.path.join(os.path.dirname(opf[0]), filename))
                        if filename in files:
                            toc = z.read(filename)
                            if toc:
                                doc = lxml.html.document_fromstring(toc)
                                data['tableofcontents'] = '\n'.join([a.text_content() for a in doc.xpath('//a')]).strip()
    if 'description' in data:
        data['description'] = strip_tags(decode_html(data['description']))
    text = extract_text(epub)
    data['textsize'] = len(text)
    if 'isbn' not in data:
        # no ISBN in the metadata — scan the book text itself
        isbn = extract_isbn(text)
        if isbn:
            data['isbn'] = isbn
    if 'date' in data and 'T' in data['date']:
        # strip the time part of ISO datetime stamps
        data['date'] = data['date'].split('T')[0]
    if 'language' in data and isinstance(data['language'], str):
        data['language'] = get_language(data['language'])
    # drop keys whose values are empty/whitespace-only strings
    for key in list(data):
        if isinstance(data[key], str) and not data[key].strip():
            del data[key]
    return data
|
|
|
|
|
|
|
|
def extract_text(path):
    """Concatenate the raw markup of all content documents in an epub.

    *path* may be a filesystem path or a file-like object. Members are
    decoded as UTF-8 with undecodable bytes ignored. macOS resource
    forks ('._*') and META-INF container files are skipped, and only
    html/htm/xml members are included. Raises zipfile.BadZipFile for
    invalid archives.
    """
    parts = []
    # `with` ensures the archive handle is closed (previously leaked)
    with zipfile.ZipFile(path) as z:
        for f in z.filelist:
            # skip macOS resource-fork shadow files
            if '/._' in f.filename or f.filename.startswith('._'):
                continue
            # skip epub container metadata
            if 'META-INF' in f.filename:
                continue
            if f.filename.split('.')[-1] in ('html', 'xml', 'htm'):
                parts.append(z.read(f.filename).decode('utf-8', 'ignore'))
    # join once instead of quadratic `data += ...` accumulation
    return ''.join(parts)
|
|
|
|
|
|
|
|
def extract_isbn(data):
    """Return the first ISBN found in *data*, or None if none is found."""
    found = find_isbns(data)
    return found[0] if found else None
|