Compare commits

5 commits

Author  SHA1        Message                              Date
j       03c1191550  fall back to storyline for summary   2019-11-15 14:51:32 +01:00
j       cef85fc4de  depend on lxml                       2019-11-15 14:51:13 +01:00
j       665a4038b2  space                                2019-08-08 17:08:13 +02:00
j       388f33ebb6  cache imdb urls in parallel          2019-08-03 23:38:31 +02:00
j       cc1bad76cd  update user agent                    2019-08-03 23:35:16 +02:00
5 changed files with 19 additions and 5 deletions

@@ -21,7 +21,7 @@ from chardet.universaldetector import UniversalDetector
 DEBUG = False
 # Default headers for HTTP requests.
 DEFAULT_HEADERS = {
-    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',
+    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0',
     'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
     'Accept-Language': 'en-US,en;q=0.8,fr;q=0.6,de;q=0.4',
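
These headers are the defaults sent with every HTTP request the library makes; the commit only refreshes the Firefox user-agent string. As a rough illustration (not the library's own request code), this is how such defaults are typically merged into a request; the fetch() helper and the timeout value are assumptions for the sketch:

    import urllib.request

    DEFAULT_HEADERS = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    }

    def fetch(url, headers=None):
        # hypothetical helper: per-call headers override the module defaults
        merged = dict(DEFAULT_HEADERS, **(headers or {}))
        request = urllib.request.Request(url, headers=merged)
        with urllib.request.urlopen(request, timeout=60) as response:
            return response.read()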

@@ -199,6 +199,11 @@ class Imdb(SiteParser):
         'summary': zebra_table('Plot Summary', more=[
             '<p>(.*?)<em'
         ]),
+        'storyline': {
+            'page': '',
+            're': '<h2>Storyline</h2>.*?<p>(.*?)</p>',
+            'type': 'string'
+        },
         'posterId': {
             'page': 'reference',
             're': '<img.*?class="titlereference-primary-image".*?src="(.*?)".*?>',
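
The new 'storyline' entry reads the title's main page (the empty 'page' value means no path is appended to the base URL) and captures the first paragraph after the Storyline heading. A standalone sketch of what that pattern matches, assuming the parser applies it with DOTALL so it can span line breaks; the HTML here is made up, not a real IMDb response:

    import re

    STORYLINE_RE = re.compile('<h2>Storyline</h2>.*?<p>(.*?)</p>', re.DOTALL)

    # made-up markup standing in for a rendered title page
    html = '<h2>Storyline</h2>\n<div class="inline canwrap">\n<p>Two strangers meet on a night train.</p>\n</div>'
    match = STORYLINE_RE.search(html)
    if match:
        print(match.group(1))  # Two strangers meet on a night train.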
@@ -517,10 +522,13 @@
         ])
         if self['releasedate'] == 'x':
             del self['releasedate']
+        if 'summary' not in self and 'storyline' in self:
+            self['summary'] = self.pop('storyline')
         if 'summary' in self:
             if isinstance(self['summary'], list):
                 self['summary'] = self['summary'][0]
-            self['summary'] = self['summary'].split('</p')[0].strip()
+            self['summary'] = strip_tags(self['summary'].split('</p')[0]).split(' Written by\n')[0].strip()
         if 'credits' in self:
             credits = [
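
Taken together, the two hunks make the scraper prefer the plot summary, fall back to the storyline text when no summary was found, and then strip markup and the trailing " Written by" contributor credit from whichever value survives. A self-contained sketch of that cleanup, with a local strip_tags standing in for the library helper:

    import re

    def strip_tags(text):
        # stand-in for the library's strip_tags helper
        return re.sub(r'<[^>]+>', '', text)

    def clean_summary(summary):
        # keep the first paragraph only, drop tags and the " Written by" credit
        if isinstance(summary, list):
            summary = summary[0]
        return strip_tags(summary.split('</p')[0]).split(' Written by\n')[0].strip()

    print(clean_summary(['<p>A quiet town hides a secret.<br>\n Written by\nanonymous</p>']))
    # -> A quiet town hides a secret.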

@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
 import re
+from multiprocessing.pool import ThreadPool
 from six import string_types
@@ -28,6 +29,7 @@ def cleanup(key, data, data_type):
 class SiteParser(dict):
     baseUrl = ''
     regex = {}
+    pool = ThreadPool(8)

     def get_url(self, page):
         return "%s%s" % (self.baseUrl, page)
@@ -39,6 +41,9 @@
     def __init__(self, timeout=-1):
         self._cache = {}
+        urls = list(set(self.get_url(self.regex[key]['page']) for key in self.regex))
+        self.pool.map(self.get_url, urls)
         for key in self.regex:
             url = self.get_url(self.regex[key]['page'])
             data = self.read_url(url, timeout)
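
The intent of this change ("cache imdb urls in parallel") is to warm the per-instance cache from eight worker threads before the sequential regex loop runs, so each page is downloaded once and the loop is served from the cache. A minimal sketch of that pattern, assuming the mapped callable is the memoizing fetch rather than the URL builder; the Fetcher class and the plain urllib call are illustrative, not the library's SiteParser:

    import urllib.request
    from multiprocessing.pool import ThreadPool

    class Fetcher:
        pool = ThreadPool(8)

        def __init__(self, urls):
            self._cache = {}
            # fetch every distinct URL concurrently to fill the cache
            self.pool.map(self.read_url, set(urls))
            # later, sequential reads hit the cache instead of the network
            self.pages = {url: self.read_url(url) for url in urls}

        def read_url(self, url):
            # memoized fetch: each URL is downloaded at most once per instance
            if url not in self._cache:
                self._cache[url] = urllib.request.urlopen(url).read()
            return self._cache[url]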

@@ -17,7 +17,7 @@ def get_id(url):
 def get_url(id=None, imdb=None, allmovie=None):
     if imdb:
-        query = '"%s"'% imdb
+        query = '"%s"' % imdb
         result = find(query)
         if result:
             url = result[0][1]
@@ -26,7 +26,7 @@ def get_url(id=None, imdb=None, allmovie=None):
             return url
         return ""
     if allmovie:
-        query = '"amg_id = 1:%s"'% allmovie
+        query = '"amg_id = 1:%s"' % allmovie
         result = find(query)
         if result:
             url = result[0][1]
@@ -140,7 +140,7 @@ def get_allmovie_id(wikipedia_url):
     return data.get('amg_id', '')

 def find(query, max_results=10):
-    query = {'action': 'query', 'list':'search', 'format': 'json',
+    query = {'action': 'query', 'list': 'search', 'format': 'json',
         'srlimit': max_results, 'srwhat': 'text', 'srsearch': query.encode('utf-8')}
     url = "http://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode(query)
     data = read_url(url)

@@ -1,2 +1,3 @@
 chardet
 six>=1.5.2
+lxml