Compare commits d845030557...03c1191550

5 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 03c1191550 |  |
|  | cef85fc4de |  |
|  | 665a4038b2 |  |
|  | 388f33ebb6 |  |
|  | cc1bad76cd |  |
5 changed files with 19 additions and 5 deletions
```diff
@@ -21,7 +21,7 @@ from chardet.universaldetector import UniversalDetector
 DEBUG = False
 # Default headers for HTTP requests.
 DEFAULT_HEADERS = {
-    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',
+    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0',
     'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
     'Accept-Language': 'en-US,en;q=0.8,fr;q=0.6,de;q=0.4',
```
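Only the User-Agent string changes here, moving from Firefox 55 to Firefox 69. As a minimal sketch of how a headers dict like this is typically attached to an outgoing request with the standard library (the request target below is a placeholder, not part of this diff):

```python
import urllib.request

# Assumed copy of the headers dict from the diff above (trimmed).
DEFAULT_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
}

# Attach the default headers so the server sees a browser-like client.
req = urllib.request.Request('https://www.imdb.com/', headers=DEFAULT_HEADERS)
with urllib.request.urlopen(req) as response:
    html = response.read()
```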
```diff
@@ -199,6 +199,11 @@ class Imdb(SiteParser):
         'summary': zebra_table('Plot Summary', more=[
             '<p>(.*?)<em'
         ]),
+        'storyline': {
+            'page': '',
+            're': '<h2>Storyline</h2>.*?<p>(.*?)</p>',
+            'type': 'string'
+        },
         'posterId': {
             'page': 'reference',
             're': '<img.*?class="titlereference-primary-image".*?src="(.*?)".*?>',
```
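The new `storyline` entry follows the declarative `SiteParser` pattern: each key maps a page suffix and a regex to a typed result. A minimal sketch of how such an entry could be evaluated, where the sample HTML and the inline evaluation are assumptions, not the library's actual code:

```python
import re

# Hypothetical regex entry in the style of the 'storyline' addition above.
entry = {
    'page': '',
    're': '<h2>Storyline</h2>.*?<p>(.*?)</p>',
    'type': 'string',
}

# Stand-in for the fetched page; the real parser would download
# baseUrl + entry['page'] instead.
html = '<h2>Storyline</h2><div><p>A parser scrapes a movie page.</p></div>'

# DOTALL lets '.*?' match across newlines, which these HTML regexes rely on.
matches = re.compile(entry['re'], re.DOTALL).findall(html)
storyline = matches[0] if matches else ''
print(storyline)  # -> A parser scrapes a movie page.
```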
```diff
@@ -517,10 +522,13 @@
         ])
         if self['releasedate'] == 'x':
             del self['releasedate']
+
+        if 'summary' not in self and 'storyline' in self:
+            self['summary'] = self.pop('storyline')
         if 'summary' in self:
             if isinstance(self['summary'], list):
                 self['summary'] = self['summary'][0]
-            self['summary'] = self['summary'].split('</p')[0].strip()
+            self['summary'] = strip_tags(self['summary'].split('</p')[0]).split(' Written by\n')[0].strip()
 
         if 'credits' in self:
             credits = [
```
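The reworked cleanup truncates the summary at the first closing `</p`, strips any remaining tags, and drops IMDb's trailing " Written by" attribution. A sketch of the same chain, with a stand-in `strip_tags` (the library ships its own; this regex version is an assumption for illustration):

```python
import re

def strip_tags(text):
    # Stand-in for the library's strip_tags: removes anything tag-shaped.
    return re.sub(r'<[^>]+>', '', text)

raw = '<p>A movie about parsers.<em>more</em></p> Written by\nSomeone'
# Same chain as the new line in the diff: cut at '</p', strip tags,
# drop the attribution block, trim whitespace.
summary = strip_tags(raw.split('</p')[0]).split(' Written by\n')[0].strip()
print(summary)  # -> A movie about parsers.more
```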
```diff
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
 import re
+from multiprocessing.pool import ThreadPool
 
 from six import string_types
 
```
```diff
@@ -28,6 +29,7 @@ def cleanup(key, data, data_type):
 class SiteParser(dict):
     baseUrl = ''
     regex = {}
+    pool = ThreadPool(8)
 
     def get_url(self, page):
         return "%s%s" % (self.baseUrl, page)
```
```diff
@@ -39,6 +41,9 @@
 
     def __init__(self, timeout=-1):
         self._cache = {}
+        urls = list(set(self.get_url(self.regex[key]['page']) for key in self.regex))
+        self.pool.map(self.get_url, urls)
+
         for key in self.regex:
             url = self.get_url(self.regex[key]['page'])
             data = self.read_url(url, timeout)
```
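The added lines collect the distinct page URLs and hand them to the shared `ThreadPool`, so the sequential per-key loop below can hit a warm cache. A sketch of that prefetch pattern, assuming the mapped function performs the actual download into a cache (the `fetch` helper below is hypothetical; the diff as scraped maps `self.get_url`, which only builds the URL string):

```python
from multiprocessing.pool import ThreadPool

def fetch(url):
    # Hypothetical downloader standing in for the parser's read_url.
    return '<html>%s</html>' % url

cache = {}

def prefetch(url):
    # Populate a shared cache so later lookups are instant.
    if url not in cache:
        cache[url] = fetch(url)

pool = ThreadPool(8)
urls = ['https://example.com/a', 'https://example.com/b']
# map() blocks until every URL is done; with 8 worker threads the
# downloads overlap instead of running one by one.
pool.map(prefetch, urls)
print(sorted(cache))
```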
```diff
@@ -17,7 +17,7 @@ def get_id(url):
 
 def get_url(id=None, imdb=None, allmovie=None):
     if imdb:
-        query = '"%s"'% imdb
+        query = '"%s"' % imdb
         result = find(query)
         if result:
             url = result[0][1]
```
```diff
@@ -26,7 +26,7 @@ def get_url(id=None, imdb=None, allmovie=None):
                 return url
         return ""
     if allmovie:
-        query = '"amg_id = 1:%s"'% allmovie
+        query = '"amg_id = 1:%s"' % allmovie
         result = find(query)
         if result:
             url = result[0][1]
```
```diff
@@ -140,7 +140,7 @@ def get_allmovie_id(wikipedia_url):
     return data.get('amg_id', '')
 
 def find(query, max_results=10):
-    query = {'action': 'query', 'list':'search', 'format': 'json',
+    query = {'action': 'query', 'list': 'search', 'format': 'json',
         'srlimit': max_results, 'srwhat': 'text', 'srsearch': query.encode('utf-8')}
     url = "http://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode(query)
     data = read_url(url)
```
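For context, `find` drives the MediaWiki search API. A self-contained sketch of the same request using only the standard library, with `json` decoding assumed in place of the library's `read_url` helper and the title extraction as an illustration rather than the library's exact return shape:

```python
import json
import urllib.parse
import urllib.request

def find(query, max_results=10):
    # Same parameters as the dict built in the diff above.
    params = {'action': 'query', 'list': 'search', 'format': 'json',
              'srlimit': max_results, 'srwhat': 'text', 'srsearch': query}
    url = 'https://en.wikipedia.org/w/api.php?' + urllib.parse.urlencode(params)
    with urllib.request.urlopen(url) as response:
        data = json.load(response)
    # Each hit carries a 'title' that can be turned into an article URL.
    return [hit['title'] for hit in data['query']['search']]

print(find('python programming language', max_results=3))
```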
```diff
@@ -1,2 +1,3 @@
 chardet
 six>=1.5.2
+lxml
```