# python-ox/ox/web/imdb.py
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
2010-12-31 07:23:28 +00:00
import urllib
2010-07-07 23:25:57 +00:00
import re
import time
2011-04-15 09:46:20 +00:00
import unicodedata
2010-07-07 23:25:57 +00:00
import ox
from ox import find_re, strip_tags
2010-10-08 16:07:39 +00:00
import ox.cache
2010-07-07 23:25:57 +00:00
from siteparser import SiteParser
2013-06-14 10:17:18 +00:00
import duckduckgo
2010-07-07 23:25:57 +00:00
2013-06-28 14:53:25 +00:00
from ..utils import datetime
from ..geo import normalize_country_name
2011-10-30 12:31:19 +00:00
def read_url(url, data=None, headers=ox.cache.DEFAULT_HEADERS, timeout=ox.cache.cache_timeout, valid=None, unicode=False):
    '''
    Fetch url through the ox.cache layer.

    data:    optional POST body, passed through unchanged
    headers: request headers; copied so callers/cache can never mutate the
             shared DEFAULT_HEADERS dict
    timeout: cache timeout passed to ox.cache.read_url
    valid:   optional callable that decides whether a response is acceptable.
             Bug fix: this used to be accepted but silently dropped; it is
             now forwarded to ox.cache.read_url so response validation works.
    unicode: if True, return a decoded unicode string instead of bytes
    '''
    headers = headers.copy()
    return ox.cache.read_url(url, data, headers, timeout, valid=valid, unicode=unicode)
2010-07-10 08:24:56 +00:00
2012-08-15 15:15:40 +00:00
def get_url(id):
    '''Return the canonical public imdb.com URL for a (7-digit) title id.'''
    return "http://www.imdb.com/title/tt%s/" % (id,)
2010-07-07 23:25:57 +00:00
class Imdb(SiteParser):
    '''
    Scraper for imdb.com title pages.

    An instance behaves like a dict (via SiteParser): it is populated by
    downloading several pages for one title id, applying the rules in
    `regex`, and then heavily post-processing the raw matches in __init__
    (title/aka normalization, credits, countries, release dates, ...).

    >>> Imdb('0068646')['title']
    u'The Godfather'
    >>> Imdb('0133093')['title']
    u'The Matrix'
    '''
    # Scraping rules, presumably interpreted by SiteParser (not visible here):
    # for each key, 'page' is the path appended to self.baseUrl, 're' is one
    # regex or a pipeline of regexes/callables applied in order, and 'type'
    # controls coercion of the result ('list', 'string', 'int', 'float').
    regex = {
        'alternativeTitles': {
            'page': 'releaseinfo',
            're': [
                'name="akas".*?<table.*?>(.*?)</table>',
                "td>(.*?)</td>.*?<td>(.*?)</td>"
            ],
            'type': 'list'
        },
        'aspectratio': {
            'page': 'combined',
            're': 'Aspect Ratio:</h5><div class="info-content">([\d\.]+)',
            'type': 'float',
        },
        'budget': {
            'page': 'business',
            're': [
                '<h5>Budget</h5>\s*?\$(.*?)<br',
                lambda data: find_re(ox.decode_html(data).replace(',', ''), '\d+')
            ],
            'type': 'int'
        },
        'cast': {
            'page': 'combined',
            're': [
                '<td class="nm">.*?>(.*?)</a>.*?<td class="char">(.*?)</td>',
                lambda ll: [strip_tags(l) for l in ll]
            ],
            'type': 'list'
        },
        'cinematographer': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Cinematography by</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'connections': {
            'page': 'trivia?tab=mc',
            're': '<h4 class="li_group">(.*?)</h4>(.*?)(<\/div>\n <a|<script)',
            'type': 'list'
        },
        'country': {
            'page': 'combined',
            're': [
                '<div class="info"><h5>Country:</h5>.*?<div class="info">',
                #'<a href="/country/.*?">(.*?)</a>', #links changed to work with existing caches, just take all links
                '<a.*?>(.*?)</a>',
            ],
            'type': 'list'
        },
        'creator': {
            'page': 'combined',
            're': [
                '<h5>Creator.?:</h5>.*?<div class="info-content">(.*?)</div>',
                '<a href="/name/.*?>(.*?)</a>'
            ],
            'type': 'list'
        },
        'director': {
            'page': 'combined',
            're': [
                lambda data: data.split('<b>Series Crew</b>')[0],
                'Directed by</a>(.*?)</table>',
                '<a href="/name/.*?>(.*?)</a>'
            ],
            'type': 'list'
        },
        # full-credits style director block; folded into 'creator' or dropped
        # in __init__ depending on whether the title is a series (see #0092999)
        '_director': {
            'page': 'combined',
            're': [
                '<h5>Director:</h5>.*?<div class="info-content">(.*?)</div>',
                '<a href="/name/.*?>(.*?)</a>'
            ],
            'type': 'list'
        },
        'editor': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Film Editing by</a>(.*?)</table>',
                '<a href="/name/.*?>(.*?)</a>'
            ],
            'type': 'list'
        },
        'composer': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Original Music by</a>(.*?)</table>',
                '<a href="/name/.*?>(.*?)</a>'
            ],
            'type': 'list'
        },
        'episodeTitle': {
            'page': 'combined',
            're': '<div id="tn15title">.*?<em>(.*?)</em>',
            'type': 'string'
        },
        'filmingLocations': {
            'page': 'locations',
            're': [
                '<a href="/search/title\?locations=.*?".*?>(.*?)</a>',
                lambda data: data.strip(),
            ],
            'type': 'list'
        },
        'genre': {
            'page': 'combined',
            're': [
                '<h5>Genre:</h5>(.*?)<hr',
                '<a href="/Sections/Genres/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'gross': {
            'page': 'business',
            're': [
                '<h5>Gross</h5>\s*?\$(.*?)<br',
                lambda data: find_re(data.replace(',', ''), '\d+')
            ],
            'type': 'int'
        },
        'keyword': {
            'page': 'keywords',
            're': '<a href="/keyword/.*?>(.*?)</a>',
            'type': 'list'
        },
        'language': {
            'page': 'combined',
            're': [
                '<div class="info"><h5>Language:</h5>.*?<div class="info">',
                #'<a href="/language/.*?">(.*?)</a>', #links changed to work with existing caches, just take all links
                '<a.*?>(.*?)</a>',
            ],
            'type': 'list'
        },
        'summary': {
            'page': 'plotsummary',
            're': '</div>.*?<p class="plotpar">(.*?)<i>',
            'type': 'string'
        },
        'posterId': {
            'page': 'combined',
            're': '/primary-photo/media/rm(.*?)/tt',
            'type': 'string'
        },
        'posterIds': {
            'page': 'posters',
            're': '/unknown-thumbnail/media/rm(.*?)/tt',
            'type': 'list'
        },
        'producer': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Produced by</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'productionCompany': {
            'page': 'combined',
            're': [
                'Production Companies</b><ul>(.*?)</ul>',
                '<a href="/company/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'rating': {
            'page': 'combined',
            're': '<div class="starbar-meta">.*?<b>([\d,.]+?)/10</b>',
            'type': 'float'
        },
        'releasedate': {
            'page': 'releaseinfo',
            're': [
                '<td class="release_date">(.*?)</td>',
                ox.strip_tags,
            ],
            'type': 'list'
        },
        'reviews': {
            'page': 'externalreviews',
            're': [
                '<ol>(.*?)</ol>',
                '<li><a href="(http.*?)".*?>(.*?)</a></li>'
            ],
            'type': 'list'
        },
        'runtime': {
            'page': 'combined',
            're': '<h5>Runtime:</h5><div class="info-content">.*?([0-9]+ sec|[0-9]+ min).*?</div>',
            'type': 'string'
        },
        'color': {
            'page': 'combined',
            're': [
                '<h5>Color:</h5><div class="info-content">(.*?)</div>',
                '<a.*?>(.*?)</a>'
            ],
            'type': 'list'
        },
        'sound': {
            'page': 'combined',
            're': [
                '<h5>Sound Mix:</h5><div class="info-content">(.*?)</div>',
                '<a.*?>(.*?)</a>'
            ],
            'type': 'list'
        },
        'season': {
            'page': 'combined',
            're': [
                '<h5>Original Air Date:</h5>.*?<div class="info-content">(.*?)</div>',
                '\(Season (\d+), Episode \d+\)',
            ],
            'type': 'int'
        },
        'episode': {
            'page': 'combined',
            're': [
                '<h5>Original Air Date:</h5>.*?<div class="info-content">(.*?)</div>',
                '\(Season \d+, Episode (\d+)\)',
            ],
            'type': 'int'
        },
        'series': {
            'page': 'combined',
            're': '<h5>TV Series:</h5>.*?<a href="/title/tt(\d{7})',
            'type': 'string'
        },
        'isSeries': {
            'page': 'combined',
            're': '<span class="tv-extra">(TV series|TV mini-series) ',
            'type': 'string'
        },
        'title': {
            'page': 'combined',
            're': '<h1>(.*?) <span>',
            'type': 'string'
        },
        'trivia': {
            'page': 'trivia',
            're': '<div class="sodatext">(.*?)<br',
            'type': 'list',
        },
        'votes': {
            'page': 'combined',
            're': '<a href="ratings" class="tn15more">([\d,]*?) votes</a>',
            'type': 'string'
        },
        'writer': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Writing credits</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'year': {
            'page': 'combined',
            're': '="og:title" content=".*?\((\d{4}).*?"',
            'type': 'int'
        }
    }

    def read_url(self, url, timeout):
        # per-instance memoization on top of the ox.cache disk cache
        # (self._cache is presumably set up by SiteParser -- not visible here)
        if not url in self._cache:
            self._cache[url] = read_url(url, timeout=timeout, unicode=True)
        return self._cache[url]

    def __init__(self, id, timeout=-1):
        '''
        id:      imdb title id as a 7-digit string, e.g. '0133093'
        timeout: cache timeout forwarded to ox.cache
                 (-1 appears to mean "any cached copy is valid" -- confirm
                 against ox.cache)
        '''
        #use akas.imdb.com to always get original title:
        #http://www.imdb.com/help/show_leaf?titlelanguagedisplay
        self.baseUrl = "http://akas.imdb.com/title/tt%s/" % id
        super(Imdb, self).__init__(timeout)

        # bail out early (leaving the dict empty) on unknown ids
        url = self.baseUrl + 'combined'
        page = self.read_url(url, timeout=-1)
        if '<title>IMDb: Page not found</title>' in page \
            or 'The requested URL was not found on our server.' in page:
            return
        # transient imdb error page: wait a moment and re-parse with
        # timeout=0 to force fresh downloads
        if "<p>We're sorry, something went wrong.</p>" in page:
            time.sleep(1)
            super(Imdb, self).__init__(0)

        # a single aka comes back as a flat [type, title] pair; wrap it so
        # the rest of the code can always assume a list of pairs
        if 'alternativeTitles' in self:
            if len(self['alternativeTitles']) == 2 and \
               isinstance(self['alternativeTitles'][0], basestring):
                self['alternativeTitles'] = [self['alternativeTitles']]

        #normalize country names
        if 'country' in self:
            self['country'] = [normalize_country_name(c) or c for c in self['country']]

        # drop duplicate sound mixes (order is not preserved here)
        if 'sound' in self:
            self['sound'] = list(set(self['sound']))

        # --- derive 'internationalTitle' from the aka list -----------------
        # map aka type -> candidate titles, skipping types that match a
        # stop word (variant spellings, working titles, ...)
        types = {}
        stop_words = [
            'alternative spelling',
            'alternative title',
            'alternative transliteration',
            'closing credits title',
            'complete title',
            'IMAX version',
            'informal short title',
            'International (Spanish title)',
            'Japan (imdb display title)',
            'longer version',
            'new title',
            'original subtitled version',
            'pre-release title',
            'promotional abbreviation',
            'recut version',
            'reissue title',
            'restored version',
            'script title',
            'short title',
            '(subtitle)',
            'TV title',
            'working title',
            'World-wide (Spanish title)',
        ]
        #ignore english japanese titles
        #for movies that are not only from japan
        if ['Japan'] != self.get('country', []):
            stop_words += [
                'Japan (English title)'
            ]
        for t in self.get('alternativeTitles', []):
            # one table row can carry several types joined with '/'
            for type in t[0].split('/'):
                type = type.strip()
                stop_word = False
                for key in stop_words:
                    if key in type:
                        stop_word = True
                        break
                if not stop_word:
                    if not type in types:
                        types[type] = []
                    types[type].append(t[1])
        # invert: title -> list of types it appears under
        titles = {}
        for type in types:
            for title in types[type]:
                if not title in titles:
                    titles[title] = []
                titles[title].append(type)
        # for each type keep the title that occurs under the most types
        def select_title(type):
            title = types[type][0]
            count = 0
            if len(types[type]) > 1:
                for t in types[type]:
                    if len(titles[t]) > count:
                        count = len(titles[t])
                        title = t
            return title
        types = {type: select_title(type) for type in types}
        # aka types that qualify as an international/english title, in
        # decreasing order of preference; first regexp with a match wins
        regexps = [
            "^.+ \(imdb display title\) \(English title\)$",
            "^USA \(imdb display title\)$",
            "^International \(English title\)$",
            "^International \(English title\)$",
            "^UK \(imdb display title\)$",
            "^International \(.+\) \(English title\)$",
            "^World-wide \(English title\)$",
        ]
        if 'Hong Kong' in self.get('country', []):
            regexps += [
                "Hong Kong \(English title\)"
            ]
        english_countries = (
            'USA', 'UK', 'United States', 'United Kingdom',
            'Australia', 'New Zealand'
        )
        # for non-english productions accept a much wider set of aka types
        if not filter(lambda c: c in english_countries, self.get('country', [])):
            regexps += [
                "^[^(]+ \(English title\)$",
                "^.+ \(.+\) \(English title\)$",
                "^USA$",
                "^UK$",
                "^USA \(.+\)$",
                "^UK \(.+\)$",
                "^Australia \(.+\)$",
                "World-wide \(English title\)",
                "\(literal English title\)",
                "^International \(.+ title\)$",
                "^International \(.+\) \(.+ title\)$",
            ]
        for regexp in regexps:
            for type in types:
                if re.compile(regexp).findall(type):
                    #print types[type], type
                    self['internationalTitle'] = types[type]
                    break
            if 'internationalTitle' in self:
                break

        # strip surrounding quotes and trailing episode markers like (#1.9)
        def cleanup_title(title):
            if title.startswith('"') and title.endswith('"'):
                title = title[1:-1]
            if title.startswith("'") and title.endswith("'"):
                title = title[1:-1]
            title = re.sub('\(\#[.\d]+\)', '', title)
            return title.strip()

        for t in ('title', 'internationalTitle'):
            if t in self:
                self[t] = cleanup_title(self[t])

        if 'internationalTitle' in self and \
            self.get('title', '').lower() == self['internationalTitle'].lower():
            del self['internationalTitle']

        # rebuild alternativeTitles as (title, [countries]) pairs, dropping
        # the main/international title and working titles
        if 'alternativeTitles' in self:
            alt = {}
            for t in self['alternativeTitles']:
                title = cleanup_title(t[1])
                if title not in (self.get('title'), self.get('internationalTitle')):
                    if title not in alt:
                        alt[title] = []
                    for c in t[0].split('/'):
                        if not '(working title)' in c:
                            c = c.replace('International', '').replace('World-wide', '').split('(')[0].strip()
                            if c:
                                alt[title].append(c)
            self['alternativeTitles'] = []
            for t in sorted(alt, lambda a, b: cmp(sorted(alt[a]), sorted(alt[b]))):
                if alt[t]:
                    countries = sorted([normalize_country_name(c) or c for c in alt[t]])
                    self['alternativeTitles'].append((t, countries))
            if not self['alternativeTitles']:
                del self['alternativeTitles']

        # prefer the international title as the main title, keep the local
        # one as originalTitle
        if 'internationalTitle' in self:
            self['originalTitle'] = self['title']
            self['title'] = self.pop('internationalTitle')

        # runtime string like "120 min" / "30 sec" -> seconds
        if 'runtime' in self and self['runtime']:
            if 'min' in self['runtime']: base=60
            else: base=1
            self['runtime'] = int(find_re(self['runtime'], '([0-9]+)')) * base
        if 'runtime' in self and not self['runtime']:
            del self['runtime']
        if 'votes' in self: self['votes'] = self['votes'].replace(',', '')

        if 'cast' in self:
            # single (actor, character) pair comes back flat; wrap it
            if isinstance(self['cast'][0], basestring):
                self['cast'] = [self['cast']]
            self['actor'] = [c[0] for c in self['cast']]
            def cleanup_character(c):
                c = c.replace('(uncredited)', '').strip()
                return c
            self['cast'] = [{'actor': x[0], 'character': cleanup_character(x[1])}
                            for x in self['cast']]

        # connections page -> {relation: [{id, title, description?}, ...]}
        if 'connections' in self:
            cc={}
            if len(self['connections']) == 3 and isinstance(self['connections'][0], basestring):
                self['connections'] = [self['connections']]
            for rel, data, _ in self['connections']:
                #cc[unicode(rel)] = re.compile('<a href="/title/tt(\d{7})/">(.*?)</a>').findall(data)
                def get_conn(c):
                    r = {
                        'id': c[0],
                        'title': cleanup_title(c[1]),
                    }
                    description = c[2].split('<br />')
                    if len(description) == 2 and description[-1].strip() != '-':
                        r['description'] = description[-1].strip()
                    return r
                cc[unicode(rel)] = map(get_conn, re.compile('<a href="/title/tt(\d{7})/">(.*?)</a>(.*?)<\/div', re.DOTALL).findall(data))

            self['connections'] = cc

        # 'Home' shows up as a spurious link in country/genre tables
        for key in ('country', 'genre'):
            if key in self:
                self[key] = filter(lambda x: x.lower() != 'home', self[key])
        #0092999
        # series pages credit the creator in the '_director' slot
        if '_director' in self:
            if 'series' in self or 'isSeries' in self:
                self['creator'] = self.pop('_director')
            else:
                del self['_director']
        # normalize the matched "TV series"/"TV mini-series" string to a bool
        if 'isSeries' in self:
            del self['isSeries']
            self['isSeries'] = True
        if 'episodeTitle' in self:
            self['episodeTitle'] = re.sub('Episode \#\d+\.\d+', '', self['episodeTitle'])

        # --- episode handling: pull series metadata into the episode -------
        if 'series' in self:
            series = Imdb(self['series'], timeout=timeout)
            self['seriesTitle'] = series['title']
            if 'episodeTitle' in self:
                self['seriesTitle'] = series['title']
                if 'season' in self and 'episode' in self:
                    self['title'] = "%s (S%02dE%02d) %s" % (
                        self['seriesTitle'], self['season'], self['episode'], self['episodeTitle'])
                else:
                    self['title'] = "%s (S01) %s" % (self['seriesTitle'], self['episodeTitle'])
                    self['season'] = 1
                self['title'] = self['title'].strip()
                if 'director' in self:
                    self['episodeDirector'] = self['director']

                # fall back to the series' directors as creators, but an
                # implausibly long list means episode directors leaked in --
                # keep only the first
                if not 'creator' in series and 'director' in series:
                    series['creator'] = series['director']
                    if len(series['creator']) > 10:
                        series['creator'] = series['director'][:1]

                for key in ['creator', 'country']:
                    if key in series:
                        self[key] = series[key]

                if 'year' in series:
                    self['seriesYear'] = series['year']
                    if not 'year' in self:
                        self['year'] = series['year']

                if 'year' in self:
                    self['episodeYear'] = self['year']
            if 'creator' in self:
                self['seriesDirector'] = self['creator']
            if 'originalTitle' in self:
                del self['originalTitle']
        else:
            # not an episode: episode-specific keys are meaningless
            for key in ('seriesTitle', 'episodeTitle', 'season', 'episode'):
                if key in self:
                    del self[key]
        # for series, the creators are treated as the directors
        if 'creator' in self:
            if 'director' in self:
                self['episodeDirector'] = self['director']
            self['director'] = self['creator']

        #make lists unique but keep order
        for key in ('director', 'language'):
            if key in self:
                self[key] = [x for i,x in enumerate(self[key])
                             if x not in self[key][i+1:]]

        # credits matched as tuples -> names; dedupe but keep first-seen order
        for key in ('actor', 'writer', 'producer', 'editor', 'composer'):
            if key in self:
                if isinstance(self[key][0], list):
                    self[key] = [i[0] for i in self[key] if i]
                self[key] = sorted(list(set(self[key])),
                                   lambda a, b: self[key].index(a) - self[key].index(b))

        if 'budget' in self and 'gross' in self:
            self['profit'] = self['gross'] - self['budget']

        # earliest parseable release date as 'YYYY-MM-DD'; 'x' sorts after
        # any real date and marks "nothing parseable"
        if 'releasedate' in self:
            def parse_date(d):
                try:
                    d = datetime.strptime(d, '%d %B %Y')
                except:
                    try:
                        d = datetime.strptime(d, '%B %Y')
                    except:
                        return 'x'
                return '%d-%02d-%02d' % (d.year, d.month, d.day)
            self['releasedate'] = min([
                parse_date(d) for d in self['releasedate']
            ])
            if self['releasedate'] == 'x':
                del self['releasedate']

        # keep only the first paragraph of the plot summary
        if 'summary' in self:
            self['summary'] = self['summary'].split('</p')[0].strip()
2011-10-15 14:54:09 +00:00
class ImdbCombined(Imdb):
    '''
    Lightweight Imdb variant that keeps only the rules served by the
    "combined" and "releaseinfo" pages, so no other pages are fetched.
    '''
    def __init__(self, id, timeout=-1):
        wanted = ('combined', 'releaseinfo')
        # shadow the class-level rule table with a filtered instance copy
        self.regex = dict(
            (key, rule) for key, rule in self.regex.items()
            if rule['page'] in wanted
        )
        super(ImdbCombined, self).__init__(id, timeout)
2012-08-15 15:15:40 +00:00
def get_movie_by_title(title, timeout=-1):
    '''
    This only works for exact title matches from the data dump
    Usually in the format
        Title (Year)
        "Series Title" (Year) {(#Season.Episode)}
        "Series Title" (Year) {Episode Title (#Season.Episode)}
    If there is more than one film with that title for the year
        Title (Year/I)

    >>> get_movie_by_title(u'"Father Knows Best" (1954) {(#5.34)}')
    u'1602860'

    >>> get_movie_by_title(u'The Matrix (1999)')
    u'0133093'

    >>> get_movie_by_title(u'Little Egypt (1951)')
    u'0043748'

    >>> get_movie_by_title(u'Little Egypt (1897/I)')
    u'0214882'

    >>> get_movie_by_title(u'Little Egypt')
    None

    >>> get_movie_by_title(u'"Dexter" (2006) {Father Knows Best (#1.9)}')
    u'0866567'
    '''
    params = {'s':'tt','q': title}
    if isinstance(title, unicode):
        # imdb expects latin-1 where possible, utf-8 otherwise
        try:
            params['q'] = unicodedata.normalize('NFKC', params['q']).encode('latin-1')
        except:
            params['q'] = params['q'].encode('utf-8')
    url = "http://akas.imdb.com/find?" + urllib.urlencode(params)
    data = read_url(url, timeout=timeout, unicode=True)
    #if search results in redirect, get id of current page
    match = re.search(
        '<meta property="og:url" content="http://www.imdb.com/title/tt(\d{7})/" />',
        data)
    if match:
        return match.group(1)
    return None
2012-08-15 15:15:40 +00:00
def get_movie_id(title, director='', year='', timeout=-1):
    '''
    Find the imdb id for a title, optionally narrowed by director and year.
    Tries, in order: a hard-coded override table, imdb's own search
    (redirect, then first result), and finally a duckduckgo site search.
    Returns the 7-digit id as a string, or '' if nothing was found.

    >>> get_movie_id('The Matrix')
    u'0133093'

    >>> get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard')
    u'0060304'

    >>> get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard', '1967')
    u'0060304'

    >>> get_movie_id(u"Histoire(s) du cinema: Le controle de l'univers", 'Jean-Luc Godard')
    u'0179214'

    >>> get_movie_id(u"Histoire(s) du cinéma: Le contrôle de l'univers", 'Jean-Luc Godard')
    u'0179214'
    '''
    # manual overrides for titles imdb search gets wrong
    # (empty string means "known to have no usable imdb entry")
    imdbId = {
        (u'Le jour se l\xe8ve', u'Marcel Carn\xe9'): '0031514',
        (u'Wings', u'Larisa Shepitko'): '0061196',
        (u'The Ascent', u'Larisa Shepitko'): '0075404',
        (u'Fanny and Alexander', u'Ingmar Bergman'): '0083922',
        (u'Torment', u'Alf Sj\xf6berg'): '0036914',
        (u'Crisis', u'Ingmar Bergman'): '0038675',
        (u'To Joy', u'Ingmar Bergman'): '0043048',
        (u'Humain, trop humain', u'Louis Malle'): '0071635',
        (u'Place de la R\xe9publique', u'Louis Malle'): '0071999',
        (u'God\u2019s Country', u'Louis Malle'): '0091125',
        (u'Flunky, Work Hard', u'Mikio Naruse'): '0022036',
        (u'The Courtesans of Bombay', u'Richard Robbins') : '0163591',
        (u'Je tu il elle', u'Chantal Akerman') : '0071690',
        (u'Hotel Monterey', u'Chantal Akerman') : '0068725',
        (u'No Blood Relation', u'Mikio Naruse') : '023261',
        (u'Apart from You', u'Mikio Naruse') : '0024214',
        (u'Every-Night Dreams', u'Mikio Naruse') : '0024793',
        (u'Street Without End', u'Mikio Naruse') : '0025338',
        (u'Sisters of the Gion', u'Kenji Mizoguchi') : '0027672',
        (u'Osaka Elegy', u'Kenji Mizoguchi') : '0028021',
        (u'Blaise Pascal', u'Roberto Rossellini') : '0066839',
        (u'Japanese Girls at the Harbor', u'Hiroshi Shimizu') : '0160535',
        (u'The Private Life of Don Juan', u'Alexander Korda') : '0025681',
        (u'Last Holiday', u'Henry Cass') : '0042665',
        (u'A Colt Is My Passport', u'Takashi Nomura') : '0330536',
        (u'Androcles and the Lion', u'Chester Erskine') : '0044355',
        (u'Major Barbara', u'Gabriel Pascal') : '0033868',
        (u'Come On Children', u'Allan King') : '0269104',

        (u'Jimi Plays Monterey & Shake! Otis at Monterey', u'D. A. Pennebaker and Chris Hegedus') : '',
        (u'Martha Graham: Dance on Film', u'Nathan Kroll') : '',
        (u'Carmen', u'Carlos Saura'): '0085297',
        (u'The Story of a Cheat', u'Sacha Guitry'): '0028201',
        (u'Weekend', 'Andrew Haigh'): '1714210',
    }.get((title, director), None)
    if imdbId:
        return imdbId
    # build the imdb search query; director/year are folded into q
    params = {'s':'tt','q': title}
    if director:
        params['q'] = u'"%s" %s' % (title, director)
    if year:
        params['q'] = u'"%s (%s)" %s' % (title, year, director)
    # keep the un-encoded query around for the web-search fallback below
    google_query = "site:imdb.com %s" % params['q']
    # imdb expects latin-1 where possible, utf-8 otherwise
    if isinstance(params['q'], unicode):
        try:
            params['q'] = unicodedata.normalize('NFKC', params['q']).encode('latin-1')
        except:
            params['q'] = params['q'].encode('utf-8')
    params = urllib.urlencode(params)
    url = "http://akas.imdb.com/find?" + params
    #print url
    data = read_url(url, timeout=timeout, unicode=True)
    #if search results in redirect, get id of current page
    r = '<meta property="og:url" content="http://www.imdb.com/title/tt(\d{7})/" />'
    results = re.compile(r).findall(data)
    if results:
        return results[0]
    #otherwise get first result
    r = '<td valign="top">.*?<a href="/title/tt(\d{7})/"'
    results = re.compile(r).findall(data)
    if results:
        return results[0]

    #print (title, director), ": '',"
    #print google_query
    # last resort: web search restricted to imdb.com
    #results = google.find(google_query, timeout=timeout)
    results = duckduckgo.find(google_query, timeout=timeout)
    if results:
        for r in results[:2]:
            imdbId = find_re(r[1], 'title/tt(\d{7})')
            if imdbId:
                return imdbId
    #or nothing
    return ''
2012-08-15 15:15:40 +00:00
def get_movie_poster(imdbId):
    '''
    Return the url of the primary poster image for the given imdb id,
    falling back to the parent series' poster for episodes,
    or '' if no poster exists.

    >>> get_movie_poster('0133093')
    'http://ia.media-imdb.com/images/M/MV5BMjEzNjg1NTg2NV5BMl5BanBnXkFtZTYwNjY3MzQ5._V1._SX338_SY475_.jpg'

    >>> get_movie_poster('0994352')
    'http://ia.media-imdb.com/images/M/MV5BMjA3NzMyMzU1MV5BMl5BanBnXkFtZTcwNjc1ODUwMg@@._V1._SX594_SY755_.jpg'
    '''
    info = ImdbCombined(imdbId)
    if 'posterId' in info:
        page = read_url(
            "http://www.imdb.com/rg/action-box-title/primary-photo/media/rm%s/tt%s"
            % (info['posterId'], imdbId))
        return find_re(page, 'img id="primary-img".*?src="(.*?)"')
    if 'series' in info:
        # episodes without their own poster inherit the series poster
        return get_movie_poster(info['series'])
    return ''
def get_episodes(imdbId, season=None):
    '''
    Return {'S01E01': episode_id, ...} for a series id.
    Without a season argument, every season listed on the episodes page
    is fetched recursively.
    '''
    episodes = {}
    url = 'http://www.imdb.com/title/tt%s/episodes' % imdbId
    if season:
        page = ox.cache.read_url(url + '?season=%d' % season)
        pattern = re.compile(
            '<div data-const="tt(\d{7})".*?>.*?<div>S(\d+), Ep(\d+)<\/div>\n<\/div>',
            re.DOTALL)
        for episode_id, s, ep in pattern.findall(page):
            episodes['S%02dE%02d' % (int(s), int(ep))] = episode_id
    else:
        page = ox.cache.read_url(url)
        seasons = re.compile('<strong>Season (\d+)</strong>').findall(page)
        if seasons:
            for s in range(1, int(seasons[0]) + 1):
                episodes.update(get_episodes(imdbId, s))
    return episodes
2012-08-15 15:15:40 +00:00
def max_votes():
    '''Return the highest vote count currently listed on imdb's most-voted chart.'''
    url = 'http://www.imdb.com/search/title?num_votes=500000,&sort=num_votes,desc'
    data = ox.cache.read_url(url)
    counts = re.compile('<td class="sort_col">([\d,]+)</td>').findall(data)
    return max(int(c.replace(',', '')) for c in counts)
2010-12-31 07:23:28 +00:00
def guess(title, director='', timeout=-1):
    '''Backwards-compatible alias for get_movie_id().'''
    return get_movie_id(title, director, timeout=timeout)
2010-07-07 23:25:57 +00:00
if __name__ == "__main__":
    import json
    # manual smoke test: parse one title ('0306414') and dump it as JSON
    print json.dumps(Imdb('0306414'), indent=2)
    #print json.dumps(Imdb('0133093'), indent=2)