python-ox/ox/web/imdb.py

# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import urllib2
from urllib import quote, unquote
import re
import os
import time
import ox
from ox import findRe, stripTags
from ox.normalize import normalizeTitle, normalizeImdbId
from siteparser import SiteParser
import google
class Imdb(SiteParser):
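    '''
    Scraper for IMDb title pages, built on SiteParser.

    Each entry in `regex` names the IMDb sub-page it is scraped from
    ('combined', 'releaseinfo', ...), the pattern (or chain of patterns and
    callables applied in sequence) used to extract the value, and the 'type'
    the result is converted to (string, list, int, float, date).
    '''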
    regex = {
        'alternative_titles': {
            'page': 'releaseinfo',
            're': [
                'name="akas".*?<table.*?>(.*?)</table>',
                "td>(.*?)</td>\n\n<td>(.*?)</td>"
            ],
            'type': 'list'
        },
        'cast': {
            'page': 'combined',
            're': [
                '<td class="nm">.*?>(.*?)</a>.*?<td class="char">(.*?)</td>',
                lambda ll: [stripTags(l) for l in ll]
            ],
            'type': 'list'
        },
        'cinematographers': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Cinematography by</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'connections': {
            'page': 'movieconnections',
            're': '<h5>(.*?)</h5>(.*?)\n\n',
            'type': 'list'
        },
        'countries': {
            'page': 'combined',
            're': '<a href="/Sections/Countries/.*?/">(.*?)</a>',
            'type': 'list'
        },
        'directors': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Directed by</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'editors': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Film Editing by</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'episode_title': {
            'page': 'combined',
            're': '<div id="tn15title">.*?<em>(.*?)</em>',
            'type': 'string'
        },
        'filming_locations': {
            'page': 'locations',
            're': '<a href="/search/title\?locations=.*?">(.*?)</a>',
            'type': 'list'
        },
        'genres': {
            'page': 'combined',
            're': '<a href="/Sections/Genres/.*?/">(.*?)</a>',
            'type': 'list'
        },
        'keywords': {
            'page': 'keywords',
            're': '<a href="/keyword/.*?/">(.*?)</a>',
            'type': 'list'
        },
        'languages': {
            'page': 'combined',
            're': '<a href="/Sections/Languages/.*?/">(.*?)</a>',
            'type': 'list'
        },
        'original_title': {
            'page': 'combined',
            're': '<span class="title-extra">(.*?) <i>\(original title\)</i></span>',
            'type': 'string'
        },
        'plot': {
            'page': 'plotsummary',
            're': '</div>.*?<p class="plotpar">(.*?)<i>',
            'type': 'string'
        },
        'poster_id': {
            'page': 'combined',
            're': '/primary-photo/media/rm(.*?)/tt',
            'type': 'list'
        },
        'poster_ids': {
            'page': 'posters',
            're': '/unknown-thumbnail/media/rm(.*?)/tt',
            'type': 'list'
        },
        'producers': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Produced by</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'rating': {
            'page': 'combined',
            're': '<div class="starbar-meta">.*?<b>([\d,.]+?)/10</b>',
            'type': 'float'
        },
        'release date': {
            'page': 'releaseinfo',
            're': '<a href="/date/(\d{2})-(\d{2})/">.*?</a> <a href="/year/(\d{4})/">',
            'type': 'date'
        },
        'reviews': {
            'page': 'externalreviews',
            're': [
                '<ol>(.*?)</ol>',
                '<li><a href="(http.*?)".*?>(.*?)</a></li>'
            ],
            'type': 'list'
        },
        'runtime': {
            'page': 'combined',
            're': '<h5>Runtime:</h5><div class="info-content">.*?([0-9]+ sec|[0-9]+ min).*?</div>',
            'type': 'string'
        },
        'season': {
            'page': 'combined',
            're': [
                '<h5>Original Air Date:</h5>.*?<div class="info-content">(.*?)</div>',
                '\(Season (\d+), Episode \d+\)',
            ],
            'type': 'int'
        },
        'episode': {
            'page': 'combined',
            're': [
                '<h5>Original Air Date:</h5>.*?<div class="info-content">(.*?)</div>',
                '\(Season \d+, Episode (\d+)\)',
            ],
            'type': 'int'
        },
        'series': {
            'page': 'combined',
            're': '<h5>TV Series:</h5>.*?<a href="/title/tt(\d{7})',
            'type': 'string'
        },
        'title': {
            'page': 'combined',
            're': '<h1>(.*?) <span>',
            'type': 'string'
        },
        'trivia': {
            'page': 'trivia',
            're': '<div class="sodatext">(.*?)<br>',
            'type': 'list',
        },
        'votes': {
            'page': 'combined',
            're': '<a href="ratings" class="tn15more">([\d,]*?) votes</a>',
            'type': 'string'
        },
        'writers': {
            'page': 'combined',
            're': [
                lambda data: data.split('Series Crew')[0],
                'Writing credits</a>(.*?)</table>',
                '<a href="/name/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'year': {
            'page': 'combined',
            're': '<meta name="og:title" content=".*?\((\d{4})\).*?"',
            'type': 'int'
        }
    }
    def __init__(self, id, timeout=-1):
        self.baseUrl = "http://www.imdb.com/title/tt%s/" % id
        super(Imdb, self).__init__(timeout)

        # strip the quotes IMDb puts around TV series titles
        if 'title' in self and self['title'].startswith('"') and self['title'].endswith('"'):
            self['title'] = self['title'][1:-1]
        # normalize runtime to seconds
        if 'runtime' in self and self['runtime']:
            if 'min' in self['runtime']: base=60
            else: base=1
            self['runtime'] = int(findRe(self['runtime'], '([0-9]+)')) * base
        if 'runtime' in self and not self['runtime']:
            del self['runtime']
        if 'votes' in self: self['votes'] = self['votes'].replace(',', '')

        # turn the flat connections list into a dict: relation -> list of imdb ids
        if 'connections' in self:
            cc={}
            if len(self['connections']) == 2 and isinstance(self['connections'][0], basestring):
                self['connections'] = [self['connections']]
            for rel, data in self['connections']:
                cc[unicode(rel)] = re.compile('<a href="/title/tt(\d{7})/">').findall(data)
            self['connections'] = cc
        for key in ('countries', 'genres'):
            if key in self:
                self[key] = filter(lambda x: x.lower() != 'home', self[key])
        # episodes get a combined "Series (SxxEyy) Episode" title
        if 'series' in self:
            if 'episode_title' in self:
                self['series_title'] = self['title']
                self['title'] = "%s: %s" % (self['series_title'], self['episode_title'])
            if 'episode_title' in self and 'season' in self and 'episode' in self:
                self['title'] = "%s (S%02dE%02d) %s" % (
                    self['series_title'], self['season'], self['episode'], self['episode_title'])
        else:
            for key in ('series_title', 'episode_title', 'season', 'episode'):
                if key in self:
                    del self[key]

class ImdbCombined(Imdb):
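    '''
    Variant of Imdb that keeps only the keys scraped from the 'combined'
    page, so a single page request is enough.
    '''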
    def __init__(self, id, timeout=-1):
        _regex = {}
        for key in self.regex:
            if self.regex[key]['page'] == 'combined':
                _regex[key] = self.regex[key]
        self.regex = _regex
        super(ImdbCombined, self).__init__(id, timeout)

def getMovieId(title, director='', year=''):
    '''
    >>> getMovieId('The Matrix')
    '0133093'
    '''
    if year:
        title = "%s (%s)" % (title, year)
    if director:
        query = 'site:imdb.com %s "%s"' % (director, title)
    else:
        query = 'site:imdb.com "%s"' % title
    for (name, url, desc) in google.find(query, 5, timeout=-1):
        if url.startswith('http://www.imdb.com/title/tt'):
            return url[28:35]
    return ''

def guess(title, director='', timeout=google.DEFAULT_TIMEOUT):
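    '''
    Try to guess the IMDb id for a title (and optional director): first via
    a site:imdb.com Google query, then by falling back to IMDb's own find
    page. Returns the id as a string, or None.
    '''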
    #FIXME: proper file -> title
    title = title.split('-')[0]
    title = title.split('(')[0]
    title = title.split('.')[0]
    title = title.strip()
    imdb_url = 'http://www.imdb.com/find?q=%s' % quote(title.encode('utf-8'))
    return_url = ''
    # let's first try google,
    # i.e. site:imdb.com Michael Stevens "Sin"
    if director:
        search = 'site:imdb.com %s "%s"' % (director, title)
    else:
        search = 'site:imdb.com "%s"' % title
    for (name, url, desc) in google.find(search, 2, timeout=timeout):
        if url.startswith('http://www.imdb.com/title/tt'):
            return normalizeImdbId(int(ox.intValue(url)))
    try:
        req = urllib2.Request(imdb_url, None, ox.net.DEFAULT_HEADERS)
        u = urllib2.urlopen(req)
        data = u.read()
        return_url = u.url
        u.close()
    except:
        return None
    if return_url.startswith('http://www.imdb.com/title/tt'):
        return return_url[28:35]
    if data:
        imdb_id = findRe(data.replace('\n', ' '), 'Popular Results.*?<ol><li>.*?<a href="/title/tt(.......)')
        if imdb_id:
            return imdb_id
    imdb_url = 'http://www.imdb.com/find?q=%s;s=tt;site=aka' % quote(title.encode('utf-8'))
    req = urllib2.Request(imdb_url, None, ox.net.DEFAULT_HEADERS)
    u = urllib2.urlopen(req)
    data = u.read()
    return_url = u.url
    u.close()
    if return_url.startswith('http://www.imdb.com/title/tt'):
        return return_url[28:35]
    return None

if __name__ == "__main__":
    import json
    print json.dumps(Imdb('0306414'), indent=2)
    #print json.dumps(Imdb('0133093'), indent=2)