# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import urllib2
from urllib import quote, unquote
import re
import os
import time

import chardet

import oxlib
from oxlib import stripTags, decodeHtml, findRe, findString
import oxlib.cache
from oxlib.normalize import normalizeTitle, normalizeImdbId
from oxlib import *

import google

'''
never timeout imdb data; to update the cache, remove the data from the cache folder
'''
def readUrlUnicode(url, data=None, headers=oxlib.cache.DEFAULT_HEADERS, timeout=-1):
    return oxlib.cache.readUrlUnicode(url, data, headers, timeout)
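
# usage sketch (assumes network access and a writable oxlib cache):
#   data = readUrlUnicode(getUrlBase('0133093'))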

'''
check if result is valid while updating:

def validate(result, header):
    return header['status'] == u'200'

try:
    d = oxlib.cache.readUrlUnicode(url, data, headers, timeout=0, valid=validate)
except oxlib.cache.InvalidResult, e:
    print e.headers
'''

def getMovieId(title, director='', year=''):
    '''
    >>> getMovieId('The Matrix')
    '0133093'
    '''
    if year:
        title = "%s (%s)" % (title, year)
    if director:
        query = 'site:imdb.com %s "%s"' % (director, title)
    else:
        query = 'site:imdb.com "%s"' % title
    for (name, url, desc) in google.find(query, 3, timeout=-1):
        if url.startswith('http://www.imdb.com/title/tt'):
            return url[28:35]
    return ''
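
# url[28:35] slices the seven-digit id out of result urls like
# http://www.imdb.com/title/tt0133093/ (len('http://www.imdb.com/title/tt') == 28).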

def getMovieData(imdbId):
    return IMDb(imdbId).parse()

# internal functions below
def getUrlBase(imdbId):
    return "http://www.imdb.com/title/tt%s/" % imdbId

def getRawMovieData(imdbId):
    imdbId = normalizeImdbId(imdbId)
    data = getMovieInfo(imdbId)
    data['credits'] = getMovieCredits(imdbId)
    data['poster'] = getMoviePoster(imdbId)
    data['company credits'] = getMovieCompanyCredits(imdbId)
    data['filming locations'] = getMovieLocations(imdbId)
    data['movie connections'] = getMovieConnections(imdbId)
    data['external reviews'] = getMovieExternalReviews(imdbId)
    data['trivia'] = getMovieTrivia(imdbId)
    data['keywords'] = getMovieKeywords(imdbId)
    data['media'] = {}
    data['media']['images'] = getMovieImages(imdbId)
    data['media']['trailers'] = getMovieTrailers(imdbId)
    data['plotsummary'] = getMoviePlot(imdbId)
    data['release dates'] = getMovieReleaseDates(imdbId)
    data['release date'] = getMovieReleaseDate(imdbId)
    return data
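
# usage sketch (hypothetical run; each key above triggers at least one request):
#   data = getRawMovieData('0133093')
#   print data['title'], data['year'], len(data['trivia'])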

def getMovieInfo(imdbId, timeout=-1):
    data = readUrlUnicode(getUrlBase(imdbId), timeout=timeout)
    info = dict()
    info['poster'] = findRe(data, 'name="poster".*?<img .*?src="(.*?)"')
    if info['poster'] and '_V' in info['poster']:
        info['poster'] = "%s.jpg" % info['poster'].split('._V')[0]

    def cleanUp(k):
        k = decodeHtml(k).replace(u'\xa0', ' ').strip()
        if k.endswith('more'): k = k[:-len('more')].strip()
        return k

    # most metadata fields live in <h5>key:</h5>value blocks
    for i in re.compile('<h5>(.*?):</h5>(.*?)<div class="info"', re.DOTALL).findall(data):
        title = stripTags(i[0]).strip().lower()
        if title in ('genre', ):
            txt = i[1].split('</div>')[0]
        else:
            txt = i[1]
        txt = stripTags(txt).strip()
        txt = cleanUp(txt)
        if title not in ('plot', 'trivia', 'filming locations', 'mpaa', 'tagline', 'original air date'):
            if '|' in txt:
                txt = [cleanUp(k) for k in txt.split('|')]
            elif ', ' in txt:
                txt = [cleanUp(k) for k in txt.split(', ')]
            elif title in ('country', 'language', 'genre'):
                txt = [cleanUp(txt), ]
        if title == 'tv series':
            info['series_imdb'] = findRe(i[1], 'tt(\d{7})')
        if title == 'original air date':
            info['series_episode_info'] = txt.split('\n')[-1].strip()
            txt = txt.split('\n')[0].strip()
        if not title.startswith('moviemeter'):
            info[title] = txt
    for key in ('user comments', 'writers (wga)', 'plot keywords'):
        if key in info:
            del info[key]
    if 'release date' in info:
        if isinstance(info['release date'], list):
            info['release date'] = info['release date'][0]
        info['release date'] = info['release date'].split('\n')[0]
    if 'plot' in info:
        info['plot'] = info['plot'].split('| add synopsis')[0].strip()
        info['plot'] = info['plot'].split('| full synopsis')[0].strip()
        if info['plot'] in ('add synopsis', 'full synopsis'):
            info['plot'] = ''

    #get Title
    title = ''
    year = ''
    html_title = findRe(data, '<div id="tn15title">(.*?)</div>')
    if not html_title:
        html_title = findRe(data, '<title>(.*?)</title>')
    else:
        html_title = html_title.split('<span class="pro-link">')[0]
    if html_title:
        html_title = html_title.replace('<br />', ' ').replace('  ', ' ')
        title = stripTags(html_title)
        title = decodeHtml(title)
        year = findRe(title, '\((\d{4})\)')
        if not year:
            year = findRe(title, '\((\d{4})')
        _y = findRe(title, r'(\([0-9\?]{4}[/IVXLCDM]*?\))')
        if _y:
            title = title.replace(_y, '')
        for t in ('TV series', 'TV-Series', 'TV mini-series', '(mini)', '(VG)', '(V)', '(TV)'):
            title = title.replace(t, '')
        title = title.strip()
        if title.find(u'\xa0') > -1:
            title = title[:title.find(u'\xa0')].strip()
        if title.startswith('"') and title.endswith('"'):
            title = title[1:-1]
    info['title'] = normalizeTitle(title)
    info['year'] = year

    #Series: titles like '"Show" Episode'
    if title.startswith('"') and title.find('"', 1) > 0 and \
            title.find('"', 1) == title.rfind('"'):
        episode_title = title[title.rfind('"')+1:]
        episode_title = re.sub("\?{4}", "", episode_title).strip()
        episode_title = re.sub("\d{4}", "", episode_title).strip()
        if episode_title == '-': episode_title = ''
        title = normalizeTitle(title[1:title.rfind('"')])
        if episode_title:
            info['episode title'] = episode_title
            info['series title'] = title
            info['title'] = "%s: %s" % (title, episode_title)
        else:
            info['title'] = title

    se = re.compile("Season (\d*), Episode (\d*)\)").findall(info.get('series_episode_info', ''))
    if se:
        info['season'] = int(se[0][0])
        info['episode'] = int(se[0][1])
        info['title'] = "%s (S%02dE%02d) %s" % (
            info['series title'], info['season'], info['episode'], info['episode title'])
        info['title'] = info['title'].strip()
    if 'series_episode_info' in info:
        del info['series_episode_info']

    #Rating
    rating = findRe(data, '<b>([\d\.]*?)/10</b>')
    if rating:
        info['rating'] = float(rating)
    else:
        info['rating'] = -1

    #Votes
    info['votes'] = -1
    if "user rating" in info:
        if isinstance(info['user rating'], list):
            info['user rating'] = ' '.join(info['user rating'])
        votes = findRe(info['user rating'], '([\d,]*?) votes')
        if votes:
            info['votes'] = int(votes.replace(',', ''))
    return info

def getMovieRuntimeSeconds(imdbId):
    info = getMovieInfo(imdbId)
    if 'runtime' in info:
        value = info['runtime'][0]
        parsed_value = findRe(value, '(.*?) min')
        parsed_value = findRe(parsed_value, '([0-9]+)')
        if not parsed_value:
            parsed_value = findRe(value, '(.*?) sec')
            parsed_value = findRe(parsed_value, '([0-9]+)')
            if not parsed_value:
                parsed_value = 0
            else:
                parsed_value = int(parsed_value)
        else:
            parsed_value = int(parsed_value) * 60
    else:
        parsed_value = -1
    return parsed_value
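
# e.g. a runtime entry of '136 min' yields 8160 seconds, '90 sec' yields 90,
# an unparseable runtime yields 0, and a missing runtime yields -1.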

def getMoviePoster(imdbId):
    info = getMovieInfo(imdbId)
    return info['poster']

def getMovieYear(imdbId):
    '''
    >>> getMovieYear('0315404')
    u'1964'

    >>> getMovieYear('0734840')
    u'1990'

    >>> getMovieYear('0815352')
    u'1964'
    '''
    info = getMovieInfo(imdbId)
    return info['year']

def getMovieTitle(imdbId):
    '''
    >>> getMovieTitle('0306414')
    u'The Wire'

    >>> getMovieTitle('0734840')
    u'Twin Peaks (S01E02) Episode #1.2'

    >>> getMovieTitle('0749451')
    u'The Wire (S01E01) The Target'
    '''
    info = getMovieInfo(imdbId)
    return info['title']

def getMovieAKATitles(imdbId):
    '''
    >>> getMovieAKATitles('0040980')
    [(u'Frauen der Nacht', u'Germany'),
     (u'Les femmes de la nuit', u'France'),
     (u'Women of the Night', u'(undefined)')]
    '''
    url = "%sreleaseinfo" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    titles = findRe(data, 'name="akas".*?<table.*?>(.*?)</table>')
    titles = re.compile("td>(.*?)</td>\n\n<td>(.*)</td>").findall(titles)
    return titles

def creditList(data, section=None):
    if section == 'cast':
        credits_ = re.compile('''<tr .*?<td class="nm">(.*?)</td><td class="ddd">.*?</td><td class="char">(.*?)</td></tr>''').findall(data)
    else:
        credits_ = re.compile('''<tr>.*?<td valign="top">(.*?)</td><td.*?</td><td valign="top">(.*?)</td></tr>''').findall(data)
    credits = []
    for c_ in credits_:
        c = [stripTags(decodeHtml(c_[0]).strip()), stripTags(decodeHtml(c_[1]).strip())]
        if section == 'writers':
            c[1] = c[1].replace('<br>', '').strip().replace(')', '').replace('(', '')
            if c[1].endswith(' and'): c[1] = c[1][:-4]
        credits.append(c)
    return credits
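
# cast rows carry name/character columns, all other sections name/credit;
# the two regexps above mirror that difference in the table markup.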

def getMovieCredits(imdbId):
    credits = dict()
    url = "%sfullcredits" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    groups = data.split('<h5>')
    for g in groups:
        section = re.compile('''name="(.*?)".*? href="/Glossary''').findall(g)
        if section:
            credits[section[0]] = creditList(g, section[0])
    return credits
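
# usage sketch (section names like 'directors' or 'cast' come straight from
# the anchors on the fullcredits page):
#   credits = getMovieCredits('0133093')
#   credits.get('directors', [])   # -> [[name, credit], ...]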

def getMovieTrailers(imdbId):
    from BeautifulSoup import BeautifulSoup

    url = "%strailers" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    soup = BeautifulSoup(data)
    videos = soup('div', {'class': "video-gallery"})
    trailers = []
    if videos:
        for a in videos[0]('a'):
            title = stripTags(unicode(a)).strip()
            url = 'http://www.imdb.com' + a['href']
            videoId = findRe(url, '/(vi\d*?)/')
            iframeUrl = "http://www.imdb.com/video/trailer/%s/player" % videoId
            iframe = readUrlUnicode(iframeUrl)
            videoUrl = unquote(findRe(iframe, 'addVariable\("file", "(.*?)"'))
            trailers.append({'title': title, 'url': url, 'iframe': iframeUrl, 'flv': videoUrl})
    return trailers

def getMovieQuotes(imdbId):
    url = "%squotes" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    quotes = re.compile('<b>(.*?)</b>:(.*?)<br>', re.DOTALL).findall(findString(data, '<a name="q'))
    quotes = [(q[0].strip(), q[1].strip()) for q in quotes]
    return quotes

def getMoviePlot(imdbId):
    url = "%splotsummary" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    plot = findRe(data, '<p class="plotpar">(.*?)<i>').split('</p>')[0]
    return plot.strip()

def getMovieTechnical(imdbId):
    url = "%stechnical" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    results = {}
    for t in re.compile('<h5>(.*?)</h5>(.*?)<br/>', re.DOTALL).findall(data):
        results[t[0].strip()] = t[1].strip()
    return results

def getMovieCompanyCredits(imdbId):
    url = "%scompanycredits" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    results = {}
    for field, c in re.compile('<h2>(.*?)</h2><ul>(.*?)</ul>').findall(data):
        results[field.strip()] = []
        for company in re.compile('<li>(.*?)</li>').findall(c):
            results[field.strip()].append(company)
    return results

def getMovieLocations(imdbId):
    url = "%slocations" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    locations = re.compile('<dt><a href="/List.*?>(.*?)</a></dt>').findall(data)
    locations = [decodeHtml(l) for l in locations]
    return locations

def getMovieImages(imdbId, keys=('still_frame', 'poster', 'product')):
    photos = {}
    for key in keys:
        url = "%smediaindex?refine=%s" % (getUrlBase(imdbId), key)
        data = readUrlUnicode(url)
        photos[key] = {}
        for s in re.compile('''<img alt="(.*?)".*?src="(http://ia.media-imdb.com/.*?.jpg)''').findall(data):
            img = "%s.jpg" % s[1].split('._V')[0]
            title = s[0]
            if key == 'still_frame':
                if "_CR0" not in s[1]:
                    photos[key][img] = title
            else:
                photos[key][img] = title
    return photos

def getMovieStills(imdbId):
    return getMovieImages(imdbId, ['still_frame'])['still_frame']

def getMoviePosters(imdbId):
    posters = getMovieImages(imdbId, ['poster'])['poster']
    poster = getMoviePoster(imdbId)
    if poster:
        posters[poster] = 'main poster'
    return posters

def getMovieTrivia(imdbId):
    url = "%strivia" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    data = findRe(data, '<ul class="trivia">(.*?)</ul>')
    trivia = re.compile('<li>(.*?)</li>', re.DOTALL).findall(data)

    def clean(t):
        t = decodeHtml(t)
        t = t.replace(u'”', '"')
        if t.endswith('<br><br>'):
            t = t[:-8]
        if t.endswith('<br>\n<br>'):
            t = t[:-len('<br>\n<br>')]
        return t.strip()
    trivia = [clean(t) for t in trivia]
    return trivia

def getMovieConnections(imdbId):
    url = "%smovieconnections" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    connections = {}
    for c in re.compile('''<h5>(.*?)</h5>(.*?)\n\n''', re.DOTALL).findall(data):
        connections[unicode(c[0])] = re.compile('''<a href="/title/tt(\d{7})/">''').findall(c[1])
    return connections

def getMovieKeywords(imdbId):
    url = "%skeywords" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    keywords = []
    for keyword in re.compile('''<a.*?href="/keyword.*?>(.*?)</a>''').findall(data):
        keyword = decodeHtml(keyword)
        keyword = keyword.replace(u'\xa0', ' ')
        keywords.append(keyword)
    return keywords

def getMovieExternalReviews(imdbId):
    url = "%sexternalreviews" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    data = findRe(data, '<ol>(.*?)</ol>')
    _reviews = re.compile('<li><a href="(http.*?)".*?>(.*?)</a></li>').findall(data)
    reviews = {}
    for r in _reviews:
        reviews[r[0]] = r[1]
    return reviews

def getMovieReleaseDate(imdbId):
    releasedates = getMovieReleaseDates(imdbId)
    first_release = None
    for r in releasedates:
        if not first_release or r[1] < first_release:
            first_release = r[1]
    return first_release

def _parseDate(d):
    '''
    >>> _parseDate('3 March 1972')
    '1972-03-03'
    '''
    try:
        parsed_date = time.strptime(d, "%d %B %Y")
        parsed_date = '%s-%02d-%02d' % (parsed_date.tm_year, parsed_date.tm_mon, parsed_date.tm_mday)
        return parsed_date
    except ValueError:
        try:
            parsed_date = time.strptime(d, "%B %Y")
            parsed_date = '%s-%02d-01' % (parsed_date.tm_year, parsed_date.tm_mon)
            return parsed_date
        except ValueError:
            pass
        try:
            parsed_date = time.strptime(d, "%Y")
            parsed_date = '%s-01-01' % (parsed_date.tm_year)
            return parsed_date
        except ValueError:
            pass
    return d
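
# fallback chain, illustrated: '3 March 1972' -> '1972-03-03',
# 'March 1972' -> '1972-03-01', '1972' -> '1972-01-01'; anything
# else is returned unchanged.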

def getMovieReleaseDates(imdbId):
    url = "%sreleaseinfo" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    releasedates = []
    regexp = '''<tr><td>(.*?)</td>.*?<td align="right">(.*?)</td>.*?<td>(.*?)</td></tr>'''

    for r in re.compile(regexp, re.DOTALL).findall(data):
        r_ = (stripTags(r[0]).strip(),
              _parseDate(stripTags(r[1]).strip()),
              decodeHtml(stripTags(r[2]).strip()))
        releasedates.append(r_)
    return releasedates
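
# each entry is a (country, date, remark) tuple; getMovieReleaseDate above
# picks the earliest date, which works as a plain string comparison because
# _parseDate emits YYYY-MM-DD.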

def getMovieBusinessSum(imdbId):
    business = getMovieBusiness(imdbId)
    b_ = {'budget': 0, 'gross': 0, 'profit': 0}
    if 'budget' in business:
        #b_['budget'] = sum([int(intValue(i.replace(',', ''))) for i in business['budget']])
        budget = filter(lambda x: x.startswith('$'), business['budget'])
        if not budget:
            budget = business['budget']
        b_['budget'] = int(intValue(budget[0].replace(',', '')))

    if 'gross' in business:
        gross = filter(lambda x: x.startswith('$'), business['gross'])
        if gross:
            b_['gross'] = int(intValue(gross[0].replace(',', '')))
        #b_['gross'] = sum([int(intValue(i.replace(',', ''))) for i in business['gross']])
        #if 'weekend gross' in business:
        #    b_['gross'] += sum([int(intValue(i.replace(',', ''))) for i in business['weekend gross']])
    if b_['budget'] and b_['gross']:
        b_['profit'] = b_['gross'] - b_['budget']
    return b_

def getMovieFlimingDates(imdbId):
    business = getMovieBusiness(imdbId)
    if 'filming dates' in business and business['filming dates']:
        return business['filming dates'][0]
    return ''

def getMovieBusiness(imdbId):
    url = "%sbusiness" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    business = {}
    for r in re.compile('''<h5>(.*?)</h5>(.*?)<br/>.<br/>''', re.DOTALL).findall(data):
        key = stripTags(r[0]).strip().lower()
        value = [decodeHtml(stripTags(b).strip()) for b in r[1].split('<br/>')]
        business[key] = value
    return business

def getMovieEpisodes(imdbId):
    url = "%sepisodes" % getUrlBase(imdbId)
    data = readUrlUnicode(url)
    episodes = {}
    regexp = r'''<h4>Season (.*?), Episode (.*?): <a href="/title/tt(.*?)/">(.*?)</a></h4>(.*?)</b><br>(.*?)<br/>'''
    for r in re.compile(regexp, re.DOTALL).findall(data):
        try:
            episode = "S%02dE%02d" % (int(r[0]), int(r[1]))
            episodes[episode] = {}
            episodes[episode]['imdb'] = r[2]
            episodes[episode]['title'] = r[3].strip()
            if episodes[episode]['title'].startswith('Episode #%d' % int(r[0])):
                episodes[episode]['title'] = u''
            description = decodeHtml(r[5])
            description = stripTags(description.split('Next US airings:')[0])
            episodes[episode]['description'] = description.strip()
            episodes[episode]['date'] = ''
            try:
                d = stripTags(r[4])
                d = d.replace('Original Air Date: ', '')
                d = time.strftime("%Y-%m-%d", time.strptime(d, '%d %B %Y'))
                episodes[episode]['date'] = d
            except:
                pass
        except:
            import traceback
            traceback.print_exc()
    return episodes
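
# keys look like 'S01E02'; each value carries 'imdb', 'title', 'description'
# and 'date' ('date' stays empty when the air date does not parse).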

# the old code below

class IMDb:
    def __init__(self, imdbId):
        self.imdb = imdbId
        self.pageUrl = getUrlBase(imdbId)

    def getPage(self):
        return readUrlUnicode(self.pageUrl)

    def parse_raw_value(self, key, value):
        if key in ('runtime', 'language', 'genre', 'country', 'tagline', 'plot_outline'):
            value = stripTags(value).strip()
        if key == 'runtime':
            parsed_value = getMovieRuntimeSeconds(self.imdb)
        elif key in ('country', 'language'):
            parsed_value = value.split(' / ')
            if len(parsed_value) == 1:
                parsed_value = parsed_value[0].split(' | ')
            parsed_value = [v.strip() for v in parsed_value]
        elif key == 'genre':
            parsed_value = value.replace('more', '').strip().split(' / ')
            if len(parsed_value) == 1:
                parsed_value = parsed_value[0].split(' | ')
            parsed_value = [v.strip() for v in parsed_value]
        elif key == 'tagline':
            parsed_value = value.replace('more', '').strip()
        elif key == 'plot_outline':
            parsed_value = value.replace('(view trailer)', '').strip()
            if parsed_value.endswith('more'):
                parsed_value = parsed_value[:-4].strip()
        elif key == 'tv_series':
            m = re.compile('<a href="/title/tt(.*?)/">(.*?)</a>').findall(value)
            if m:
                parsed_value = m[0][0]
            else:
                parsed_value = ''
        elif key == 'also_known_as':
            parsed_value = ''
            m = re.compile('(.*) \(International: English title').findall(value)
            if m:
                parsed_value = m[0]
            else:
                m = re.compile('(.*) \(USA').findall(value)
                if m:
                    parsed_value = m[0]
            parsed_value = parsed_value.split('<br />')[-1].split('(')[0]
            director = self.getCredits().get('director', None)
            if director:
                director = director[0]
                parsed_value = parsed_value.replace(director, '')
                if parsed_value.startswith("'s"):
                    parsed_value = parsed_value[2:].strip()
            parsed_value = decodeHtml(parsed_value.strip())
        else:
            print value
            parsed_value = value
        return parsed_value

    def parseYear(self):
        return getMovieYear(self.imdb)

    def parse(self):
        from BeautifulSoup import BeautifulSoup

        data = self.getPage()
        IMDbDict = {}
        info = getMovieInfo(self.imdb)
        #Poster
        IMDbDict['poster'] = getMoviePoster(self.imdb)
        if not IMDbDict['poster']:
            IMDbDict['poster'] = 'http://i.imdb.com/Heads/npa.gif'
        #Title, Year
        IMDbDict['year'] = self.parseYear()
        IMDbDict['title'] = getMovieTitle(self.imdb)

        #Rating
        #FIXME: in the future this could be just:
        #m = findRe(data, '<span id="voteuser">(.*?)</span>')
        m = re.compile('<b>(.*?)/10</b>', re.IGNORECASE).search(data)
        if m:
            r = stripTags(m.group(1))
            if r:
                IMDbDict['rating'] = int(float(r) * 1000)
            else:
                IMDbDict['rating'] = -1
        else:
            IMDbDict['rating'] = -1
        #Votes
        IMDbDict['votes'] = info['votes']

        data = data.replace('\n', ' ')
        #some values
        keys = ('runtime', 'language', 'genre', 'country', 'tagline', 'plot_outline', 'tv_series', 'also_known_as')
        for key in keys:
            IMDbDict[key] = ''
        IMDbDict['runtime'] = 0
        soup = BeautifulSoup(data)
        for info in soup('div', {'class': 'info'}):
            key = unicode(info).split('</h5>')[0].split('<h5>')
            if len(key) > 1:
                raw_value = unicode(info).split('</h5>')[1]
                key = key[1][:-1].lower().replace(' ', '_')
                if key in keys:
                    IMDbDict[key] = self.parse_raw_value(key, raw_value)
        IMDbDict['title_english'] = IMDbDict.pop('also_known_as', IMDbDict['title'])
        #is episode
        IMDbDict['episode_of'] = IMDbDict.pop('tv_series', '')

        IMDbDict['episodes'] = getMovieEpisodes(self.imdb)
        if IMDbDict['episodes']:
            IMDbDict['tvshow'] = True
        else:
            IMDbDict['tvshow'] = False
        IMDbDict['credits'] = self.getCredits()
        IMDbDict['plot'] = getMoviePlot(self.imdb)
        IMDbDict['keywords'] = getMovieKeywords(self.imdb)
        IMDbDict['trivia'] = getMovieTrivia(self.imdb)
        IMDbDict['connections'] = getMovieConnections(self.imdb)
        IMDbDict['locations'] = getMovieLocations(self.imdb)
        IMDbDict['release_date'] = getMovieReleaseDate(self.imdb)
        IMDbDict['business'] = getMovieBusinessSum(self.imdb)
        IMDbDict['reviews'] = getMovieExternalReviews(self.imdb)
        IMDbDict['stills'] = getMovieStills(self.imdb)
        #IMDbDict['trailer'] = getMovieTrailer(self.imdb)
        self.IMDbDict = IMDbDict

        if IMDbDict['episode_of']:
            episode_of = getMovieInfo(IMDbDict['episode_of'])
            for key in ('country', 'language'):
                if not IMDbDict[key]:
                    IMDbDict[key] = episode_of[key]
        return self.IMDbDict

    def getCredits(self):
        raw_credits = getMovieCredits(self.imdb)
        credits = {}

        def getNames(creditList):
            return [stripTags(decodeHtml(c[0])) for c in creditList]

        credits['director'] = getNames(raw_credits.get('directors', ''))
        credits['writer'] = getNames(raw_credits.get('writers', ''))
        credits['producer'] = getNames(raw_credits.get('producers', ''))
        credits['cinematographer'] = getNames(raw_credits.get('cinematographers', ''))
        credits['editor'] = getNames(raw_credits.get('editors', ''))
        credits['cast'] = [(stripTags(decodeHtml(c[0])), stripTags(decodeHtml(c[1]))) for c in raw_credits.get('cast', [])]

        self.credits = credits
        return self.credits
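
# IMDb('0133093').parse() is exactly what getMovieData() wraps; the returned
# IMDbDict bundles this page scrape with the per-page helpers above.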

def guess(title, director=''):
    #FIXME: proper file -> title
    title = title.split('-')[0]
    title = title.split('(')[0]
    title = title.split('.')[0]
    title = title.strip()
    imdb_url = 'http://www.imdb.com/find?q=%s' % quote(title.encode('utf-8'))
    return_url = ''

    #let's first try google
    #i.e. site:imdb.com Michael Stevens Sin
    if director:
        search = 'site:imdb.com %s "%s"' % (director, title)
    else:
        search = 'site:imdb.com "%s"' % title
    for (name, url, desc) in google.find(search, 2):
        if url.startswith('http://www.imdb.com/title/tt'):
            return normalizeImdbId(int(oxlib.intValue(url)))

    try:
        req = urllib2.Request(imdb_url, None, oxlib.net.DEFAULT_HEADERS)
        u = urllib2.urlopen(req)
        data = u.read()
        return_url = u.url
        u.close()
    except:
        return None
    if return_url.startswith('http://www.imdb.com/title/tt'):
        return return_url[28:35]
    if data:
        imdb_id = findRe(data.replace('\n', ' '), 'Popular Results.*?<ol><li>.*?<a href="/title/tt(.......)')
        if imdb_id:
            return imdb_id

    imdb_url = 'http://www.imdb.com/find?q=%s;s=tt;site=aka' % quote(title.encode('utf-8'))
    req = urllib2.Request(imdb_url, None, oxlib.net.DEFAULT_HEADERS)
    u = urllib2.urlopen(req)
    data = u.read()
    return_url = u.url
    u.close()
    if return_url.startswith('http://www.imdb.com/title/tt'):
        return return_url[28:35]

    return None
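
# guess() tries three lookups in order: a google site:imdb.com search, imdb's
# /find redirect, and finally the aka-title /find page; it returns the first
# seven-digit id it can extract, or None.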

def getEpisodeData(title, episode, show_url=None):
    '''
    Collect information about an episode.

    Returns dict with title, show, description and episode
    '''
    episodeData = {
        'title': u'',
        'show': title,
        'description': u'',
        'episode': episode,
    }
    if not show_url:
        imdbid = guess(title)
    else:
        imdbid = "%07d" % int(re.compile('title/tt(\d*)').findall(show_url)[0])
    if imdbid:
        i = IMDb(imdbid).parse()
        episodeData['title'] = i['episodes'][episode]['title']
        episodeData['description'] = i['episodes'][episode]['description']
        episodeData['imdb'] = i['episodes'][episode]['imdb']
    return episodeData

def getPersonData(imdbId):
    imdbId = normalizeImdbId(imdbId)
    url = u'http://www.imdb.com/name/nm%s/' % imdbId
    data = readUrlUnicode(url)
    info = dict()
    info['name'] = findRe(data, u'<title>(.*?)</title>')
    filmo = data.split(u'<h3>Additional Details</h3>')[0]
    movies = {}
    for part in filmo.split(u'<div class="filmo"')[1:]:
        section = findRe(part, u'a name=".*?">(.*?):</a></h5>')
        section = decodeHtml(section)
        movies[section] = re.compile(u'href="/title/tt(\d{7})/"').findall(part)
    info['movies'] = movies
    return info
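
# sketch of the returned shape (section names depend on imdb's markup at the
# time, e.g. 'Actor'):
#   {'name': ..., 'movies': {'Actor': ['0133093', ...], ...}}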


if __name__ == '__main__':
    import sys
    #print parse(sys.argv[1])
    print "imdb:", guess(sys.argv[1])