2008-06-19 09:47:02 +00:00
|
|
|
# vi:si:et:sw=4:sts=4:ts=4
|
|
|
|
# encoding: utf-8
|
2008-05-09 11:21:42 +00:00
|
|
|
import re
|
|
|
|
|
2008-07-03 09:24:49 +00:00
|
|
|
from oxlib.cache import getUrlUnicode
|
|
|
|
from oxlib.html import stripTags
|
|
|
|
from oxlib.text import findRe
|
2008-05-09 11:21:42 +00:00
|
|
|
|
2008-07-03 09:21:18 +00:00
|
|
|
import imdb
|
|
|
|
|
2008-05-09 11:21:42 +00:00
|
|
|
|
|
|
|
def getMovieData(title = '', director = '', imdbId = ''):
    """
    Collect poster URLs from impawards.com for one movie.

    If imdbId is not given it is resolved from title/director via
    imdb.getMovieId(). The whole impawards archive is then walked,
    newest page first; every linked movie page whose IMDb id matches
    contributes its poster URL.

    Returns a dict: {'posterUrls': sorted list of poster URL strings}.
    NOTE(review): this scans the entire archive, so it is slow by design.
    """
    data = {'posterUrls': []}
    if not imdbId:
        imdbId = imdb.getMovieId(title, director)
    # latest.html is the newest archive page; the page<N>.html link found
    # on it gives the number of older, numbered pages.
    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
    pages = int(findRe(html, '<a href = page(.*?).html>'))
    for page in range(pages + 1, 0, -1):
        # page == pages + 1 stands for latest.html itself (already fetched
        # above); every lower number lives at page<N>.html.
        if page <= pages:
            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
        urls = parseArchivePage(html)
        for url in urls:
            html = getUrlUnicode(url)
            d = parseMoviePage(html)
            if d['imdbId'] == imdbId:
                data['posterUrls'].append(d['posterUrl'])
    data['posterUrls'].sort()
    return data
|
|
|
|
|
|
|
|
def parseArchivePage(html):
    """
    Return the absolute impawards.com URL of every movie page linked
    from an archive page (links of the form <a href = ../YEAR/name.html>).
    """
    link = re.compile('<a href = \.\./(.*?)>', re.DOTALL)
    return ['http://impawards.com/%s' % path for path in link.findall(html)]
|
|
|
|
|
|
|
|
def parseMoviePage(html):
    """
    Extract movie data from an impawards.com movie page.

    Returns a dict with keys 'imdbId', 'title', 'year' and 'posterUrl'.
    When the page links to an extra-large poster page (*_xlg.html) that
    page is fetched and its image used; otherwise the inline image on the
    movie page itself is taken.
    """
    data = {
        'imdbId': findRe(html, 'imdb.com/title/tt(.*?) '),
        'title': stripTags(findRe(html, '<table WIDTH="400" BGCOLOR="#222222">(.*?) \(<a href="eligible.html">')),
        'year': findRe(html, '\(<a href="eligible.html">(.*?)</a>\)'),
    }
    xlgPage = findRe(html, '<a href = (\w*?_xlg.html) target= _blank>')
    if not xlgPage:
        # no xlg page: fall back to the inline poster image on this page
        data['posterUrl'] = 'http://impawards.com/%s/%s' % (data['year'], findRe(html, '<td align=center><br><img SRC="(.*?)"'))
    else:
        xlgHtml = getUrlUnicode('http://impawards.com/%s/%s' % (data['year'], xlgPage), timeout = -1)
        data['posterUrl'] = parsePosterPage(xlgHtml, data['year'])['posterUrl']
    return data
|
|
|
|
|
|
|
|
def parsePosterPage(html, year):
    """
    Parse an impawards *_xlg poster page and return {'posterUrl': ...},
    built from the given year and the first <img SRC="..."> on the page.
    """
    imagePath = findRe(html, '<img SRC="(.*?)"')
    return {'posterUrl': 'http://impawards.com/%s/%s' % (year, imagePath)}
|
|
|
|
|
2008-05-09 14:30:18 +00:00
|
|
|
def archivePosters():
    """
    Mirror every poster from impawards.com into a local directory tree.

    Walks all archive pages (newest first), parses each linked movie page
    and downloads its poster to
    <pathname>/<imdbId[:4]>/<imdbId>/<poster basename>,
    skipping files that already exist on disk.
    """
    import os
    from oxlib.net import getUrl
    pathname = '/Volumes/Rolux Home/Desktop/Data/impawards.com'
    # latest.html is the newest archive page; the page<N>.html link found
    # on it gives the number of older, numbered pages.
    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
    pages = int(findRe(html, '<a href = page(.*?).html>'))
    for page in range(pages + 1, 0, -1):
        # page == pages + 1 stands for latest.html itself, already fetched.
        if page <= pages:
            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
        urls = parseArchivePage(html)
        for url in urls:
            html = getUrlUnicode(url)
            data = parseMoviePage(html)
            dirname = '%s/%s/%s' % (pathname, data['imdbId'][:4], data['imdbId'])
            filename = '%s/%s' % (dirname, os.path.split(data['posterUrl'])[1])
            if not os.path.exists(filename):
                jpg = getUrl(data['posterUrl'])
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                # 'wb': posters are JPEG bytes, not text -- text mode would
                # corrupt the data on platforms with newline translation
                f = open(filename, 'wb')
                try:
                    f.write(jpg)
                finally:
                    f.close()
|
|
|
|
|
|
|
|
|
2008-05-09 11:21:42 +00:00
|
|
|
if __name__ == '__main__':
    # Mirror the whole archive, then demo a single-movie lookup.
    archivePosters()
    getMovieData('Brick', 'Rian Johnson')
|