96 lines
3.5 KiB
Python
96 lines
3.5 KiB
Python
# vi:si:et:sw=4:sts=4:ts=4
|
|
# encoding: utf-8
|
|
import os
|
|
import re
|
|
|
|
from oxlib.cache import getUrlUnicode
|
|
from oxlib.html import stripTags
|
|
from oxlib.text import findRe
|
|
|
|
import imdb
|
|
|
|
|
|
def getMovieData(title = '', director = '', imdbId = ''):
    """Collect all IMP Awards poster URLs for one movie.

    Looks the movie up by IMDb id (resolving title/director via imdb.getMovieId
    when no id is given), then walks every archive page on impawards.com and
    gathers the poster URL of each entry whose IMDb id matches.

    Returns a dict: {'posterUrls': sorted list of poster URL strings}.
    NOTE(review): this crawls the entire archive on every call — slow by design,
    relies on oxlib's URL cache.
    """
    data = {'posterUrls': []}
    if not imdbId:
        imdbId = imdb.getMovieId(title, director)
    print(imdbId)
    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
    # Highest archive page number, parsed from the pagination links.
    pages = int(findRe(html, '<a href = page(.*?).html>'))
    # Iterate pages + 1 .. 1; the first iteration (page == pages + 1) reuses
    # the already-fetched latest.html, all later ones fetch pageN.html.
    for page in range(pages + 1, 0, -1):
        print(page)
        if page <= pages:
            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
        urls = parseArchivePage(html)
        print(urls)
        for url in urls:
            html = getUrlUnicode(url)
            d = parseMoviePage(html)
            print(d)
            if d['imdbId'] == imdbId:
                data['posterUrls'].append(d['posterUrl'])
                print(d['posterUrl'])
    data['posterUrls'].sort()
    return data
|
|
|
|
def parseArchivePage(html):
    """Extract movie-page URLs from an impawards.com archive page.

    Matches every '<a href = ../...>' link and rewrites the relative
    target into an absolute http://impawards.com/ URL.
    """
    matches = re.findall('<a href = \.\./(.*?)>', html, re.DOTALL)
    return ['http://impawards.com/%s' % path for path in matches]
|
|
|
|
def parseMoviePage(html):
    """Scrape one impawards.com movie page.

    Returns a dict with keys:
      'imdbId'    - digits of the linked imdb.com/title/tt... URL
      'title'     - movie title, tags stripped
      'year'      - year text taken from the alpha1.html link
      'posterUrl' - absolute URL of the poster image; when an extra-large
                    (_xlg) page exists, that page is fetched and its image
                    used instead of the inline one.

    Regex patterns are raw strings (the escapes \\( and \\w belong to the
    regex, not the Python string) — behavior is unchanged.
    """
    data = {}
    data['imdbId'] = findRe(html, r'imdb.com/title/tt(.*?) ')
    data['title'] = stripTags(findRe(html, r'<p class="name white">(.*?) \(<a href="alpha1.html">'))
    data['year'] = findRe(html, r'\(<a href="alpha1.html">(.*?)</a>\)')
    # Prefer the extra-large poster page when the site offers one.
    result = findRe(html, r'<a href = (\w*?_xlg.html)')
    if result:
        url = 'http://impawards.com/%s/%s' % (data['year'], result)
        html = getUrlUnicode(url, timeout = -1)
        data['posterUrl'] = 'http://impawards.com/%s/%s' % (data['year'], findRe(html, r'<img SRC="(.*?)"'))
    else:
        data['posterUrl'] = 'http://impawards.com/%s/%s' % (data['year'], findRe(html, r'<img src="(posters.*?)" alt='))
    return data
|
|
|
|
def archivePosters():
    """Mirror every poster on impawards.com into a local directory tree.

    Walks all archive pages, parses each movie page, and stores each poster
    under <pathname>/<id[:1]>/<id[:4]>/<imdbId>/<basename>. Files that
    already exist locally are skipped.

    Fixes vs. original: 'sys' is now imported (sys.exit() previously raised
    NameError), and posters are written in binary mode ('wb') since the
    payload is JPEG data, not text.
    """
    import os
    import sys
    from oxlib.net import getUrl
    pathname = '/Volumes/Rolux Home/Desktop/Data/impawards.com'
    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
    # NOTE(review): getMovieData uses the pattern '<a href = page...' (space
    # before '='), this one does not — one of the two is likely stale; confirm
    # against the live archive markup.
    pages = int(findRe(html, '<a href= page(.*?).html>'))
    # First iteration (page == pages + 1) reuses latest.html; see guard below.
    for page in range(pages + 1, 0, -1):
        print("Page %d of %d" % (page, pages))
        if page <= pages:
            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
        urls = parseArchivePage(html)
        for url in urls:
            html = getUrlUnicode(url)
            data = parseMoviePage(html)
            print(data)
            if '"' in data['posterUrl']:
                # Malformed poster URL: print the offending page and stop so
                # it can be inspected (see cleanup() for removing bad files).
                print(url)
                sys.exit()
            dirname = '%s/%s/%s/%s' % (pathname, data['imdbId'][:1], data['imdbId'][:4], data['imdbId'])
            filename = '%s/%s' % (dirname, os.path.split(data['posterUrl'])[1])
            if not os.path.exists(filename):
                jpg = getUrl(data['posterUrl'])
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                f = open(filename, 'wb')
                f.write(jpg)
                f.close()
|
|
|
|
def cleanup():
    """Remove archived poster files whose names contain a double quote.

    These are the malformed downloads that archivePosters() aborts on.
    """
    root = '/Volumes/Rolux Home/Desktop/Data/impawards.com'
    for dirname, dirs, files in os.walk(root):
        for name in files:
            if '"' not in name:
                continue
            print(name)
            os.remove(dirname + '/' + name)
|
|
|
|
|
|
if __name__ == '__main__':
    # Script entry point: mirror the full poster archive, then demo a lookup.
    # cleanup()
    archivePosters()
    getMovieData('Brick', 'Rian Johnson')
|