python-oxweb/oxweb/impawards.py

# vi:si:et:sw=4:sts=4:ts=4
# encoding: utf-8
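# Scrape movie poster URLs and images from impawards.com (IMP Awards),
# using oxlib's cached downloads and the imdb module's getMovieId for id lookup.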
import os
import re
import sys  # used by sys.exit() in archivePosters()
from oxlib.cache import getUrlUnicode
from oxlib.html import stripTags
from oxlib.text import findRe
import imdb
def getMovieData(title = '', director = '', imdbId = ''):
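    # Resolve the IMDb id (via imdb.getMovieId) if it was not passed in, then
    # walk all impawards.com archive pages and collect the poster URLs of
    # every movie page whose IMDb id matches.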
    data = {'posterUrls': []}
    if not imdbId:
        imdbId = imdb.getMovieId(title, director)
    print imdbId
    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
    pages = int(findRe(html, '<a href = page(.*?).html>'))
    for page in range(pages + 1, 0, -1):
        print page
        if page <= pages:
            # latest.html was already fetched above; older pages live at pageN.html
            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
        urls = parseArchivePage(html)
        print urls
        for url in urls:
            html = getUrlUnicode(url)
            d = parseMoviePage(html)
            print d
            if d['imdbId'] == imdbId:
                data['posterUrls'].append(d['posterUrl'])
                print d['posterUrl']
    data['posterUrls'].sort()
    return data

def parseArchivePage(html):
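    # Turn the '<a href = ../...>' links of an archive page into absolute
    # movie page URLs.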
    urls = []
    results = re.compile('<a href = \.\./(.*?)>', re.DOTALL).findall(html)
    for result in results:
        urls.append('http://impawards.com/%s' % result)
    return urls

def parseMoviePage(html):
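    # Extract IMDb id, title, year and poster URL from a movie page; if an
    # '*_xlg.html' link is present, follow it to get the extra-large poster.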
    data = {}
    data['imdbId'] = findRe(html, 'imdb.com/title/tt(.*?) ')
    data['title'] = stripTags(findRe(html, '<p class="name white">(.*?) \(<a href="alpha1.html">'))
    data['year'] = findRe(html, '\(<a href="alpha1.html">(.*?)</a>\)')
    result = findRe(html, '<a href = (\w*?_xlg.html)')
    if result:
        # follow the link to the extra-large poster page
        url = 'http://impawards.com/%s/%s' % (data['year'], result)
        html = getUrlUnicode(url, timeout = -1)
        data['posterUrl'] = 'http://impawards.com/%s/%s' % (data['year'], findRe(html, '<img SRC="(.*?)"'))
    else:
        data['posterUrl'] = 'http://impawards.com/%s/%s' % (data['year'], findRe(html, '<img src="(posters.*?)" alt='))
    return data

def archivePosters():
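    # Download every poster into a local directory tree under pathname,
    # grouped by IMDb id prefix; files that already exist are skipped.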
    import os
    from oxlib.net import getUrl
    pathname = '/Volumes/Rolux Home/Desktop/Data/impawards.com'
    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
    pages = int(findRe(html, '<a href= page(.*?).html>'))
    for page in range(pages + 1, 0, -1):
        print "Page %d of %d" % (page, pages)
        if page <= pages:
            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
        urls = parseArchivePage(html)
        for url in urls:
            html = getUrlUnicode(url)
            data = parseMoviePage(html)
            print data
            if '"' in data['posterUrl']:
                # a quote in the poster URL looks like a parse failure; print the page and stop
                print url
                sys.exit()
            dirname = '%s/%s/%s/%s' % (pathname, data['imdbId'][:1], data['imdbId'][:4], data['imdbId'])
            filename = '%s/%s' % (dirname, os.path.split(data['posterUrl'])[1])
            if not os.path.exists(filename):
                jpg = getUrl(data['posterUrl'])
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                # write the image as binary data
                f = open(filename, 'wb')
                f.write(jpg)
                f.close()

def cleanup():
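    # Remove previously downloaded files whose names contain a stray quote character.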
    for dirname, dirs, files in os.walk('/Volumes/Rolux Home/Desktop/Data/impawards.com'):
        for filename in files:
            if '"' in filename:
                print filename
                os.remove(dirname + '/' + filename)


if __name__ == '__main__':
    # cleanup()
    archivePosters()
    getMovieData('Brick', 'Rian Johnson')