cleaning up impawards module

This commit is contained in:
Rolux 2009-07-13 19:55:28 +02:00
parent 928e6a4769
commit 04d9d7c500

View file

@ -9,88 +9,80 @@ from oxlib.text import findRe
import imdb import imdb
def getData(id):
    '''Return url, imdbId, title, year and poster urls for an impawards id.

    `id` is 'year/movie_name', e.g. '1991/silence_of_the_lambs'.

    >>> getData('1991/silence_of_the_lambs')['imdbId']
    u'0102926'
    >>> getData('1991/silence_of_the_lambs')['posters'][0]
    u'http://www.impawards.com/1991/posters/silence_of_the_lambs_ver1_xlg.jpg'
    >>> getData('1991/silence_of_the_lambs')['url']
    u'http://www.impawards.com/1991/silence_of_the_lambs_ver1.html'
    '''
    data = {
        'url': getUrl(id)
    }
    html = getUrlUnicode(data['url'])
    data['imdbId'] = findRe(html, 'imdb.com/title/tt(.*?) ')
    data['title'] = stripTags(findRe(html, '<p class="name white">(.*?) \(<a href="alpha1.html">'))
    data['year'] = findRe(html, '\(<a href="alpha1.html">(.*?)</a>\)')
    data['posters'] = []
    # id[5:] drops the 'year/' prefix: in-page links use the bare page name
    results = re.compile('<a href = (%s.*?html)' % id[5:], re.DOTALL).findall(html)
    for result in results:
        # normalize any extra-large link back to the regular poster page
        result = result.replace('_xlg.html', '.html')
        url = 'http://www.impawards.com/%s/%s' % (data['year'], result)
        posterPageHtml = getUrlUnicode(url)
        # prefer the extra-large version of the poster, if the page offers one
        xlgLink = findRe(posterPageHtml, '<a href = (\w*?_xlg.html)')
        if xlgLink:
            url = 'http://www.impawards.com/%s/%s' % (data['year'], xlgLink)
            posterPageHtml = getUrlUnicode(url)
            poster = 'http://www.impawards.com/%s/%s' % (data['year'], findRe(posterPageHtml, '<img SRC="(.*?)"'))
        else:
            poster = 'http://www.impawards.com/%s/%s' % (data['year'], findRe(posterPageHtml, '<img src="(posters.*?)" alt='))
        data['posters'].append(poster)
    return data
def getId(url):
    '''Derive the impawards id ('year/movie_name') from a poster page url.

    Strips the trailing poster-size ('_xlg') and version ('_verN') suffixes,
    so every poster page of one movie maps to the same id.
    '''
    parts = url.split('/')
    year = parts[3]
    # parts[4][:-5] drops the '.html' extension
    words = parts[4][:-5].split('_')
    if words[-1] == 'xlg':
        words.pop()
    if re.search(r'ver\d+$', words[-1]):
        words.pop()
    return '%s/%s' % (year, '_'.join(words))
def getIds():
    '''Return the ids of every movie in the impawards archive, deduplicated.

    Pages are walked from page(pages) down to page 1, so ids keep the
    order in which the archive lists them back to front.
    '''
    ids = []
    seen = set()
    html = getUrlUnicode('http://www.impawards.com/archives/latest.html', timeout = 0)
    pages = int(findRe(html, '<a href= page(.*?).html>'))
    for page in range(pages + 1, 0, -1):
        for id in getIdsByPage(page):
            # seen-set instead of 'id in ids': O(1) membership per id
            if id not in seen:
                seen.add(id)
                ids.append(id)
    return ids
for url in urls:
html = getUrlUnicode(url)
data = parseMoviePage(html)
print data
if '"' in data['posterUrl']:
print url
sys.exit()
dirname = '%s/%s/%s/%s' % (pathname, data['imdbId'][:1], data['imdbId'][:4], data['imdbId'])
filename = '%s/%s' % (dirname, os.path.split(data['posterUrl'])[1])
if not os.path.exists(filename):
jpg = getUrl(data['posterUrl'])
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(filename, 'w')
f.write(jpg)
f.close()
def cleanup():
    '''Delete archived poster files whose filename contains a stray '"'.'''
    for dirname, dirs, files in os.walk('/Volumes/Rolux Home/Desktop/Data/impawards.com'):
        for filename in files:
            if '"' in filename:
                print(filename)
                os.remove(dirname + '/' + filename)
def getIdsByPage(page):
    '''Return the set of impawards ids linked from one archive page.'''
    ids = set()
    html = getUrlUnicode('http://www.impawards.com/archives/page%s.html' % page, timeout = -1)
    # archive pages link movies as '<a href = ../year/name.html>'
    results = re.compile('<a href = \.\./(.*?)>', re.DOTALL).findall(html)
    for result in results:
        ids.add(getId('http://impawards.com/%s' % result))
    return ids
def getUrl(id):
    '''Return the url of the movie's poster page for an impawards id.

    Movies with multiple posters have no page at '<id>.html'; fall back
    to the first version page '<id>_ver1.html' in that case.
    '''
    url = "http://www.impawards.com/%s.html" % id
    html = getUrlUnicode(url)
    if findRe(html, "No Movie Posters on This Page"):
        url = "http://www.impawards.com/%s_ver1.html" % id
    return url
if __name__ == '__main__':
    # ad-hoc smoke test when run as a script
    print(getData("1982/blade_runner"))
    print(getData("1991/silence_of_the_lambs"))
    print(getData("1999/matrix"))
    # NOTE(review): crawls the entire archive — confirm this call is wanted
    getIds()