cleaning up impawards module

This commit is contained in:
Rolux 2009-07-13 19:55:28 +02:00
parent 928e6a4769
commit 04d9d7c500

View file

@ -9,88 +9,80 @@ from oxlib.text import findRe
import imdb
def getData(id):
'''
>>> getData('1991/silence_of_the_lambs')['imdbId']
u'0102926'
def getMovieData(title = '', director = '', imdbId = ''):
    # Collect every impawards.com poster URL for one movie by walking all
    # archive pages and comparing each movie page's IMDb id.
    # Returns {'posterUrls': [sorted list of poster URLs]}.
    # NOTE(review): the source shown here had its indentation stripped by the
    # diff rendering; the nesting below is reconstructed — confirm against the
    # repository before relying on it.
    data = {'posterUrls': []}
    if not imdbId:
        # Resolve the IMDb id from title/director via the imdb helper module.
        imdbId = imdb.getMovieId(title, director)
    print imdbId  # debug output
    # The newest archive page is 'latest.html'; older ones are 'page<n>.html'.
    # timeout semantics (0 / -1) come from oxlib's getUrlUnicode cache —
    # presumably 0 = always refetch, -1 = cache forever; TODO confirm.
    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
    pages = int(findRe(html, '<a href = page(.*?).html>'))
    # Walk from the latest page (pages + 1, i.e. latest.html) down to page 1.
    for page in range(pages + 1, 0, -1):
        print page  # debug output
        if page <= pages:
            # For all but the first iteration, fetch the numbered archive page.
            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
        urls = parseArchivePage(html)
        print urls  # debug output
        for url in urls:
            html = getUrlUnicode(url)
            d = parseMoviePage(html)
            print d  # debug output
            if d['imdbId'] == imdbId:
                data['posterUrls'].append(d['posterUrl'])
                print d['posterUrl']  # debug output
    data['posterUrls'].sort()
    return data
>>> getData('1991/silence_of_the_lambs')['posters'][0]
u'http://www.impawards.com/1991/posters/silence_of_the_lambs_ver1_xlg.jpg'
def parseArchivePage(html):
    """Extract movie-page URLs from an impawards archive page.

    The archive pages link each movie as '<a href = ../<year>/<name>.html>';
    every such relative link is returned as an absolute impawards.com URL,
    in page order.
    """
    # Raw string: the original non-raw '\.\.' relies on Python passing
    # unknown escapes through, which raises SyntaxWarning on modern Python.
    link_re = re.compile(r'<a href = \.\./(.*?)>', re.DOTALL)
    return ['http://impawards.com/%s' % path for path in link_re.findall(html)]
# NOTE(review): diff-rendering artifact. This span interleaves the NEW
# parseMoviePage(html) with the REMOVED getData(id) body and its doctest
# (the '>>>' lines and the references to `id` / getUrl(id) belong to the
# removed function). It is not valid Python as shown; recover each side
# from the commit (parent 928e6a4769 / commit 04d9d7c500) before editing.
def parseMoviePage(html):
data = {}
# NOTE(review): removed-side doctest fragment of getData(), not code:
>>> getData('1991/silence_of_the_lambs')['url']
u'http://www.impawards.com/1991/silence_of_the_lambs_ver1.html'
'''
# NOTE(review): from here on this looks like the removed getData(id) body —
# it reads `id`, which parseMoviePage does not take.
data = {
'url': getUrl(id)
}
html = getUrlUnicode(data['url'])
data['imdbId'] = findRe(html, 'imdb.com/title/tt(.*?) ')
data['title'] = stripTags(findRe(html, '<p class="name white">(.*?) \(<a href="alpha1.html">'))
data['year'] = findRe(html, '\(<a href="alpha1.html">(.*?)</a>\)')
# An '_xlg.html' link means a larger poster page exists; follow it for the
# full-size image, otherwise take the inline 'posters...' <img>.
result = findRe(html, '<a href = (\w*?_xlg.html)')
if result:
url = 'http://impawards.com/%s/%s' % (data['year'], result)
html = getUrlUnicode(url, timeout = -1)
data['posterUrl'] = 'http://impawards.com/%s/%s' % (data['year'], findRe(html, '<img SRC="(.*?)"'))
else:
data['posterUrl'] = 'http://impawards.com/%s/%s' % (data['year'], findRe(html, '<img src="(posters.*?)" alt='))
data['posters'] = []
# id[5:] strips the 'YYYY/' prefix so the regex matches sibling pages
# ('<name>_ver2.html', '<name>_xlg.html', ...) of the same movie.
results = re.compile('<a href = (%s.*?html)' % id[5:], re.DOTALL).findall(html)
for result in results:
result = result.replace('_xlg.html', '.html')
print result
url = 'http://www.impawards.com/%s/%s' % (data['year'], result)
html = getUrlUnicode(url)
result = findRe(html, '<a href = (\w*?_xlg.html)')
if result:
url = 'http://www.impawards.com/%s/%s' % (data['year'], result)
html = getUrlUnicode(url)
poster = 'http://www.impawards.com/%s/%s' % (data['year'], findRe(html, '<img SRC="(.*?)"'))
else:
poster = 'http://www.impawards.com/%s/%s' % (data['year'], findRe(html, '<img src="(posters.*?)" alt='))
data['posters'].append(poster)
return data
# Download every impawards poster into a local archive tree keyed by IMDb id.
# NOTE(review): diff-rendering artifact — only the head of this function is
# visible here; the rest of its body (the per-movie download loop) appears
# interleaved into the getIds() span further down.
def archivePosters():
import os
# NOTE(review): this import shadows the module-level getUrl(id) defined
# later in this file for the duration of this function.
from oxlib.net import getUrl
pathname = '/Volumes/Rolux Home/Desktop/Data/impawards.com'
html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
def getId(url):
    """Derive a '<year>/<name>' movie id from an impawards poster-page URL.

    E.g. 'http://www.impawards.com/2009/movie_ver2_xlg.html' -> '2009/movie':
    strips the trailing '.html', then an '_xlg' large-size marker and a
    '_verN' poster-variant marker, if present.
    """
    parts = url.split('/')
    year = parts[3]                        # ['http:', '', host, year, page]
    words = parts[4][:-5].split('_')       # [:-5] drops the '.html' suffix
    if words[-1] == 'xlg':                 # large-size poster page marker
        words.pop()
    # Raw-string regex (original non-raw '\d' raises SyntaxWarning on modern
    # Python); stdlib re.search replaces oxlib's findRe — same truthiness
    # (no match -> falsy), and it removes the non-stdlib dependency here.
    if re.search(r'ver\d+$', words[-1]):   # poster variant marker, e.g. 'ver2'
        words.pop()
    return '%s/%s' % (year, '_'.join(words))
# NOTE(review): diff-rendering artifact. This span interleaves the NEW
# getIds() with the body of archivePosters() (the download loop that uses
# `pathname`, defined in archivePosters above, plus sys/os calls). It is not
# valid Python as shown; recover each side from the commit before editing.
def getIds():
ids = []
html = getUrlUnicode('http://www.impawards.com/archives/latest.html', timeout = 0)
pages = int(findRe(html, '<a href= page(.*?).html>'))
# Walk from latest (pages + 1) down to page 1.
for page in range(pages + 1, 0, -1):
print "Page %d of %d" % (page, pages)
if page <= pages:
html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
# NOTE(review): from here down to f.close() this looks like the removed
# archivePosters() download loop, not part of getIds().
urls = parseArchivePage(html)
for url in urls:
html = getUrlUnicode(url)
data = parseMoviePage(html)
print data
# Abort on a stray '"' in the poster URL — indicates a bad parse.
if '"' in data['posterUrl']:
print url
sys.exit()
# Archive layout: <root>/<first digit>/<first 4 digits>/<imdbId>/<file>.
dirname = '%s/%s/%s/%s' % (pathname, data['imdbId'][:1], data['imdbId'][:4], data['imdbId'])
filename = '%s/%s' % (dirname, os.path.split(data['posterUrl'])[1])
if not os.path.exists(filename):
jpg = getUrl(data['posterUrl'])
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(filename, 'w')
f.write(jpg)
f.close()
# NOTE(review): getIds() proper resumes here — deduplicate ids per page.
for id in getIdsByPage(page):
if not id in ids:
ids.append(id)
return ids
def cleanup():
for dirname, dirs, files in os.walk('/Volumes/Rolux Home/Desktop/Data/impawards.com'):
for filename in files:
if '"' in filename:
print filename
os.remove(dirname + '/' + filename)
def getIdsByPage(page):
    """Return the set of movie ids linked from one numbered archive page.

    Fetches http://www.impawards.com/archives/page<page>.html and maps every
    movie link through getId(). Returns a set (duplicates collapse).
    """
    # timeout = -1: oxlib cache setting — presumably "serve cached copy
    # forever"; TODO confirm oxlib.net semantics.
    html = getUrlUnicode('http://www.impawards.com/archives/page%s.html' % page, timeout = -1)
    # Reuse parseArchivePage() instead of duplicating its (non-raw-string)
    # link regex here; it yields exactly the 'http://impawards.com/<path>'
    # URLs this function used to build inline.
    return set(getId(url) for url in parseArchivePage(html))
def getUrl(id):
    # Resolve the poster page URL for a movie id like
    # '1991/silence_of_the_lambs'. If the plain page is an empty placeholder
    # ("No Movie Posters on This Page"), fall back to the '_ver1' variant.
    plain = "http://www.impawards.com/%s.html" % id
    page = getUrlUnicode(plain)
    if findRe(page, "No Movie Posters on This Page"):
        return "http://www.impawards.com/%s_ver1.html" % id
    return plain
if __name__ == '__main__':
    # Ad-hoc driver calls for manual runs.
    # NOTE(review): diff-rendering artifact — this block mixes both sides of
    # the commit: getData() was removed in this commit, so the three print
    # lines below belong to the removed version (presumably superseded by
    # the getMovieData() call); confirm against the repository.
    # cleanup()
    archivePosters()
    getMovieData('Brick', 'Rian Johnson')
    print getData("1982/blade_runner")
    print getData("1991/silence_of_the_lambs")
    print getData("1999/matrix")
    getIds()