impawards sample app

This commit is contained in:
parent 2fe6a9e989
commit ae6ab0d0a7

1 changed file with 28 additions and 1 deletion
@@ -2,6 +2,7 @@ import re
 
 import ox.imdb as imdb
 from oxutils.cache import getUrlUnicode
+from oxutils.html import stripTags
 from oxutils.text import findRe
 
 
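The only change in this hunk is the new stripTags import, used below to clean up the scraped title. A tiny sketch of the behaviour this assumes from oxutils.html.stripTags, namely that it drops markup and keeps the text (the sample string is made up):

from oxutils.html import stripTags

# Assumed behaviour, shown on a made-up fragment:
print stripTags('<b>Brick</b> (<a href="eligible.html">2006</a>)')
# expected, under that assumption: Brick (2006)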
@@ -38,7 +39,7 @@ def parseArchivePage(html):
 def parseMoviePage(html):
     data = {}
     data['imdbId'] = findRe(html, 'imdb.com/title/tt(.*?) ')
-    data['title'] = findRe(html, '<font size=\+3>(.*?) \(')
+    data['title'] = stripTags(findRe(html, '<table WIDTH="400" BGCOLOR="#222222">(.*?) \(<a href="eligible.html">'))
     data['year'] = findRe(html, '\(<a href="eligible.html">(.*?)</a>\)')
     result = findRe(html, '<a href = (\w*?_xlg.html) target= _blank>')
     if result:
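The new title expression relies on findRe returning the first capture group of the regex, or an empty string when nothing matches, and on stripTags removing the markup around it. A small sketch of that parsing step on a made-up page fragment (both helper behaviours are assumptions based on how they are used here):

from oxutils.html import stripTags
from oxutils.text import findRe

# Made-up fragment in the shape the new regex expects:
html = '<table WIDTH="400" BGCOLOR="#222222"><b>Brick</b> (<a href="eligible.html">2006</a>)'
title = stripTags(findRe(html, '<table WIDTH="400" BGCOLOR="#222222">(.*?) \(<a href="eligible.html">'))
year = findRe(html, '\(<a href="eligible.html">(.*?)</a>\)')
print title, year
# expected, under these assumptions: Brick 2006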
@@ -55,5 +56,31 @@ def parsePosterPage(html, year):
     data['posterUrl'] = 'http://impawards.com/%s/%s' % (year, findRe(html, '<img SRC="(.*?)"'))
     return data
 
+def archivePosters():
+    import os
+    from oxutils.net import getUrl
+    pathname = '/Volumes/Rolux Home/Desktop/Data/impawards.com'
+    html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
+    pages = int(findRe(html, '<a href = page(.*?).html>'))
+    for page in range(pages + 1, 0, -1):
+        if page <= pages:
+            html = getUrlUnicode('http://impawards.com/archives/page%s.html' % page, timeout = -1)
+        urls = parseArchivePage(html)
+        print urls
+        for url in urls:
+            html = getUrlUnicode(url)
+            data = parseMoviePage(html)
+            dirname = '%s/%s/%s' % (pathname, data['imdbId'][:4], data['imdbId'])
+            filename = '%s/%s' % (dirname, os.path.split(data['posterUrl'])[1])
+            if not os.path.exists(filename):
+                jpg = getUrl(data['posterUrl'])
+                if not os.path.exists(dirname):
+                    os.makedirs(dirname)
+                f = open(filename, 'w')
+                f.write(jpg)
+                f.close()
+
+
 if __name__ == '__main__':
+    archivePosters()
     getMovieData('Brick', 'Rian Johnson')
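To try the parsing side of the sample app without crawling the whole archive, here is a minimal sketch that fetches and parses a single movie page from inside this module (the URL is a made-up example, and the exact keys depend on what parseMoviePage finds on the page):

from oxutils.cache import getUrlUnicode

# Hypothetical movie-page URL, only to illustrate the expected input:
html = getUrlUnicode('http://impawards.com/2005/brick.html')
data = parseMoviePage(html)
# data should at least contain 'imdbId', 'title' and 'year'; when the page
# links to an extra-large poster, presumably a 'posterUrl' as well.
print data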