updating criterion
parent c78864d3ab
commit 25997b68dd

2 changed files with 50 additions and 60 deletions
@@ -10,41 +10,24 @@ from oxlib.text import findRe, removeSpecialCharacters
 import imdb
 
 
-def getIds():
-    ids = []
-    html = getUrlUnicode("http://www.criterion.com/library/dvd")
-    results = re.compile("page=(.*?)\"").findall(html)
-    pages = int(results[len(results) - 2])
-    for page in range(1, pages + 1):
-        html = getUrlUnicode("http://www.criterion.com/library/dvd?page=" + str(page))
-        results = re.compile("films/(.*?)\"").findall(html)
-        for result in results:
-            ids.append(result)
-        results = re.compile("boxsets/(.*?)\"").findall(html)
-        for result in results:
-            html = getUrlUnicode("http://www.criterion.com/boxsets/" + result)
-            results = re.compile("films/(.*?)\"").findall(html)
-            for result in results:
-                ids.append(result)
-    return map(lambda id: str(id), sorted(map(lambda id: int(id), set(ids))))
-
 def getData(id):
     '''
     >>> getData('1333')['imdbId']
     '0060304'
 
-    >>> getData('236')['posterUrl']
+    >>> getData('236')['posters'][0]
     'http://criterion_production.s3.amazonaws.com/release_images/1586/ThirdManReplace.jpg'
 
-    >>> getData('786')['posterUrl']
+    >>> getData('786')['posters'][0]
     'http://criterion_production.s3.amazonaws.com/product_images/185/343_box_348x490.jpg'
     '''
-    data = {}
-    data['id'] = id
+    data = {
+        "url": getUrl(id)
+    }
     try:
-        html = getUrlUnicode("http://www.criterion.com/films/" + id)
+        html = getUrlUnicode(data["url"])
     except:
-        html = getUrl("http://www.criterion.com/films/" + id)
+        html = getUrl(data["url"])
     data["number"] = findRe(html, "<p class=\"spinenumber\">(.*?)</p>")
     data["title"] = findRe(html, "<h2 class=\"movietitle\">(.*?)</h2>")
     data["director"] = findRe(html, "<h2 class=\"director\">(.*?)</h2>")
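The doctests in getData double as a small regression check for the renamed keys. A minimal way to run them, assuming the file is importable as criterion (the commit view does not show the module path), would be:

    # Hedged sketch: the import name "criterion" is an assumption.
    import doctest
    import criterion

    doctest.testmod(criterion, verbose=True)

Note that these doctests fetch pages from criterion.com, so they behave more like integration checks than unit tests.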
@@ -58,48 +41,51 @@ def getData(id):
     result = re.compile("<div class=\"editioninfo\">(.*?)</div>", re.DOTALL).findall(html)[1]
     result = findRe(result, "<a href=\"(.*?)\">")
     if not "/boxsets/" in result:
-        data["posterUrl"] = result
+        data["posters"] = [result]
     else:
         html_ = getUrlUnicode(result)
         result = findRe(html_, "<a href=\"http://www.criterion.com/films/%s\">(.*?)</a>" % id)
         result = findRe(result, "src=\"(.*?)\"")
-        data["posterUrl"] = result.replace("_w100", "")
+        data["posters"] = [result.replace("_w100", "")]
     result = findRe(html, "<img alt=\"Film Still\" height=\"252\" src=\"(.*?)\"")
     if result:
-        data["stillUrl"] = result
-        data["trailerUrl"] = ""
+        data["stills"] = [result]
+        data["trailers"] = []
     else:
-        data["stillUrl"] = findRe(html, "\"thumbnailURL\", \"(.*?)\"")
-        data["trailerUrl"] = findRe(html, "\"videoURL\", \"(.*?)\"")
+        data["stills"] = [findRe(html, "\"thumbnailURL\", \"(.*?)\"")]
+        data["trailers"] = [findRe(html, "\"videoURL\", \"(.*?)\"")]
     data['imdbId'] = imdb.getMovieId(data['title'], data['director'], data['year'])
     return data
 
-def getPosterUrl(id):
-    data = getData(id)
-    return data['posterUrl']
+def getId(url):
+    return url.split("/")[-1]
 
-def getMovieId(title = '', director = '', year = '', imdbId = ''):
-    if not imdbId:
-        imdbId = imdb.getMovieId(title, director, year)
-    ids = getIds()
-    for id in ids:
-        data = getData(id)
-        if imdb.getMovieId(data['title'], data['director'], data['year'] == imdbId):
-            return id
-    return ''
+def getIds():
+    ids = []
+    html = getUrlUnicode("http://www.criterion.com/library/dvd")
+    results = re.compile("page=(.*?)\"").findall(html)
+    pages = int(results[len(results) - 2])
+    for page in range(pages, 0, -1):
+        for id in getIdsByPage(page):
+            ids.append(id)
+    return map(lambda id: str(id), sorted(map(lambda id: int(id), set(ids))))
 
-def getMovieData(title = '', director = '', year = '', imdbId = ''):
-    '''
-    >>> getMovieData('Pierrot le fou', 'Jean-Luc Godard', '1965')['id']
-    '149'
-    '''
-    data = {}
-    if not imdbId:
-        imdbId = imdb.getMovieId(title, director, year)
-    id = getMovieId(imdbId = imdbId)
-    if id:
-        data_ = getData(id)
-        data['id'] = data_['id']
-        data['posterUrl'] = data_['posterUrl']
-        data['synopsis'] = data_['synopsis']
-    return data
+def getIdsByPage(page):
+    ids = []
+    html = getUrlUnicode("http://www.criterion.com/library/dvd?page=%s" % page)
+    results = re.compile("films/(.*?)\"").findall(html)
+    for result in results:
+        ids.append(result)
+    results = re.compile("boxsets/(.*?)\"").findall(html)
+    for result in results:
+        html = getUrlUnicode("http://www.criterion.com/boxsets/" + result)
+        results = re.compile("films/(.*?)\"").findall(html)
+        for result in results:
+            ids.append(result)
+    return set(ids)
+
+def getUrl(id):
+    return "http://www.criterion.com/films/%s" % id
+
+if __name__ == '__main__':
+    print getIds()
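Taken together, the two criterion hunks leave the module with getIds, getIdsByPage, getData, getId and getUrl. A minimal usage sketch in the same Python 2 style as the diff, assuming the module is importable as criterion (only the function names and keys are taken from the diff above):

    # Hypothetical usage; the import path is an assumption, not part of the commit.
    import criterion

    ids = criterion.getIds()            # sorted Criterion ids, scraped page by page
    data = criterion.getData(ids[0])    # dict with "url", "number", "title", "director",
                                        # "posters", "stills", "trailers", "imdbId", ...
    print data["title"], data["posters"][0]
    print criterion.getId(data["url"])  # getUrl() and getId() round-trip between id and URL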
@@ -57,11 +57,12 @@ def getId(url):
 def getIds():
     ids = []
     html = getUrlUnicode('http://www.impawards.com/archives/latest.html', timeout = 0)
-    pages = int(findRe(html, '<a href= page(.*?).html>'))
-    for page in range(pages + 1, 0, -1):
+    pages = int(findRe(html, '<a href= page(.*?).html>')) + 1
+    for page in range(pages, 0, -1):
         for id in getIdsByPage(page):
             if not id in ids:
                 ids.append(id)
+        print sorted(ids), len(ids), "%d/%d" % (page, pages)
     return ids
 
 def getIdsByPage(page):
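The remaining hunks touch the second changed file, an impawards.com scraper judging by the URLs. The loop rewrite above is easy to misread as a behaviour change: folding the + 1 into pages leaves the set of visited pages identical, and the visible difference is the new progress line, which can now print the correct total. A quick illustration with a made-up page count:

    # Both loop headers visit the same page numbers, highest page first.
    pages = 12                            # illustrative value parsed from latest.html
    print range(pages + 1, 0, -1)[:3]     # [13, 12, 11]  (old form)
    pages = pages + 1
    print range(pages, 0, -1)[:3]         # [13, 12, 11]  (new form, same sequence)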
@@ -79,3 +80,6 @@ def getUrl(id):
     if findRe(html, "No Movie Posters on This Page"):
         url = "http://www.impawards.com/%s_ver1.html" % id
     return url
+
+if __name__ == '__main__':
+    getIds()
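With the __main__ blocks added to both files, each scraper can be run directly or driven from other code. A hedged sketch for the second file (the module name impawards is an assumption inferred from the URLs, not shown in the commit):

    # Hypothetical; the import name is not part of the commit.
    import impawards

    ids = impawards.getIds()    # walks the archive pages from last to first,
                                # printing progress after each page
    print len(ids)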