# -*- coding: UTF-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
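# Screen scraper for criterion.com. getIds() walks the paginated DVD library
# listing (expanding box sets) and getData(id) extracts per-film metadata:
# spine number, title, director, country, year, synopsis, posters, stills,
# trailers and a matching IMDb id.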
import re

import oxlib.cache
from oxlib.cache import getUrlUnicode
from oxlib.html import stripTags
from oxlib.text import findRe, removeSpecialCharacters

import imdb

def getId(url):
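    '''
    >>> getId('http://www.criterion.com/films/1333')
    '1333'
    '''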
    return url.split("/")[-1]

def getUrl(id):
    return "http://www.criterion.com/films/%s" % id

def getData(id):
    '''
    Returns a dict with keys url, number, title, director, country, year,
    synopsis, posters, stills, trailers and imdbId for the given film id.

    >>> getData('1333')['imdbId']
    '0060304'

    >>> getData('236')['posters'][0]
    'http://criterion_production.s3.amazonaws.com/release_images/1586/ThirdManReplace.jpg'

    >>> getData('786')['posters'][0]
    'http://criterion_production.s3.amazonaws.com/product_images/185/343_box_348x490.jpg'
    '''
    data = {
        "url": getUrl(id)
    }
    # fall back to the raw cached fetch if the unicode fetch fails
    try:
        html = getUrlUnicode(data["url"])
    except:
        html = oxlib.cache.getUrl(data["url"])
    data["number"] = findRe(html, "<p class=\"spinenumber\">(.*?)</p>")
    data["title"] = findRe(html, "<h2 class=\"movietitle\">(.*?)</h2>")
    data["director"] = findRe(html, "<h2 class=\"director\">(.*?)</h2>")
    results = re.compile("<p><strong>(.*?)</strong></p>").findall(html)
    data["country"] = results[0]
    data["year"] = results[1]
    result = findRe(html, "<div class=\"synopsis contentbox lightgray\">(.*?)</div>")
    data["synopsis"] = findRe(result, "<p>(.*?)</p>")
    # posters: skip the first edition box if it is a Blu-Ray or Essential Art
    # House edition and use the second one instead
    result = findRe(html, "<div class=\"editioninfo\">(.*?)</div>")
    if 'Blu-Ray' in result or 'Essential Art House DVD' in result:
        result = re.compile("<div class=\"editioninfo\">(.*?)</div>", re.DOTALL).findall(html)[1]
    result = findRe(result, "<a href=\"(.*?)\">")
    if "/boxsets/" not in result:
        data["posters"] = [result]
    else:
        # the edition links to a box set page: take this film's cover image
        # from there and strip the thumbnail suffix
        html_ = getUrlUnicode(result)
        result = findRe(html_, "<a href=\"http://www.criterion.com/films/%s\">(.*?)</a>" % id)
        result = findRe(result, "src=\"(.*?)\"")
        data["posters"] = [result.replace("_w100", "")]
    result = findRe(html, "<img alt=\"Film Still\" height=\"252\" src=\"(.*?)\"")
    if result:
        data["stills"] = [result]
        data["trailers"] = []
    else:
        # no film still on the page: use the trailer's thumbnail and video urls
        data["stills"] = [findRe(html, "\"thumbnailURL\", \"(.*?)\"")]
        data["trailers"] = [findRe(html, "\"videoURL\", \"(.*?)\"")]
    # look up the IMDb id by title, director and year
    data['imdbId'] = imdb.getMovieId(data['title'], data['director'], data['year'])
    return data

def getIds():
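    '''
    Returns all Criterion film ids from the paginated DVD library listing,
    deduplicated and sorted numerically, as a list of strings.
    '''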
    ids = []
    html = getUrlUnicode("http://www.criterion.com/library/dvd")
    # the second-to-last "page=" link gives the total number of pages
    results = re.compile("page=(.*?)\"").findall(html)
    pages = int(results[-2])
    for page in range(pages, 0, -1):
        ids.extend(getIdsByPage(page))
    return map(str, sorted(map(int, set(ids))))

def getIdsByPage(page):
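    '''
    Returns the set of film ids linked from one page of the DVD library
    listing, including films that only appear inside box sets.
    '''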
    ids = []
    html = getUrlUnicode("http://www.criterion.com/library/dvd?page=%s" % page)
    # film ids linked directly from the listing page
    for result in re.compile("films/(.*?)\"").findall(html):
        ids.append(result)
    # box sets: fetch each box set page and collect the film ids it contains
    for boxset in re.compile("boxsets/(.*?)\"").findall(html):
        boxsetHtml = getUrlUnicode("http://www.criterion.com/boxsets/" + boxset)
        for result in re.compile("films/(.*?)\"").findall(boxsetHtml):
            ids.append(result)
    return set(ids)

if __name__ == '__main__':
    print getIds()