python-ox/ox/web/criterion.py

# -*- coding: UTF-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
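# scraper for criterion.com film pages: spine number, title, director, country,
# year, synopsis, posters, stills and trailers, plus an optional IMDb id lookup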
import re
import ox.cache
from ox.cache import read_url
from ox.html import strip_tags
from ox.text import find_re, remove_special_characters

import imdb

def get_id(url):
    return url.split("/")[-1]

def get_url(id):
    return "http://www.criterion.com/films/%s" % id

def get_data(id, timeout=ox.cache.cache_timeout, get_imdb=False):
    '''
    >>> get_data('1333')['imdbId']
    u'0060304'

    >>> get_data('236')['posters'][0]
    u'http://criterion_production.s3.amazonaws.com/release_images/1586/ThirdManReplace.jpg'

    >>> get_data('786')['posters'][0]
    u'http://criterion_production.s3.amazonaws.com/product_images/185/343_box_348x490.jpg'
    '''
    data = {
        "url": get_url(id)
    }
    # prefer a unicode read; fall back to the raw cached page if that fails
    try:
        html = read_url(data["url"], timeout=timeout, unicode=True)
    except:
        html = ox.cache.read_url(data["url"], timeout=timeout)
    data["number"] = find_re(html, "<li>Spine #(\d+)")
    data["title"] = find_re(html, "<meta property=['\"]og:title['\"] content=['\"](.*?)['\"]")
    data["title"] = data["title"].split(u' \u2014 The Television Version')[0]
    data["director"] = strip_tags(find_re(html, "<h2 class=\"director\">(.*?)</h2>"))
    results = find_re(html, '<div class="left_column">(.*?)</div>')
    results = re.compile("<li>(.*?)</li>").findall(results)
    data["country"] = results[0]
    data["year"] = results[1]
    data["synopsis"] = strip_tags(find_re(html, "<p><strong>SYNOPSIS:</strong> (.*?)</p>"))
    # poster: for Blu-Ray / Essential Art House editions, take the cover link
    # from the "Other Editions" section instead of the purchase box
    result = find_re(html, "<div class=\"purchase\">(.*?)</div>")
    if 'Blu-Ray' in result or 'Essential Art House DVD' in result:
        r = re.compile('<h3 class="section_title first">Other Editions</h3>(.*?)</div>', re.DOTALL).findall(html)
        if r:
            result = r[0]
    result = find_re(result, "<a href=\"(.*?)\"")
    if not "/boxsets/" in result:
        data["posters"] = [result]
    else:
        # the cover links to a box set page; pull this film's cover image from there
        html_ = read_url(result, unicode=True)
        result = find_re(html_, '<a href="http://www.criterion.com/films/%s.*?">(.*?)</a>' % id)
        result = find_re(result, "src=\"(.*?)\"")
        if result:
            data["posters"] = [result.replace("_w100", "")]
        else:
            data["posters"] = []
    # stills and trailers: use the film still if present, otherwise the player's thumbnail/video URLs
    result = find_re(html, "<img alt=\"Film Still\" height=\"252\" src=\"(.*?)\"")
    if result:
        data["stills"] = [result]
        data["trailers"] = []
    else:
        data["stills"] = filter(lambda x: x, [find_re(html, "\"thumbnailURL\", \"(.*?)\"")])
        data["trailers"] = filter(lambda x: x, [find_re(html, "\"videoURL\", \"(.*?)\"")])
    if timeout == ox.cache.cache_timeout:
        timeout = -1
    if get_imdb:
        data['imdbId'] = imdb.get_movie_id(data['title'],
            data['director'], data['year'], timeout=timeout)
    return data

def get_ids():
    ids = []
    html = read_url("http://www.criterion.com/library/expanded_view?m=dvd&p=1&pp=50&s=spine", unicode=True)
    results = re.compile("\&amp;p=(\d+)\&").findall(html)
    pages = max(map(int, results))
    # range(1, pages) would skip the last page, so iterate up to pages + 1
    for page in range(1, pages + 1):
        for id in get_idsByPage(page):
            ids.append(id)
    return map(str, sorted(map(int, set(ids))))

def get_idsByPage(page):
    ids = []
    url = "http://www.criterion.com/library/expanded_view?m=dvd&p=%s&pp=50&s=spine" % page
    html = read_url(url, unicode=True)
    results = re.compile("films/(\d+)").findall(html)
    for result in results:
        ids.append(result)
    # box sets are listed separately; collect the ids of the films they contain
    results = re.compile("boxsets/(.*?)\"").findall(html)
    for result in results:
        html_ = read_url("http://www.criterion.com/boxsets/" + result, unicode=True)
        for film_id in re.compile("films/(\d+)").findall(html_):
            ids.append(film_id)
    return set(ids)

if __name__ == '__main__':
    print get_ids()