From 80cb6ac7fb292fabab2f02038f92be0d39fd459e Mon Sep 17 00:00:00 2001
From: j
Date: Mon, 21 May 2018 10:59:50 +0200
Subject: [PATCH] criterion ids

---
 ox/web/criterion.py | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/ox/web/criterion.py b/ox/web/criterion.py
index 4f41279..eed45a4 100644
--- a/ox/web/criterion.py
+++ b/ox/web/criterion.py
@@ -96,23 +96,16 @@ def get_data(id, timeout=ox.cache.cache_timeout, get_imdb=False):
 
 def get_ids(page=None):
     ids = []
-    if page:
-        url = "http://www.criterion.com/library/expanded_view?m=dvd&p=%s&pp=50&s=spine" % page
-        html = read_url(url, unicode=True)
-        results = re.compile("films/(\d+)").findall(html)
+    html = read_url("https://www.criterion.com/shop/browse/list?sort=spine_number", unicode=True)
+    results = re.compile("films/(\d+)-").findall(html)
+    ids += results
+    results = re.compile("boxsets/(.*?)\"").findall(html)
+    for result in results:
+        html = read_url("http://www.criterion.com/boxsets/" + result, unicode=True)
+        results = re.compile("films/(\d+)-").findall(html)
         ids += results
-        results = re.compile("boxsets/(.*?)\"").findall(html)
-        for result in results:
-            html = read_url("http://www.criterion.com/boxsets/" + result, unicode=True)
-            results = re.compile("films/(\d+)").findall(html)
-            ids += results
-        return set(ids)
-    html = read_url("http://www.criterion.com/library/expanded_view?m=dvd&p=1&pp=50&s=spine", unicode=True)
-    results = re.compile("\&p=(\d+)\&").findall(html)
-    pages = max(map(int, results))
-    for page in range(1, pages):
-        ids += get_ids(page)
     return sorted(set(ids), key=int)
 
+
 if __name__ == '__main__':
     print(get_ids())
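
For reference, a minimal standalone sketch of what the patched get_ids() does, written against the Python standard library rather than ox's cached read_url() helper. The URLs and regular expressions come from the diff above; the function names, the User-Agent header, and the utf-8 decoding are assumptions for illustration only.

# Standalone sketch of the new scraping approach, standard library only.
# The patch itself goes through ox's cached read_url(); fetch(),
# get_criterion_ids(), the User-Agent header and the utf-8 decoding are
# illustrative assumptions.
import re
from urllib.request import Request, urlopen

def fetch(url):
    # Plain download; ox's read_url() additionally caches responses on disk.
    req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
    return urlopen(req).read().decode("utf-8", "replace")

def get_criterion_ids():
    ids = []
    # The shop list view sorted by spine number links each film as films/<id>-<slug>.
    html = fetch("https://www.criterion.com/shop/browse/list?sort=spine_number")
    ids += re.findall(r"films/(\d+)-", html)
    # Box sets link their member films from separate pages, so follow each one.
    for boxset in re.findall(r'boxsets/(.*?)"', html):
        ids += re.findall(r"films/(\d+)-", fetch("http://www.criterion.com/boxsets/" + boxset))
    # Deduplicate and sort numerically by spine id, matching the patched get_ids().
    return sorted(set(ids), key=int)

if __name__ == "__main__":
    print(get_criterion_ids())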