# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
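"""Scraper for Metacritic critic reviews: resolves a Metacritic movie URL
(from a Metacritic slug or an IMDb id) and extracts the metascore plus the
individual critic quotes and scores."""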
import re
from urllib import quote
from lxml.html import document_fromstring
from ox.cache import read_url
from ox import find_re, strip_tags
def get_url(id=None, imdb=None):
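    """Return the Metacritic movie URL for a Metacritic slug (id) or an IMDb
    id. With an IMDb id, the title's criticreviews page is scraped for a
    metacritic.com link; None is returned if no link is found."""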
    if imdb:
        url = "http://www.imdb.com/title/tt%s/criticreviews" % imdb
        data = read_url(url)
        metacritic_url = find_re(data, '"(http://www.metacritic.com/movie/.*?)"')
        return metacritic_url or None
    return 'http://www.metacritic.com/movie/%s' % id
def get_id(url):
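    """Return the Metacritic id, i.e. the last path segment of the URL."""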
    return url.split('/')[-1]
def get_show_url(title):
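    """Return the Metacritic URL for a TV show title via the search form."""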
    title = quote(title)
    url = "http://www.metacritic.com/search/process?ty=6&ts=%s&tfs=tvshow_title&x=0&y=0&sb=0&release_date_s=&release_date_e=&metascore_s=&metascore_e=" % title
    data = read_url(url)
    return find_re(data, '(http://www.metacritic.com/tv/shows/.*?)\?')
def get_data(url):
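    """Scrape a Metacritic movie page and return a dict with the overall
    'score' (-1 if missing), the page 'id' and 'url', and 'critics', a list
    of per-review dicts (critic, url, source, quote, score)."""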
    data = read_url(url, unicode=True)
    doc = document_fromstring(data)
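    # Overall metascore: the score_value span whose property attribute is
    # "v:average"; fall back to -1 when it is not present.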
    score = filter(lambda s: s.attrib.get('property') == 'v:average',
        doc.xpath('//span[@class="score_value"]'))
    if score:
        score = int(score[0].text)
    else:
        score = -1
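
    # Per-review fields, read positionally from the review_content blocks.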
    authors = [a.text
        for a in doc.xpath('//div[@class="review_content"]//div[@class="author"]//a')]
    sources = [d.text
        for d in doc.xpath('//div[@class="review_content"]//div[@class="source"]/a')]
    reviews = [d.text
        for d in doc.xpath('//div[@class="review_content"]//div[@class="review_body"]')]
    scores = [int(d.text.strip())
        for d in doc.xpath('//div[@class="review_content"]//div[contains(@class, "critscore")]')]
    urls = [a.attrib['href']
        for a in doc.xpath('//div[@class="review_content"]//a[contains(@class, "external")]')]
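
    # Combine the parallel lists into one dict per critic review.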
    metacritics = []
    for i in range(len(authors)):
        metacritics.append({
            'critic': authors[i],
            'url': urls[i],
            'source': sources[i],
            'quote': strip_tags(reviews[i]).strip(),
            'score': scores[i],
        })

    return {
        'critics': metacritics,
        'id': get_id(url),
        'score': score,
        'url': url,
    }
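
if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module: resolve the
    # Metacritic URL for an IMDb id ('0133093' is The Matrix) and print the
    # scraped data. Needs network access, and the page markup this scraper
    # expects may have changed since the module was written.
    url = get_url(imdb='0133093')
    if url:
        print get_data(url)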