update rottentomatoes and metacritic
commit e74dc77ab1 (parent a4271fd81a)
2 changed files with 65 additions and 34 deletions
@@ -2,10 +2,16 @@
 # vi:si:et:sw=4:sts=4:ts=4
 import re
 from urllib import quote
+from lxml.html import document_fromstring
 
 from ox.cache import readUrl, readUrlUnicode
-from ox import findRe, decodeHtml, stripTags
+from ox import findRe, stripTags
+
+def getUrlByImdb(imdb):
+    url = "http://www.imdb.com/title/tt%s/criticreviews" % imdb
+    data = readUrl(url)
+    metacritic_url = findRe(data, '"(http://www.metacritic.com/movie/.*?)"')
+    return metacritic_url or None
 
 def getMetacriticShowUrl(title):
     title = quote(title)
@@ -13,33 +19,39 @@ def getMetacriticShowUrl(title):
     data = readUrl(url)
     return findRe(data, '(http://www.metacritic.com/tv/shows/.*?)\?')
 
-def getData(title, url=None):
-    if not url:
-        url = getMetacriticShowUrl(title)
-    if not url:
-        return None
+def getData(url):
     data = readUrlUnicode(url)
-    score = findRe(data, 'ALT="Metascore: (.*?)"')
+    doc = document_fromstring(data)
+    score = filter(lambda s: s.attrib.get('property') == 'v:average',
+                   doc.xpath('//span[@class="score_value"]'))
     if score:
-        score = int(score)
+        score = int(score[0].text)
     else:
         score = -1
 
-    reviews = re.compile(
-        '<div class="scoreandreview"><div class="criticscore">(.*?)</div>'
-        '.*?<span class="publication">(.*?)</span>'
-        '.*?<span class="criticname">(.*?)</span></div>'
-        '.*?<div class="quote">(.*?)<br>'
-        '.*?<a href="(.*?)" ', re.DOTALL).findall(data)
+    authors = [a.text
+        for a in doc.xpath('//div[@class="review_content"]//div[@class="author"]//a')]
+    publications = [d.text
+        for d in doc.xpath('//div[@class="review_content"]//div[@class="source"]/a')]
+    reviews = [d.text
+        for d in doc.xpath('//div[@class="review_content"]//div[@class="review_body"]')]
+    scores = [int(d.text.strip())
+        for d in doc.xpath('//div[@class="review_content"]//div[contains(@class, "critscore")]')]
+    links = [a.attrib['href']
+        for a in doc.xpath('//div[@class="review_content"]//a[contains(@class, "external")]')]
 
     metacritics = []
-    for review in reviews:
+    for i in range(len(authors)):
         metacritics.append({
-            'score': int(review[0]),
-            'publication':review[1],
-            'critic':decodeHtml(review[2]),
-            'quote': stripTags(review[3]).strip(),
-            'link': review[4],
+            'score': scores[i],
+            'publication': publications[i],
+            'critic': authors[i],
+            'quote': stripTags(reviews[i]).strip(),
+            'link': links[i],
         })
-    return dict(score = score, critics = metacritics, url = url)
+
+    return {
+        'score': score,
+        'critics': metacritics,
+        'url': url
+    }
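The hunks above rework the Metacritic scraper to parse reviews with lxml instead of one large regular expression and add getUrlByImdb(). As a hedged sketch of how the updated functions would be called (the ox.web.metacritic import path and the example IMDb id are assumptions, not shown in this diff):

    # minimal usage sketch; module path and IMDb id are assumed for illustration
    from ox.web import metacritic

    url = metacritic.getUrlByImdb('0133093')   # numeric IMDb id, without the 'tt' prefix
    if url:
        data = metacritic.getData(url)         # {'score': ..., 'critics': [...], 'url': ...}
        print data['score']
        for review in data['critics']:
            print review['critic'], review['score'], review['publication']

Note that getData() now takes the Metacritic URL directly; resolving a show title via getMetacriticShowUrl() is left to the caller.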
@@ -6,7 +6,7 @@ from ox.cache import getHeaders, readUrl, readUrlUnicode
 from ox import findRe, stripTags
 
 
-def readUrlByImdb(imdb):
+def getUrlByImdb(imdb):
     #this would also wor but does not cache:
     '''
     from urllib2 import urlopen
@@ -21,14 +21,33 @@ def readUrlByImdb(imdb):
         return "http://www.rottentomatoes.com" + movies[0]
     return None
 
+def get_og(data, key):
+    return findRe(data, '<meta property="og:%s".*?content="(.*?)"' % key)
+
 def getData(url):
-    data = readUrlUnicode(url)
+    data = readUrl(url)
     r = {}
     r['title'] = findRe(data, '<h1 class="movie_title">(.*?)</h1>')
     if '(' in r['title']:
         r['year'] = findRe(r['title'], '\((\d*?)\)')
-        r['title'] = re.sub('\((\d*?)\)', '', r['title']).strip()
-    r['synopsis'] = findRe(data, '<span id="movie_synopsis_all".*?>(.*?)</span>')
-    r['average rating'] = findRe(data, '<div id="bubble_allCritics".*?>(.*?)</div>').strip()
+        r['title'] = stripTags(re.sub('\((\d*?)\)', '', r['title'])).strip()
+    r['summary'] = stripTags(findRe(data, '<p id="movieSynopsis" class="movie_synopsis" itemprop="description">(.*?)</p>')).strip()
+    r['summary'] = r['summary'].replace('\t', ' ').replace('\n', ' ').replace('  ', ' ').replace('  ', ' ')
+    if not r['summary']:
+        r['summary'] = get_og(data, 'description')
+
+    meter = re.compile('<span id="all-critics-meter" class="meter(.*?)">(.*?)</span>').findall(data)
+    meter = filter(lambda m: m[1].isdigit(), meter)
+    if meter:
+        r['tomatometer'] = meter[0][1]
+    r['rating'] = findRe(data, 'Average Rating: <span>([\d.]+)/10</span>')
+    r['user_score'] = findRe(data, '<span class="meter popcorn numeric ">(\d+)</span>')
+    r['user_rating'] = findRe(data, 'Average Rating: ([\d.]+)/5')
+    poster = get_og(data, 'image')
+    if poster and not 'poster_default.gif' in poster:
+        r['posters'] = [poster]
+    for key in r.keys():
+        if not r[key]:
+            del r[key]
 
     return r
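The Rotten Tomatoes changes rename readUrlByImdb() to getUrlByImdb(), add a small get_og() helper for Open Graph meta tags, and replace the old synopsis/average-rating fields with summary, tomatometer, rating, user_score and user_rating, deleting empty keys from the result. A hedged usage sketch under the same assumptions (ox.web.rottentomatoes import path, example IMDb id):

    # minimal usage sketch; module path and IMDb id are assumed for illustration
    from ox.web import rottentomatoes

    url = rottentomatoes.getUrlByImdb('0133093')   # renamed from readUrlByImdb in this commit
    if url:
        r = rottentomatoes.getData(url)
        # empty values are removed from the dict, so use .get() for optional keys
        print r.get('title'), r.get('tomatometer'), r.get('user_score')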