# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
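# Scraper for allmovie.com: getData() fetches the work page and review page
# for an allmovie work id (or full URL) and pulls metadata out with regexes.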
import re
import time

from ox import strip_tags, find_re
from ox.cache import read_url

def getId(url):
    # the work id is the last path component of an allmovie.com URL
    return url.split("/")[-1]

def getData(id):
    '''
    >>> getData('129689')['cast'][1][1]
    u'Marianne'
    >>> getData('129689')['credits'][0][0]
    u'Jean-Luc Godard'
    >>> getData('129689')['posters'][0]
    u'http://image.allmusic.com/00/adg/cov200/dru800/u812/u81260bbffr.jpg'
    >>> getData('129689')['rating']
    u'4.5'
    '''
    if id.startswith('http'):
        id = getId(id)
    data = {
        "url": getUrl(id)
    }
    html = read_url(data["url"], unicode=True)
    data['aka'] = parseList(html, 'AKA')
    data['category'] = find_re(html, '<dt>category</dt>.*?<dd>(.*?)</dd>')
    data['countries'] = parseList(html, 'countries')
    data['director'] = parseEntry(html, 'directed by')
    data['genres'] = parseList(html, 'genres')
    data['keywords'] = parseList(html, 'keywords')
    data['posters'] = [find_re(html, '<img src="(http://cps-.*?)"')]
    data['produced'] = parseList(html, 'produced by')
    data['rating'] = find_re(html, 'Stars" title="(.*?) Stars"')
    data['released'] = parseEntry(html, 'released by')
    data['releasedate'] = parseList(html, 'release date')
    data['runtime'] = parseEntry(html, 'run time').replace('min.', '').strip()
    data['set'] = parseEntry(html, 'set in')
    data['synopsis'] = strip_tags(find_re(html, '<div class="toggle-text" itemprop="description">(.*?)</div>')).strip()
    data['themes'] = parseList(html, 'themes')
    data['types'] = parseList(html, 'types')
    data['year'] = find_re(html, '<span class="year">.*?(\d+)')
    #data['stills'] = [re.sub('_derived.*?/', '', i) for i in re.compile('<a href="#" title="movie still".*?<img src="(.*?)"', re.DOTALL).findall(html)]
    data['stills'] = re.compile('<a href="#" title="movie still".*?<img src="(.*?)"', re.DOTALL).findall(html)
    # cast and credits scraping is currently disabled, so the 'cast' and
    # 'credits' doctests above will not pass until these calls are re-enabled
    #html = read_url("http://allmovie.com/work/%s/cast" % id, unicode=True)
    #data['cast'] = parseTable(html)
    #html = read_url("http://allmovie.com/work/%s/credits" % id, unicode=True)
    #data['credits'] = parseTable(html)
    html = read_url("http://allmovie.com/work/%s/review" % id, unicode=True)
    data['review'] = strip_tags(find_re(html, '<div class="toggle-text" itemprop="description">(.*?)</div>')).strip()
    return data

def getUrl(id):
    return "http://allmovie.com/work/%s" % id

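# The helpers below pull values out of allmovie's HTML: parseEntry and
# parseList read <dt>/<dd> definition-list entries, parseTable splits the
# cast/credits results table, and parseText grabs a labelled text cell.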
def parseEntry(html, title):
    html = find_re(html, '<dt>%s</dt>.*?<dd>(.*?)</dd>' % title)
    return strip_tags(html).strip()

def parseList(html, title):
    html = find_re(html, '<dt>%s</dt>.*?<dd>(.*?)</dd>' % title.lower())
    r = [strip_tags(x) for x in re.compile('<li>(.*?)</li>', re.DOTALL).findall(html)]
    if not r and html:
        # some entries are plain text rather than a <li> list
        r = [strip_tags(html)]
    return r

def parseTable(html):
    # each <tr> of the results table becomes a list of cell values,
    # with the '&nbsp;' padding stripped out
    return [
        [strip_tags(cell).strip().replace('&nbsp;', '') for cell in row.split('<td width="305">-')]
        for row in find_re(html, '<div id="results-table">(.*?)</table>').split('</tr>')[:-1]
    ]

def parseText(html, title):
    return strip_tags(find_re(html, '%s</td>.*?<td colspan="2"><p>(.*?)</td>' % title)).strip()

if __name__ == '__main__':
    print getData('129689')
    # print getData('177524')
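    # to exercise the doctests in getData (needs network access, and assumes
    # allmovie.com still serves markup that matches the regexes above):
    #import doctest
    #doctest.testmod()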