# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import re
from urllib import urlencode
from ox.utils import json
from ox.cache import read_url
from ox import find_re


def get_id(url):
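    # the page id is the last path component of the article URL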
    return url.split("/")[-1]


def get_url(id=None, imdb=None, allmovie=None):
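    # resolve a Wikipedia article URL from a page id, an IMDb id or an
    # Allmovie (amg) id; id lookups go through the site search, and an
    # IMDb match is only returned if the found page carries an IMDb id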
    if imdb:
        query = '"%s"' % imdb
        result = find(query)
        if result:
            url = result[0][1]
            data = get_movie_data(url)
            if 'imdb_id' in data:
                return url
        return ''
    if allmovie:
        query = '"amg_id = 1:%s"' % allmovie
        result = find(query)
        if result:
            url = result[0][1]
            return url
        return ''
    return "http://en.wikipedia.org/wiki/%s" % id


def get_movie_id(title, director='', year=''):
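    # search for '"<title>" film <director> <year>' and return the URL
    # of the first hit, or '' if there is no match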
    query = '"%s" film %s %s' % (title, director, year)
    result = find(query, 1)
    if result:
        return result[0][1]
    return ''


def get_wiki_data(wikipedia_url):
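    # fetch the raw wikitext of an article via index.php?title=...&action=raw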
    url = wikipedia_url.replace('wikipedia.org/wiki/', 'wikipedia.org/w/index.php?title=')
    url = "%s&action=raw" % url
    data = read_url(url).decode('utf-8')
    return data


def get_movie_data(wikipedia_url):
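    # parse the {{Infobox film}} template into a dict and add external ids
    # (Allmovie, IMDb, Internet Archive, Box Office Mojo, Rotten Tomatoes)
    # found in other templates on the page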
    if not wikipedia_url.startswith('http'):
        wikipedia_url = get_url(wikipedia_url)
    data = get_wiki_data(wikipedia_url)
    filmbox_data = find_re(data, r'\{\{[Ii]nfobox.[Ff]ilm(.*?)\n\}\}')
    filmbox = {}
    if not filmbox_data:
        return filmbox
    # naive parse: split the template on '|'; values that themselves
    # contain piped links or nested templates will not survive intact
    _box = filmbox_data.strip().split('|')
    for row in _box:
        d = row.split('=')
        if len(d) == 2:
            key = d[0].strip()
            if key.startswith('|'):
                key = key[1:]
            value = d[1].strip()
            if re.search(r'<br */?>', value):
                # multi-value fields are separated by <br> (or <br />) tags
                value = re.split(r'<br */?>', value)
            filmbox[key.strip()] = value
    # drop malformed amg_ids before trying the Allmovie templates
    if 'amg_id' in filmbox and not filmbox['amg_id'].isdigit():
        del filmbox['amg_id']
    if 'Allmovie movie' in data:
        filmbox['amg_id'] = find_re(data, r'Allmovie movie\|.*?(\d+)')
    elif 'Allmovie title' in data:
        filmbox['amg_id'] = find_re(data, r'Allmovie title\|.*?(\d+)')

    if 'Official website' in data:
        filmbox['website'] = find_re(data, r'Official website\|(.*?)}').strip()

    # {{IMDb title}} appears both with and without an explicit id= key
    r = re.compile(r'{{IMDb title\|id=(\d{7})', re.IGNORECASE).findall(data)
    if r:
        filmbox['imdb_id'] = r[0]
    else:
        r = re.compile(r'{{IMDb title\|(\d{7})', re.IGNORECASE).findall(data)
        if r:
            filmbox['imdb_id'] = r[0]

    r = re.compile(r'{{Internet Archive.*?\|id=(.*?)[\|}]', re.IGNORECASE).findall(data)
    if r:
        filmbox['archiveorg_id'] = r[0]

    r = re.compile(r'{{mojo title\|(.*?)[\|}]', re.IGNORECASE).findall(data)
    if r:
        filmbox['mojo_id'] = r[0].replace('id=', '')

    r = re.compile(r'{{rotten-tomatoes\|(.*?)[\|}]', re.IGNORECASE).findall(data)
    if r:
        filmbox['rottentomatoes_id'] = r[0].replace('id=', '')
    if 'google video' in data:
        filmbox['google_video_id'] = find_re(data, r'google video\|.*?(\d*?)[\|}]')
    if 'DEFAULTSORT' in data:
        filmbox['title_sort'] = find_re(data, r'\{\{DEFAULTSORT:(.*?)\}\}')
    return filmbox


def get_image_url(name):
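    # resolve an image name to its upload.wikimedia.org file URL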
    url = 'http://en.wikipedia.org/wiki/Image:' + name.replace(' ', '%20')
    data = read_url(url, unicode=True)
    url = find_re(data, 'href="(http://upload.wikimedia.org/.*?)"')
    if not url:
        # newer pages use protocol-relative links
        url = find_re(data, 'href="(//upload.wikimedia.org/.*?)"')
        if url:
            url = 'http:' + url
    return url


def get_poster_url(wikipedia_url):
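    # return the poster image URL from the film's infobox, or ''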
    if not wikipedia_url.startswith('http'):
        wikipedia_url = get_url(wikipedia_url)
    data = get_movie_data(wikipedia_url)
    if 'image' in data:
        return get_image_url(data['image'])
    return ''


def get_movie_poster(wikipedia_url):
    # deprecated, use get_poster_url()
    return get_poster_url(wikipedia_url)


def get_allmovie_id(wikipedia_url):
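    # convenience accessor for the Allmovie id in the film's infobox data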
    data = get_movie_data(wikipedia_url)
    return data.get('amg_id', '')


def find(query, max_results=10):
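    # query the MediaWiki search API, returning up to max_results
    # (title, url, '') tuples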
    query = {'action': 'query', 'list': 'search', 'format': 'json',
             'srlimit': max_results, 'srwhat': 'text', 'srsearch': query.encode('utf-8')}
    url = "http://en.wikipedia.org/w/api.php?" + urlencode(query)
    data = read_url(url)
    if not data:
        # an empty page may have been cached; timeout=0 retries uncached
        data = read_url(url, timeout=0)
    result = json.loads(data)
    results = []
    if result and 'query' in result:
        for r in result['query']['search']:
            title = r['title']
            url = "http://en.wikipedia.org/wiki/%s" % title.replace(' ', '_')
            results.append((title, url, ''))
    return results