# python-ox — ox/web/wikipedia.py
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import print_function

import re
import urllib
import urllib.parse

from ox.utils import json
from ox.cache import read_url
from ox import find_re
def get_id(url):
    # A page's id is simply the last path component of its wikipedia URL.
    return url.rpartition("/")[2]
def get_url(id=None, imdb=None, allmovie=None):
    """Return the en.wikipedia URL for a page id, or look one up by imdb/allmovie id."""
    if imdb:
        # Search for the quoted imdb id; accept the top hit only if the
        # page actually carries an imdb link, otherwise return ''.
        matches = find('"%s"' % imdb)
        if matches:
            page_url = matches[0][1]
            if 'imdb_id' in get_movie_data(page_url):
                return page_url
        return ""
    if allmovie:
        # Search for the amg_id template parameter; take the top hit as-is.
        matches = find('"amg_id = 1:%s"' % allmovie)
        return matches[0][1] if matches else ''
    return "http://en.wikipedia.org/wiki/%s" % id
def get_movie_id(title, director='', year=''):
    # Search for the film's page; return the URL of the top hit, '' if none.
    hits = find('"%s" film %s %s' % (title, director, year), 1)
    return hits[0][1] if hits else ''
def get_wiki_data(wikipedia_url):
    """Fetch the raw wikitext of the page behind a /wiki/ URL."""
    # Rewrite the pretty /wiki/ URL into the index.php form and ask for raw text.
    raw_url = wikipedia_url.replace('wikipedia.org/wiki/', 'wikipedia.org/w/index.php?title=')
    raw_url = "%s&action=raw" % raw_url
    return read_url(raw_url).decode('utf-8')
def get_movie_data(wikipedia_url):
    """Scrape film metadata from a wikipedia film page.

    wikipedia_url may also be a bare page id, which is resolved via get_url().
    Returns a dict of the page's "Infobox film" template fields, plus external
    ids picked out of the page body (imdb_id, amg_id, archiveorg_id, mojo_id,
    rottentomatoes_id, google_video_id, website, title_sort) where present.
    """
    if not wikipedia_url.startswith('http'):
        wikipedia_url = get_url(wikipedia_url)
    data = get_wiki_data(wikipedia_url)
    # Raw wikitext of the {{Infobox film ...}} template, '' if the page has none.
    filmbox_data = find_re(data, r'''\{\{[Ii]nfobox.[Ff]ilm(.*?)\n\}\}''')
    filmbox = {}
    # Template fields are separated by '|'. NOTE(review): this also splits on
    # '|' inside nested templates/links within a value, which is presumably
    # why duplicate keys are merged below — confirm.
    _box = filmbox_data.strip().split('|')
    for row in _box:
        d = row.split('=')
        if len(d) == 2:
            _key = d[0].strip()
            # Keep the previous key when this row's key part is empty,
            # so the value can be merged into the prior field below.
            if _key:
                key = _key
            # NOTE(review): rows were produced by split('|'), so a key should
            # never start with '|' here — this looks like dead code; confirm.
            if key[0] == '|':
                key = key[1:]
            key = key.strip()
            value = d[1].strip()
            value = value.replace('<!-- see WP:ALT -->', '')
            if '<br>' in value:
                # Multi-valued field: turn it into a list of parts.
                value = value.split('<br>')
            if value:
                if key in filmbox:
                    # Key seen before: merge str/list combinations.
                    if isinstance(value, list) and isinstance(filmbox[key], str):
                        filmbox[key] = [filmbox[key]] + value
                    else:
                        # str+str concatenates, list+list extends.
                        filmbox[key] += value
                        if isinstance(filmbox[key], list):
                            # Drop empty entries produced by the merge.
                            filmbox[key] = [k for k in filmbox[key] if k]
                else:
                    filmbox[key] = value
    if not filmbox_data:
        # No infobox found: skip the external-id probes below.
        return filmbox
    # Discard non-numeric amg ids (template remnants).
    # NOTE(review): if 'amg_id' was parsed as a list, .isdigit() raises
    # AttributeError — confirm whether that can happen in practice.
    if 'amg_id' in filmbox and not filmbox['amg_id'].isdigit():
        del filmbox['amg_id']
    if 'Allmovie movie' in data:
        filmbox['amg_id'] = find_re(data, r'Allmovie movie\|.*?(\d+)')
    elif 'Allmovie title' in data:
        filmbox['amg_id'] = find_re(data, r'Allmovie title\|.*?(\d+)')
    if 'Official website' in data:
        filmbox['website'] = find_re(data, r'Official website\|(.*?)}').strip()
    # imdb id: try the named {{IMDb title|id=NNNNNNN}} form first,
    # then the positional {{IMDb title|NNNNNNN}} form.
    r = re.compile(r'{{IMDb title\|id=(\d{7})', re.IGNORECASE).findall(data)
    if r:
        filmbox['imdb_id'] = r[0]
    else:
        r = re.compile(r'{{IMDb title\|(\d{7})', re.IGNORECASE).findall(data)
        if r:
            filmbox['imdb_id'] = r[0]
    r = re.compile(r'{{Internet Archive.*?\|id=(.*?)[\|}]', re.IGNORECASE).findall(data)
    if r:
        filmbox['archiveorg_id'] = r[0]
    r = re.compile(r'{{mojo title\|(.*?)[\|}]', re.IGNORECASE).findall(data)
    if r:
        filmbox['mojo_id'] = r[0].replace('id=', '')
    r = re.compile(r'{{rotten-tomatoes\|(.*?)[\|}]', re.IGNORECASE).findall(data)
    if r:
        filmbox['rottentomatoes_id'] = r[0].replace('id=', '')
    if 'google video' in data:
        filmbox['google_video_id'] = find_re(data, r'google video\|.*?(\d*?)[\|}]')
    if 'DEFAULTSORT' in data:
        # e.g. {{DEFAULTSORT:Godfather, The}} carries the sort title.
        filmbox['title_sort'] = find_re(data, r'''\{\{DEFAULTSORT:(.*?)\}\}''')
    return filmbox
def get_image_url(name):
    """Resolve a wikipedia image name to its upload.wikimedia.org file URL."""
    page_url = 'http://en.wikipedia.org/wiki/Image:' + name.replace(' ', '%20')
    page = read_url(page_url).decode('utf-8')
    # Older pages link with an absolute http:// URL ...
    url = find_re(page, 'href="(http://upload.wikimedia.org/.*?)"')
    if not url:
        # ... newer ones use a protocol-relative link that needs a scheme.
        url = find_re(page, 'href="(//upload.wikimedia.org/.*?)"')
        if url:
            url = 'http:' + url
    return url
def get_poster_url(wikipedia_url):
    """Return the poster image URL for a film page, '' if it has no image."""
    if not wikipedia_url.startswith('http'):
        wikipedia_url = get_url(wikipedia_url)
    movie = get_movie_data(wikipedia_url)
    if 'image' in movie:
        return get_image_url(movie['image'])
    return ''
def get_movie_poster(wikipedia_url):
    """Deprecated alias for get_poster_url()."""
    return get_poster_url(wikipedia_url)
def get_allmovie_id(wikipedia_url):
    """Return the film's Allmovie (amg) id as scraped from its page, '' if absent."""
    movie = get_movie_data(wikipedia_url)
    return movie.get('amg_id', '')
def find(query, max_results=10):
    """Full-text search on en.wikipedia via the MediaWiki API.

    query: search string; max_results: srlimit passed to the API.
    Returns a list of up to max_results (title, url, '') tuples,
    empty on no results or an unusable response.
    """
    # Fix: uses urllib.parse explicitly (the module-level `import urllib`
    # alone does not guarantee the `parse` submodule is bound); the dict is
    # no longer rebound over the `query` parameter.
    params = {
        'action': 'query', 'list': 'search', 'format': 'json',
        'srlimit': max_results, 'srwhat': 'text',
        'srsearch': query.encode('utf-8'),
    }
    url = "http://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode(params)
    data = read_url(url)
    if not data:
        # Cached response was empty: bypass the cache and retry once.
        data = read_url(url, timeout=0)
    result = json.loads(data.decode('utf-8'))
    results = []
    if result and 'query' in result:
        for r in result['query']['search']:
            title = r['title']
            url = "http://en.wikipedia.org/wiki/%s" % title.replace(' ', '_')
            results.append((title, url, ''))
    return results