# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from collections import defaultdict
import json
import re
import time
import unicodedata

from urllib.parse import urlencode

from .. import find_re, strip_tags, decode_html
from .. import cache

from .siteparser import SiteParser
from . import duckduckgo
from ..utils import datetime
from ..geo import normalize_country_name, get_country_name


def prepare_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None, unicode=False):
    headers = headers.copy()
    # https://webapps.stackexchange.com/questions/11003/how-can-i-disable-reconfigure-imdbs-automatic-geo-location-so-it-does-not-defau
    #headers['X-Forwarded-For'] = '72.21.206.80'
    headers['Accept-Language'] = 'en'
    return url, data, headers, timeout, unicode


def read_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None, unicode=False):
    url, data, headers, timeout, unicode = prepare_url(url, data, headers, timeout, valid, unicode)
    return cache.read_url(url, data, headers, timeout, unicode=unicode)


def delete_url(url, data=None, headers=cache.DEFAULT_HEADERS):
    url, data, headers, timeout, unicode = prepare_url(url, data, headers)
    cache.store.delete(url, data, headers)


def get_url(id):
    return "http://www.imdb.com/title/tt%s/" % id


def reference_section(id):
    return {
        'page': 'reference',
        're': [
            r'<h4 name="{id}" id="{id}".*?<table(.*?)</table>'.format(id=id),
            r'<a href="/name/.*?>(.*?)</a>'
        ],
        'type': 'list'
    }


def zebra_list(label, more=None):
    conditions = {
        'page': 'reference',
        're': [
            r'_label">' + label + r'</td>.*?<ul(.*?)</ul>',
            r'<li.*?>(.*?)</li>'
        ],
        'type': 'list',
    }
    if more:
        conditions['re'] += more
    return conditions


def zebra_table(label, more=None, type='string'):
    conditions = {
        'page': 'reference',
        're': [
            r'_label">' + label + r'</td>.*?<td>(.*?)</td>',
        ],
        'type': type,
    }
    if more:
        conditions['re'] += more
    return conditions
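

# Illustrative note (not executed by the parser): the helpers above build the
# condition dicts consumed by SiteParser. Derived directly from the code,
# zebra_list('Language', more=['<a.*?>(.*?)</a>']) returns:
#     {
#         'page': 'reference',
#         're': [
#             '_label">Language</td>.*?<ul(.*?)</ul>',
#             '<li.*?>(.*?)</li>',
#             '<a.*?>(.*?)</a>',
#         ],
#         'type': 'list',
#     }
# SiteParser is expected to apply the expressions in order, each match
# narrowing the input for the next, before coercing to the given 'type'.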


def parse_aspectratio(value):
    r = value
    if ':' in value:
        r = value.split(':')
        n = r[0]
        d = r[1].strip().split(' ')[0]
        try:
            if float(d):
                value = str(float(n) / float(d))
            else:
                value = str(float(n))
        except:
            print('failed to parse aspect: %s' % value)
    else:
        value = '.'.join(value.strip().split('.')[:2])
    return value
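

# Worked examples for parse_aspectratio, derived from the branches above
# (ratio strings as they appear on IMDb reference pages):
#   parse_aspectratio('2.35 : 1')  ->  '2.35'
#   parse_aspectratio('16:9')      ->  '1.7777777777777777'
#   parse_aspectratio('1.85')      ->  '1.85'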


def technical(label):
    return {
        'page': 'technical',
        're': [
            r'<td class="label">\s*?%s\s*?</td>.*?<td>\s*?(.*?)\s*?</td>' % label,
            lambda data: [
                re.sub(r'\s+', ' ', d.strip()) for d in data.strip().split('<br>')
            ] if data else []
        ],
        'type': 'list'
    }


def tech_spec(metadata):
    tech = {}
    for row in metadata['props']['pageProps']['contentData']['section']['items']:
        title = {
            'aspect ratio': 'aspectratio',
            'sound mix': 'sound',
        }.get(row['rowTitle'].lower(), row['rowTitle'].lower())
        tech[title] = []
        for content in row['listContent']:
            value = content['text']
            tech[title].append(value)
    return tech
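

# Sketch of the __NEXT_DATA__ fragment tech_spec() walks; field names are the
# ones read above, but the payload is IMDb's and may change without notice:
#   {'props': {'pageProps': {'contentData': {'section': {'items': [
#       {'rowTitle': 'Aspect Ratio', 'listContent': [{'text': '2.39 : 1'}]},
#   ]}}}}}
# which would yield {'aspectratio': ['2.39 : 1']}.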


def movie_connections(metadata):
    connections = {}
    if 'props' not in metadata:
        return connections
    for row in metadata['props']['pageProps']['contentData']['categories']:
        title = {
        }.get(row['name'], row['name'])
        if title not in connections:
            connections[title] = []

        for item in row['section']['items']:
            item_ = {
                'id': item['id'][2:],
            }

            item_['title'] = re.compile('<a.*?>(.*?)</a>').findall(item['listContent'][0]['html'])[0]
            if len(item['listContent']) >= 2:
                item_['description'] = strip_tags(item['listContent'][1]['html'])
            connections[title].append(item_)
    return connections


def get_category_by_id(metadata, id):
    for category in metadata['props']['pageProps']['contentData']['categories']:
        if category['id'] == id:
            return category


def get_release_date(metadata):
    releases = get_category_by_id(metadata, 'releases')

    def parse_date(d):
        parsed = None
        for fmt in (
            '%B %d, %Y',
            '%d %B %Y',
            '%B %Y',
        ):
            try:
                parsed = datetime.strptime(d, fmt)
                break
            except:
                pass
        if not parsed:
            return None
        return '%d-%02d-%02d' % (parsed.year, parsed.month, parsed.day)

    dates = []
    for item in releases['section']['items']:
        content = item['listContent'][0]
        date = parse_date(content['text'])
        if date:
            dates.append(date)

    if dates:
        return min(dates)
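

# parse_date() above expects English month names as rendered on the
# releaseinfo page; from the format strings:
#   'May 25, 1977' -> '1977-05-25'
#   '25 May 1977'  -> '1977-05-25'
#   'May 1977'     -> '1977-05-01'  (day defaults to 1)
# get_release_date() returns the earliest such date; min() works here because
# ISO-formatted strings sort chronologically.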


def get_locations(metadata):
    try:
        locations = [
            row['cardText']
            for row in metadata['props']['pageProps']['contentData']['categories'][0]['section']['items']
        ]
    except:
        locations = []
    return locations


def get_keywords(metadata):
    try:
        keywords = [
            row['rowTitle']
            for row in metadata['props']['pageProps']['contentData']['section']['items']
        ]
    except:
        keywords = []
    return keywords


def get_entity_metadata(metadata):
    data = {}
    entity = metadata['props']['pageProps']['contentData']['entityMetadata']
    data['title'] = entity['titleText']['text']
    data['originalTitle'] = entity['originalTitleText']['text']
    data['year'] = entity['releaseYear']['year']
    data['plot'] = entity['plot']['plotText']['plainText']
    data['country'] = [get_country_name(c['id']) for c in entity['countriesOfOrigin']['countries']]
    data['poster'] = metadata['props']['pageProps']['contentData']['posterData']['image']['url']
    return data


def alternative_titles(metadata):
    titles = defaultdict(list)
    akas = get_category_by_id(metadata, 'akas')

    skip = [
        metadata['props']['pageProps']['contentData']['entityMetadata']['titleText']['text'],
        metadata['props']['pageProps']['contentData']['entityMetadata']['originalTitleText']['text']
    ]
    for row in akas['section']['items']:
        content = row['listContent'][0]
        title = content['text']
        country = row['rowTitle']
        if title in skip:
            continue
        titles[title].append(country)
        #if content.get('subText'):
        #    titles[-1]['subText'] = content['subText']
    return list(titles.items())
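

# Return shape (values hypothetical): a list of (title, [countries]) pairs,
# e.g. [('Der Pate', ['Germany']), ('Le Parrain', ['France', 'Belgium'])].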


'''
    'posterIds': {
        'page': 'posters',
        're': '/unknown-thumbnail/media/rm(.*?)/tt',
        'type': 'list'
    },
'''
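
# The Imdb.regex map below follows the SiteParser convention used throughout
# this module: 'page' is the sub-page fetched relative to baseUrl, 're' is a
# single pattern or a chain of patterns/callables applied in sequence, and
# 'type' ('string', 'list', 'int', 'float') controls how the final match is
# coerced.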


class Imdb(SiteParser):
    '''
    >>> Imdb('0068646')['title'] == 'The Godfather'
    True

    >>> Imdb('0133093')['title'] == 'The Matrix'
    True
    '''
    regex = {
        'alternativeTitles': {
            'page': 'releaseinfo',
            're': [
                '<li role="presentation" class="ipc-metadata-list__item" data-testid="list-item"><button class="ipc-metadata-list-item__label" role="button" tabindex="0" aria-disabled="false">([^>]+)</button.*?<li role="presentation" class="ipc-inline-list__item"><label class="ipc-metadata-list-item__list-content-item"[^>]*?>([^<]+)</label>',
            ],
            'type': 'list'
        },
        'aspectratio': {
            'page': 'reference',
            're': [
                r'Aspect Ratio</td>.*?ipl-inline-list__item">\s+([\d\.\:\ ]+)',
                parse_aspectratio,
            ],
            'type': 'float',
        },
        'budget': zebra_table('Budget', more=[
            lambda data: find_re(decode_html(data).replace(',', ''), r'\d+')
        ], type='int'),
        'cast': {
            'page': 'reference',
            're': [
                ' <table class="cast_list">(.*?)</table>',
                '<td.*?itemprop="actor".*?>.*?>(.*?)</a>.*?<td class="character">(.*?)</td>',
                lambda ll: [strip_tags(l) for l in ll] if isinstance(ll, list) else strip_tags(ll)
            ],
            'type': 'list'
        },
        'cinematographer': reference_section('cinematographers'),
        'country': zebra_list('Country', more=['<a.*?>(.*?)</a>']),
        'director': reference_section('directors'),
        'editor': reference_section('editors'),
        'composer': reference_section('composers'),
        'episodeTitle': {
            'page': 'reference',
            're': '<h3 itemprop="name">(.*?)<',
            'type': 'string'
        },
        'genre': zebra_list('Genres', more=['<a.*?>(.*?)</a>']),
        'gross': zebra_table('Cumulative Worldwide Gross', more=[
            lambda data: find_re(decode_html(data).replace(',', ''), r'\d+')
        ], type='int'),
        'language': zebra_list('Language', more=['<a.*?>(.*?)</a>']),
        'originalTitle': {
            'page': 'releaseinfo',
            're': r'<li role="presentation" class="ipc-metadata-list__item" data-testid="list-item"><button class="ipc-metadata-list-item__label" role="button" tabindex="0" aria-disabled="false">\(original title\)</button.*?<li role="presentation" class="ipc-inline-list__item"><label class="ipc-metadata-list-item__list-content-item"[^>]*?>([^<]+)</label>',
            'type': 'string'
        },
        'summary': zebra_table('Plot Summary', more=[
            '<p>(.*?)<em'
        ]),
        'storyline': {
            'page': '',
            're': r'<h2>Storyline</h2>.*?<p>(.*?)</p>',
            'type': 'string'
        },
        'posterId': {
            'page': 'reference',
            're': '<img.*?class="titlereference-primary-image".*?src="(.*?)".*?>',
            'type': 'string'
        },
        'producer': reference_section('producers'),
        'productionCompany': {
            'page': 'reference',
            're': [
                r'Production Companies.*?<ul(.*?)</ul>',
                r'<a href="/company/.*?/">(.*?)</a>'
            ],
            'type': 'list'
        },
        'rating': {
            'page': 'reference',
            're': [
                r'<div class="ipl-rating-star ">(.*?)</div>',
                r'ipl-rating-star__rating">([\d,.]+?)</span>',
            ],
            'type': 'float'
        },
        #FIXME using some /offsite/ redirect now
        #'reviews': {
        #    'page': 'externalreviews',
        #    're': [
        #        '<ul class="simpleList">(.*?)</ul>',
        #        '<li>.*?<a href="(http.*?)".*?>(.*?)</a>.*?</li>'
        #    ],
        #    'type': 'list'
        #},
        'runtime': zebra_list('Runtime'),
        'color': zebra_list('Color', more=[
            '<a.*?>([^(<]+)',
            lambda r: r[0] if isinstance(r, list) else r,
            strip_tags
        ]),
        'season': {
            'page': 'reference',
            're': [
                r'<ul class="ipl-inline-list titlereference-overview-season-episode-numbers">(.*?)</ul>',
                r'Season (\d+)',
            ],
            'type': 'int'
        },
        'episode': {
            'page': 'reference',
            're': [
                r'<ul class="ipl-inline-list titlereference-overview-season-episode-numbers">(.*?)</ul>',
                r'Episode (\d+)',
            ],
            'type': 'int'
        },
        'series': {
            'page': 'reference',
            're': r'<h4 itemprop="name">.*?<a href="/title/tt(\d+)',
            'type': 'string'
        },
        'isSeries': {
            'page': 'reference',
            're': r'property=\'og:title\'.*?content=".*?(TV series|TV mini-series).*?"',
            'type': 'string'
        },
        'title': {
            'page': 'releaseinfo',
            're': r'<h2.*?>(.*?)</h2>',
            'type': 'string'
        },
        'trivia': {
            'page': 'trivia',
            're': [
                r'<div class="sodatext">(.*?)<(br|/div)',
                lambda data: data[0]
            ],
            'type': 'list',
        },
        'votes': {
            'page': 'reference',
            're': [
                r'class="ipl-rating-star__total-votes">\((.*?)\)',
                lambda r: r.replace(',', '')
            ],
            'type': 'string'
        },
        'writer': reference_section('writers'),
        'year': {
            'page': 'reference',
            're': [
                r'<span class="titlereference-title-year">(.*?)</span>',
                r'<a.*?>(\d+)',
            ],
            'type': 'int'
        },
        'credits': {
            'page': 'fullcredits',
            're': [
                lambda data: data.split('<h4'),
                r'>(.*?)</h4>.*?(<table.*?</table>)',
                lambda data: [d for d in data if d]
            ],
            'type': 'list'
        },
        'laboratory': technical('Laboratory'),
        'camera': technical('Camera'),
    }

    def read_url(self, url, timeout):
        if self.debug:
            print(url)
        if url not in self._cache:
            self._cache[url] = read_url(url, timeout=timeout, unicode=True)
        return self._cache[url]

    def get_page_data(self, page, timeout=-1):
        url = self.get_url(page)
        data = self.read_url(url, timeout)
        pdata = re.compile('<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', re.DOTALL).findall(data)
        if pdata:
            pdata = pdata[0]
            return json.loads(pdata)
        return {}
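
    # get_page_data() relies on IMDb embedding its page state as JSON inside a
    # <script id="__NEXT_DATA__"> tag (a Next.js convention). If the tag is
    # missing it returns {}, so callers must tolerate an empty dict.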

    def __init__(self, id, timeout=-1):
        # http://www.imdb.com/help/show_leaf?titlelanguagedisplay
        self.baseUrl = "http://www.imdb.com/title/tt%s/" % id
        self._id = id
        if timeout != 0:
            self._cache = {}
            url = self.baseUrl + 'releaseinfo'
            page = self.read_url(url, timeout=-1)
            if '<h2>See also</h2>' in page:
                timeout = 0
        super(Imdb, self).__init__(timeout)

        url = self.baseUrl + 'reference'
        page = self.read_url(url, timeout=-1)
        if '<title>IMDb: Page not found</title>' in page \
            or 'The requested URL was not found on our server.' in page:
            return
        if "<p>We're sorry, something went wrong.</p>" in page:
            time.sleep(1)
            super(Imdb, self).__init__(0)

        if 'alternativeTitles' in self:
            if len(self['alternativeTitles']) == 2 and \
               isinstance(self['alternativeTitles'][0], str):
                self['alternativeTitles'] = [self['alternativeTitles']]

        for key in ('country', 'genre', 'language', 'sound', 'color'):
            if key in self:
                self[key] = [x[0] if len(x) == 1 and isinstance(x, list) else x for x in self[key]]
                self[key] = list(filter(lambda x: x.lower() != 'home', self[key]))

        #normalize country names
        if 'country' in self:
            self['country'] = [normalize_country_name(c) or c for c in self['country']]

        def cleanup_title(title):
            if isinstance(title, list):
                title = title[0]
            if title.startswith('"') and title.endswith('"'):
                title = title[1:-1]
            if title.startswith("'") and title.endswith("'"):
                title = title[1:-1]
            title = re.sub(r'\(\#[.\d]+\)', '', title)
            return title.strip()

        for t in ('title', 'originalTitle'):
            if t in self:
                self[t] = cleanup_title(self[t])

        if 'alternativeTitles' in self:
            alt = {}
            for t in self['alternativeTitles']:
                if t[0].strip() in ('World-wide (English title)', ):
                    self['title'] = cleanup_title(t[1])
            for t in self['alternativeTitles']:
                title = cleanup_title(t[1])
                if title.lower() not in (self.get('title', '').lower(), self.get('originalTitle', '').lower()):
                    if title not in alt:
                        alt[title] = []
                    for c in t[0].split('/'):
                        for cleanup in ('International', '(working title)', 'World-wide'):
                            c = c.replace(cleanup, '')
                        c = c.split('(')[0].strip()
                        if c:
                            alt[title].append(c)
            self['alternativeTitles'] = []
            for t in sorted(alt, key=lambda a: sorted(alt[a])):
                countries = sorted(set([normalize_country_name(c) or c for c in alt[t]]))
                self['alternativeTitles'].append((t, countries))
            if not self['alternativeTitles']:
                del self['alternativeTitles']

        if 'runtime' in self and self['runtime']:
            if isinstance(self['runtime'], list):
                self['runtime'] = self['runtime'][0]
            if 'min' in self['runtime']:
                base = 60
            else:
                base = 1
            self['runtime'] = int(find_re(self['runtime'], '([0-9]+)')) * base
        if 'runtime' in self and not self['runtime']:
            del self['runtime']

        if 'sound' in self:
            self['sound'] = list(sorted(set(self['sound'])))

        if 'cast' in self:
            if isinstance(self['cast'][0], str):
                self['cast'] = [self['cast']]
            self['actor'] = [c[0] for c in self['cast']]

            def cleanup_character(c):
                c = c.replace('(uncredited)', '').strip()
                c = re.sub(r'\s+', ' ', c)
                return c
            self['cast'] = [{'actor': x[0], 'character': cleanup_character(x[1])}
                            for x in self['cast']]

        if 'isSeries' in self:
            del self['isSeries']
            self['isSeries'] = True
            if 'episodeTitle' in self:
                self['episodeTitle'] = re.sub(r'Episode \#\d+\.\d+', '', self['episodeTitle'])

        #make lists unique but keep order
        for key in ('director', 'language'):
            if key in self:
                self[key] = [x for i, x in enumerate(self[key])
                             if x not in self[key][i+1:]]

        for key in ('actor', 'writer', 'producer', 'editor', 'composer'):
            if key in self:
                if isinstance(self[key][0], list):
                    self[key] = [i[0] for i in self[key] if i]
                self[key] = sorted(list(set(self[key])), key=lambda a: self[key].index(a))

        if 'budget' in self and 'gross' in self:
            self['profit'] = self['gross'] - self['budget']

        metadata = self.get_page_data('releaseinfo')
        releasedate = get_release_date(metadata)
        if releasedate:
            self['releasedate'] = releasedate

        metadata = self.get_page_data('keywords')
        keywords = get_keywords(metadata)
        if keywords:
            self['keyword'] = keywords

        metadata = self.get_page_data('locations')
        locations = get_locations(metadata)
        if locations:
            self['filmingLocations'] = locations

        if 'summary' not in self and 'storyline' in self:
            self['summary'] = self.pop('storyline')
        if 'summary' in self:
            if isinstance(self['summary'], list):
                self['summary'] = self['summary'][0]
            self['summary'] = strip_tags(self['summary'].split('</p')[0]).split(' Written by\n')[0].strip()
        else:
            # metadata here still holds the last page fetched ('locations');
            # the plot text is read best-effort from its entityMetadata block.
            try:
                summary = metadata['props']['pageProps']['contentData']['entityMetadata']['plot']['plotText']['plainText']
                self['summary'] = summary
            except:
                pass

        #self['connections'] = movie_connections(self.get_page_data('movieconnections'))
        self['connections'] = self._get_connections()

        spec = tech_spec(self.get_page_data('technical'))
        for key in spec:
            if not self.get(key):
                self[key] = spec[key]

        if 'credits' in self:
            credits = [
                [
                    strip_tags(d[0].replace(' by', '')).strip(),
                    [
                        [
                            strip_tags(x[0]).strip(),
                            [t.strip().split(' (')[0].strip() for t in x[2].split(' / ')]
                        ]
                        for x in
                        re.compile('<td class="name">(.*?)</td>.*?<td>(.*?)</td>.*?<td class="credit">(.*?)</td>', re.DOTALL).findall(d[1])
                    ]
                ] for d in self['credits'] if d
            ]
            credits = [c for c in credits if c[1]]

            self['credits'] = []
            self['lyricist'] = []
            self['singer'] = []
            for department, crew in credits:
                department = department.replace('(in alphabetical order)', '').strip()
                for c in crew:
                    name = c[0]
                    roles = c[1]
                    self['credits'].append({
                        'name': name,
                        'roles': roles,
                        'department': department
                    })
                    if department == 'Music Department':
                        if 'lyricist' in roles:
                            self['lyricist'].append(name)
                        if 'playback singer' in roles:
                            self['singer'].append(name)
            if not self['credits']:
                del self['credits']

        if 'credits' in self:
            for key, department in (
                ('director', 'Series Directed'),
                ('writer', 'Series Writing Credits'),
                ('cinematographer', 'Series Cinematography'),
            ):
                if key not in self:
                    series_credit = [c for c in self['credits'] if c.get('department') == department]
                    if series_credit:
                        self[key] = [c['name'] for c in series_credit]
        creator = []
        for c in self.get('credits', []):
            if '(created by)' in c['roles'] and c['name'] not in creator:
                creator.append(c['name'])
            if '(creator)' in c['roles'] and c['name'] not in creator:
                creator.append(c['name'])
        if creator:
            self['creator'] = creator

        if 'series' in self:
            series = Imdb(self['series'], timeout=timeout)
            self['seriesTitle'] = series['title']
            if 'episodeTitle' in self:
                self['seriesTitle'] = series['title']
                if 'season' in self and 'episode' in self:
                    self['title'] = "%s (S%02dE%02d) %s" % (
                        self['seriesTitle'], self['season'], self['episode'], self['episodeTitle'])
                else:
                    self['title'] = "%s (S01) %s" % (self['seriesTitle'], self['episodeTitle'])
                    self['season'] = 1
                self['title'] = self['title'].strip()
                if 'director' in self:
                    self['episodeDirector'] = self['director']

            if 'creator' not in series and 'director' in series:
                series['creator'] = series['director']
                if len(series['creator']) > 10:
                    series['creator'] = series['director'][:1]

            for key in ['creator', 'country']:
                if key in series:
                    self[key] = series[key]

            if 'year' in series:
                self['seriesYear'] = series['year']
                if 'year' not in self:
                    self['year'] = series['year']

            if 'year' in self:
                self['episodeYear'] = self['year']
            if 'creator' in self:
                self['seriesDirector'] = self['creator']
            if 'originalTitle' in self:
                del self['originalTitle']
        else:
            for key in ('seriesTitle', 'episodeTitle', 'season', 'episode'):
                if key in self:
                    del self[key]
        if 'creator' in self:
            if 'director' in self:
                self['episodeDirector'] = self['director']
            self['director'] = self['creator']

    def _get_connections(self):
        query = '''query {
            title(id: "tt%s") {
                id
                titleText {
                    text
                }
                connections(first: 5000) {
                    edges {
                        node {
                            associatedTitle {
                                id
                                titleText {
                                    text
                                }
                            }
                            category {
                                text
                            }
                            text
                        }
                    }
                }
            }
        }
        ''' % self._id
        url = 'https://caching.graphql.imdb.com/'
        headers = cache.DEFAULT_HEADERS.copy()
        headers.update({
            'Accept': 'application/graphql+json, application/json',
            'Origin': 'https://www.imdb.com',
            'Referer': 'https://www.imdb.com',
            'x-imdb-user-country': 'US',
            'x-imdb-user-language': 'en-US',
            'content-type': 'application/json',
            'Accept-Language': 'en,en-US;q=0.5'
        })
        response = json.loads(read_url(url, data=json.dumps({
            "query": query
        }), headers=headers))
        connections = {}
        for c in response['data']['title']['connections']['edges']:
            cat = c['node']['category']['text']
            if cat not in connections:
                connections[cat] = []
            connection = {
                'id': c['node']['associatedTitle']['id'][2:],
                'title': c['node']['associatedTitle']['titleText']['text'],
            }
            description = c['node'].get('text', '')
            if description:
                connection['description'] = description
            connections[cat].append(connection)
        return connections
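
    # _get_connections() queries IMDb's public GraphQL endpoint directly
    # instead of scraping /movieconnections. The result maps a category name
    # to [{'id', 'title', 'description'?}, ...]; the leading 'tt' is stripped
    # from ids to match the rest of this module.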


class ImdbCombined(Imdb):
    def __init__(self, id, timeout=-1):
        _regex = {}
        for key in self.regex:
            if self.regex[key]['page'] in ('releaseinfo', 'reference'):
                _regex[key] = self.regex[key]
        self.regex = _regex
        super(ImdbCombined, self).__init__(id, timeout)
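
    # ImdbCombined keeps only the keys scraped from the 'releaseinfo' and
    # 'reference' pages, so it needs far fewer requests than a full Imdb()
    # parse; get_movie_poster() below uses it for exactly that reason.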


def get_movie_by_title(title, timeout=-1):
    '''
    This only works for exact title matches from the data dump

    Usually in the format
        Title (Year)
        "Series Title" (Year) {(#Season.Episode)}
        "Series Title" (Year) {Episode Title (#Season.Episode)}

    If there is more than one film with that title for the year
        Title (Year/I)

    >>> str(get_movie_by_title(u'"Father Knows Best" (1954) {(#5.34)}'))
    '1602860'

    >>> str(get_movie_by_title(u'The Matrix (1999)'))
    '0133093'

    >>> str(get_movie_by_title(u'Little Egypt (1951)'))
    '0043748'

    >>> str(get_movie_by_title(u'Little Egypt (1897/I)'))
    '0214882'

    >>> get_movie_by_title(u'Little Egypt')
    None

    >>> str(get_movie_by_title(u'"Dexter" (2006) {Father Knows Best (#1.9)}'))
    '0866567'
    '''
    params = {'s': 'tt', 'q': title}
    if not isinstance(title, bytes):
        try:
            params['q'] = unicodedata.normalize('NFKC', params['q']).encode('latin-1')
        except:
            params['q'] = params['q'].encode('utf-8')
    params = urlencode(params)
    url = "http://www.imdb.com/find?" + params
    data = read_url(url, timeout=timeout, unicode=True)
    #if search results in redirect, get id of current page
    r = r'<meta property="og:url" content="http://www.imdb.com/title/tt(\d+)/" />'
    results = re.compile(r).findall(data)
    if results:
        return results[0]
    return None


def get_movie_id(title, director='', year='', timeout=-1):
    '''
    >>> str(get_movie_id('The Matrix'))
    '0133093'

    >>> str(get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard'))
    '0060304'

    >>> str(get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard', '1967'))
    '0060304'

    >>> str(get_movie_id(u"Histoire(s) du cinema: Le controle de l'univers", u'Jean-Luc Godard'))
    '0179214'

    >>> str(get_movie_id(u"Histoire(s) du cinéma: Le contrôle de l'univers", u'Jean-Luc Godard'))
    '0179214'
    '''
    imdbId = {
        (u'Le jour se l\xe8ve', u'Marcel Carn\xe9'): '0031514',
        (u'Wings', u'Larisa Shepitko'): '0061196',
        (u'The Ascent', u'Larisa Shepitko'): '0075404',
        (u'Fanny and Alexander', u'Ingmar Bergman'): '0083922',
        (u'Torment', u'Alf Sj\xf6berg'): '0036914',
        (u'Crisis', u'Ingmar Bergman'): '0038675',
        (u'To Joy', u'Ingmar Bergman'): '0043048',
        (u'Humain, trop humain', u'Louis Malle'): '0071635',
        (u'Place de la R\xe9publique', u'Louis Malle'): '0071999',
        (u'God\u2019s Country', u'Louis Malle'): '0091125',
        (u'Flunky, Work Hard', u'Mikio Naruse'): '0022036',
        (u'The Courtesans of Bombay', u'Richard Robbins'): '0163591',
        (u'Je tu il elle', u'Chantal Akerman'): '0071690',
        (u'Hotel Monterey', u'Chantal Akerman'): '0068725',
        (u'No Blood Relation', u'Mikio Naruse'): '0023261',
        (u'Apart from You', u'Mikio Naruse'): '0024214',
        (u'Every-Night Dreams', u'Mikio Naruse'): '0024793',
        (u'Street Without End', u'Mikio Naruse'): '0025338',
        (u'Sisters of the Gion', u'Kenji Mizoguchi'): '0027672',
        (u'Osaka Elegy', u'Kenji Mizoguchi'): '0028021',
        (u'Blaise Pascal', u'Roberto Rossellini'): '0066839',
        (u'Japanese Girls at the Harbor', u'Hiroshi Shimizu'): '0160535',
        (u'The Private Life of Don Juan', u'Alexander Korda'): '0025681',
        (u'Last Holiday', u'Henry Cass'): '0042665',
        (u'A Colt Is My Passport', u'Takashi Nomura'): '0330536',
        (u'Androcles and the Lion', u'Chester Erskine'): '0044355',
        (u'Major Barbara', u'Gabriel Pascal'): '0033868',
        (u'Come On Children', u'Allan King'): '0269104',

        (u'Jimi Plays Monterey & Shake! Otis at Monterey', u'D. A. Pennebaker and Chris Hegedus'): '',
        (u'Martha Graham: Dance on Film', u'Nathan Kroll'): '',
        (u'Carmen', u'Carlos Saura'): '0085297',
        (u'The Story of a Cheat', u'Sacha Guitry'): '0028201',
        (u'Weekend', 'Andrew Haigh'): '1714210',
    }.get((title, director), None)
    if imdbId:
        return imdbId
    params = {'s': 'tt', 'q': title}
    if director:
        params['q'] = u'"%s" %s' % (title, director)
    if year:
        params['q'] = u'"%s (%s)" %s' % (title, year, director)
    google_query = "site:imdb.com %s" % params['q']
    if not isinstance(params['q'], bytes):
        try:
            params['q'] = unicodedata.normalize('NFKC', params['q']).encode('latin-1')
        except:
            params['q'] = params['q'].encode('utf-8')
    params = urlencode(params)
    url = "http://www.imdb.com/find?" + params
    data = read_url(url, timeout=timeout, unicode=True)
    #if search results in redirect, get id of current page
    r = r'<meta property="og:url" content="http://www.imdb.com/title/tt(\d+)/" />'
    results = re.compile(r).findall(data)
    if results:
        return results[0]
    #otherwise get first result
    r = r'<td valign="top">.*?<a href="/title/tt(\d+)/"'
    results = re.compile(r).findall(data)
    if results:
        return results[0]

    #results = google.find(google_query, timeout=timeout)
    results = duckduckgo.find(google_query, timeout=timeout)
    if results:
        for r in results[:2]:
            imdbId = find_re(r[1], r'title/tt(\d+)')
            if imdbId:
                return imdbId
    #or nothing
    return ''
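

# get_movie_id() tries, in order: the hand-maintained (title, director) table
# above, an IMDb /find query (og:url redirect match, then the first result
# row), and finally a site:imdb.com search via duckduckgo. It returns '' when
# everything fails.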


def get_movie_poster(imdbId):
    '''
    >>> get_movie_poster('0133093')
    'http://ia.media-imdb.com/images/M/MV5BMjEzNjg1NTg2NV5BMl5BanBnXkFtZTYwNjY3MzQ5._V1._SX338_SY475_.jpg'
    '''
    info = ImdbCombined(imdbId)
    if 'posterId' in info:
        poster = info['posterId']
        if '@._V' in poster:
            poster = poster.split('@._V')[0] + '@.jpg'
        return poster
    elif 'series' in info:
        return get_movie_poster(info['series'])
    return ''


def get_episodes(imdbId, season=None):
    episodes = {}
    url = 'http://www.imdb.com/title/tt%s/episodes' % imdbId
    if season:
        url += '?season=%d' % season
        data = cache.read_url(url).decode()
        for e in re.compile(r'<div data-const="tt(\d+)".*?>.*?<div>S(\d+), Ep(\d+)<\/div>\n<\/div>', re.DOTALL).findall(data):
            episodes['S%02dE%02d' % (int(e[1]), int(e[2]))] = e[0]
    else:
        # cache.read_url returns bytes; decode before regexing, mirroring the
        # season branch above.
        data = cache.read_url(url).decode()
        match = re.compile(r'<strong>Season (\d+)</strong>').findall(data)
        if match:
            for season in range(1, int(match[0]) + 1):
                episodes.update(get_episodes(imdbId, season))
    return episodes
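

# Usage sketch (requires network access; episode ids hypothetical):
#   get_episodes('0306414')  ->  {'S01E01': '1234567', 'S01E02': '2345678', ...}
# Without a season argument the season count is read from the episodes page
# and the function recurses once per season.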


def max_votes():
    url = 'http://www.imdb.com/search/title?num_votes=500000,&sort=num_votes,desc'
    data = cache.read_url(url).decode('utf-8', 'ignore')
    votes = max([
        int(v.replace(',', ''))
        for v in re.compile(r'Votes</span>.*?([\d,]+)', re.DOTALL).findall(data)
    ])
    return votes


def guess(title, director='', timeout=-1):
    return get_movie_id(title, director, timeout=timeout)


if __name__ == "__main__":
    print(json.dumps(Imdb('0306414'), indent=2))
    #print(json.dumps(Imdb('0133093'), indent=2))