diff --git a/ox/api.py b/ox/api.py
index 51742c2..159aec7 100644
--- a/ox/api.py
+++ b/ox/api.py
@@ -201,7 +201,7 @@ class API(object):
                 return False
             if data['status']['code'] != 200:
                 print("request returned error, will try again in 5 seconds")
-                if DEBUG:
+                if self.DEBUG:
                     print(data)
                 time.sleep(5)
         if data and data.get('result') == 1:
diff --git a/ox/cache.py b/ox/cache.py
index 904f31d..b5f9a9e 100644
--- a/ox/cache.py
+++ b/ox/cache.py
@@ -16,6 +16,7 @@ from six import PY2
 try:
     import requests
     USE_REQUESTS = True
+    requests_session = requests.Session()
 except:
     USE_REQUESTS = False
 
@@ -101,7 +102,7 @@ def read_url(url, data=None, headers=None, timeout=cache_timeout, valid=None, un
             url_headers = {}
     if not result:
         if USE_REQUESTS:
-            r = requests.get(url, headers=headers)
+            r = requests_session.get(url, headers=headers)
             for key in r.headers:
                 url_headers[key.lower()] = r.headers[key]
             result = r.content
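
A module-level Session makes ox.cache reuse pooled TCP/TLS connections across
cache misses to the same host instead of handshaking once per request. A
standalone sketch of the pattern (names and URLs are illustrative, not part of
the patch):

    import requests

    session = requests.Session()

    def fetch(url, headers=None):
        # consecutive calls to the same host reuse one pooled connection
        r = session.get(url, headers=headers or {})
        r.raise_for_status()
        return r.content

    fetch('https://example.com/a')
    fetch('https://example.com/b')
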
diff --git a/ox/file.py b/ox/file.py
index ab789a3..ec9da4b 100644
--- a/ox/file.py
+++ b/ox/file.py
@@ -159,51 +159,10 @@ def avinfo(filename, cached=True):
     if os.path.getsize(filename):
         if find_executable('ffprobe'):
             return ffprobe(filename)
-        ffmpeg2theora = cmd('ffmpeg2theora')
-        p = subprocess.Popen([ffmpeg2theora], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        stdout, error = p.communicate()
-        stdout = stdout.decode('utf-8')
-        version = stdout.split('\n')[0].split(' - ')[0].split(' ')[-1]
-        if version < '0.27':
-            raise EnvironmentError('version of ffmpeg2theora needs to be 0.27 or later, found %s' % version)
-        p = subprocess.Popen([ffmpeg2theora, '--info', filename],
-            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        stdout, error = p.communicate()
-        stdout = stdout.decode('utf-8')
-        try:
-            info = json.loads(stdout)
-        except:
-            # remove metadata, can be broken
-            reg = re.compile('"metadata": {.*?},', re.DOTALL)
-            stdout = re.sub(reg, '', stdout)
-            info = json.loads(stdout)
-        if 'video' in info:
-            for v in info['video']:
-                if 'display_aspect_ratio' not in v and 'width' in v:
-                    v['display_aspect_ratio'] = '%d:%d' % (v['width'], v['height'])
-                    v['pixel_aspect_ratio'] = '1:1'
-        if len(info.get('audio', [])) > 1:
-            if 'metadata' in info['audio'][0]:
-                for stream in info['audio']:
-                    language = stream.get('metadata', {}).get('language')
-                    if language and language != 'und':
-                        stream['language'] = language[0]
-            else:
-                ffmpeg = cmd('ffmpeg')
-                p = subprocess.Popen([ffmpeg, '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                stdout, stderr = p.communicate()
-                stderr = stderr.decode('utf-8')
-                languages = [re.compile('\((.+?)\):').findall(l) for l in stderr.split('\n') if 'Stream' in l and 'Audio' in l]
-                if len(languages) == len(info['audio']):
-                    for i, stream in enumerate(info['audio']):
-                        language = languages[i]
-                        if language and language[0] != 'und':
-                            stream['language'] = language[0]
-        fix_coverart(info)
-        return info
-
+        raise EnvironmentError('could not find ffprobe, please install ffmpeg')
     return {'path': filename, 'size': 0}
 
+
 def ffprobe(filename):
     p = subprocess.Popen([
         cmd('ffprobe'),
@@ -293,6 +252,22 @@ def ffprobe(filename):
                         'sample_aspect_ratio': 'pixel_aspect_ratio',
                     }.get(key, key)] = fix_value(key, s[key])
             info[s['codec_type']].append(stream)
+        elif s.get('codec_type') == 'subtitle':
+            info['subtitles'] = info.get('subtitles', [])
+            stream = {}
+            if language and language != 'und':
+                stream['language'] = language
+            for key in (
+                'codec_name',
+                'language',
+                'width',
+                'height',
+            ):
+                if key in s:
+                    stream[{
+                        'codec_name': 'codec',
+                    }.get(key, key)] = s[key]
+            info['subtitles'].append(stream)
         else:
             pass
             # print s
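
With the ffmpeg2theora fallback removed, ffprobe is the only probing backend.
For reference, a roughly equivalent standalone probe — the flags are standard
ffprobe options, though the exact set ox passes may differ:

    import json
    import subprocess

    def probe(path):
        # ffprobe emits one JSON document describing container and streams
        out = subprocess.check_output([
            'ffprobe', '-v', 'error',
            '-print_format', 'json',
            '-show_format', '-show_streams',
            path,
        ])
        return json.loads(out)

    # with the hunk above, subtitle streams end up under info['subtitles'],
    # e.g. {'codec': 'subrip', 'language': 'en'}
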

diff --git a/ox/web/imdb.py b/ox/web/imdb.py
index 4821b0c..fb109be 100644
--- a/ox/web/imdb.py
+++ b/ox/web/imdb.py
@@ -23,6 +23,8 @@ def prepare_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cac
     headers = headers.copy()
     # https://webapps.stackexchange.com/questions/11003/how-can-i-disable-reconfigure-imdbs-automatic-geo-location-so-it-does-not-defau
     headers['X-Forwarded-For'] = '72.21.206.80'
+    headers['Accept-Language'] = 'en'
+
     return url, data, headers, timeout, unicode
 
 def read_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None, unicode=False):
@@ -34,7 +36,7 @@ def delete_url(url, data=None, headers=cache.DEFAULT_HEADERS):
     cache.store.delete(url, data, headers)
 
 def get_url(id):
-    return "http://akas.imdb.com/title/tt%s/" % id
+    return "http://www.imdb.com/title/tt%s/" % id
 
 
 def reference_section(id):
@@ -124,8 +126,8 @@ class Imdb(SiteParser):
         'alternativeTitles': {
             'page': 'releaseinfo',
             're': [
-                '<h5[^>]*?id="akas"[^>]*?>(.*?)</table>',
-                "<td>(.*?)</td>.*?<td>(.*?)</td>"
+                '<h4[^>]*?id="akas"[^>]*?>(.*?)</table>',
+                "<td[^>]*?>(.*?)</td>.*?<td[^>]*?>(.*?)</td>"
             ],
             'type': 'list'
        },

@@ -267,7 +274,7 @@ class Imdb(SiteParser):
         },
         'series': {
             'page': 'reference',
-            're': '<h4 itemprop="name">.*?<a href="/title/tt(\d{7})',
+            're': '<h4 itemprop="name">.*?<a href="/title/tt(\d{6}\d+)',
             'type': 'string'
         },

@@ -517,10 +527,13 @@ class Imdb(SiteParser):
         ])
         if self['releasedate'] == 'x':
             del self['releasedate']
+
+        if 'summary' not in self and 'storyline' in self:
+            self['summary'] = self.pop('storyline')
         if 'summary' in self:
             if isinstance(self['summary'], list):
                 self['summary'] = self['summary'][0]
-            self['summary'] = self['summary'].split('<br')[0].strip()
+            self['summary'] = strip_tags(decode_html(self['summary'])).strip()
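
The fallback only fires when the regular plot-summary parse came up empty, so
a parsed summary still wins over the storyline text. Worked example of the two
added lines:

    info = {'storyline': 'A drifter arrives in a small town.'}
    if 'summary' not in info and 'storyline' in info:
        info['summary'] = info.pop('storyline')
    # info == {'summary': 'A drifter arrives in a small town.'}
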

diff --git a/ox/web/piratecinema.py b/ox/web/piratecinema.py
index 4ed946b..c452f04 100644
--- a/ox/web/piratecinema.py
+++ b/ox/web/piratecinema.py
@@ -8,7 +8,7 @@ from ox.net import read_url
 def get_poster_url(id):
     url = 'http://piratecinema.org/posters/'
     html = read_url(url).decode('utf-8')
-    results = re.compile('src="(.+)" title=".+\((\d{7})\)"').findall(html)
+    results = re.compile('src="(.+)" title=".+\((\d{6}\d+)\)"').findall(html)
     for result in results:
         if result[1] == id:
             return url + result[0]
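
'\d{6}\d+' requires at least seven digits, so the pattern keeps matching
classic 7-digit IMDb ids while also accepting the newer 8-digit ones:

    import re

    pattern = re.compile(r'\((\d{6}\d+)\)')
    assert pattern.search('(0133093)')      # 7 digits: matches
    assert pattern.search('(10872600)')     # 8 digits: matches
    assert not pattern.search('(12345)')    # too short: no match
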
diff --git a/ox/web/siteparser.py b/ox/web/siteparser.py
index 61a79bd..8c212bf 100644
--- a/ox/web/siteparser.py
+++ b/ox/web/siteparser.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
 import re
+from multiprocessing.pool import ThreadPool
 
 from six import string_types
 
@@ -28,6 +29,7 @@ def cleanup(key, data, data_type):
 class SiteParser(dict):
     baseUrl = ''
     regex = {}
+    pool = ThreadPool(8)
 
     def get_url(self, page):
         return "%s%s" % (self.baseUrl, page)
@@ -39,6 +41,9 @@ class SiteParser(dict):
 
     def __init__(self, timeout=-1):
         self._cache = {}
+        urls = list(set(self.get_url(self.regex[key]['page']) for key in self.regex))
+        self.pool.map(lambda url: self.read_url(url, timeout), urls)
+
         for key in self.regex:
             url = self.get_url(self.regex[key]['page'])
             data = self.read_url(url, timeout)
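
SiteParser now collects every distinct page its regexes reference and fetches
them through the thread pool before the sequential parsing loop runs; since
read_url() stores each response in self._cache, the loop is then served from
cache. pool.map blocks until all fetches finish. A minimal sketch of the same
pattern, with fetch() standing in for any cache-filling function:

    from multiprocessing.pool import ThreadPool

    page_cache = {}

    def fetch(url):
        # stand-in for SiteParser.read_url: fill the cache, ignore the result
        page_cache[url] = 'page body for %s' % url

    pool = ThreadPool(8)  # at most eight pages in flight at once
    pool.map(fetch, ['http://example.com/a', 'http://example.com/b'])
    assert len(page_cache) == 2
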
diff --git a/ox/web/wikipedia.py b/ox/web/wikipedia.py
index cb73758..de8b064 100644
--- a/ox/web/wikipedia.py
+++ b/ox/web/wikipedia.py
@@ -17,7 +17,7 @@ def get_id(url):
 
 def get_url(id=None, imdb=None, allmovie=None):
     if imdb:
-        query = '"%s"'% imdb
+        query = '"%s"' % imdb
         result = find(query)
         if result:
             url = result[0][1]
@@ -26,7 +26,7 @@ def get_url(id=None, imdb=None, allmovie=None):
                 return url
         return ""
     if allmovie:
-        query = '"amg_id = 1:%s"'% allmovie
+        query = '"amg_id = 1:%s"' % allmovie
         result = find(query)
         if result:
             url = result[0][1]
@@ -140,7 +140,7 @@ def get_allmovie_id(wikipedia_url):
     return data.get('amg_id', '')
 
 def find(query, max_results=10):
-    query = {'action': 'query', 'list':'search', 'format': 'json',
+    query = {'action': 'query', 'list': 'search', 'format': 'json',
         'srlimit': max_results, 'srwhat': 'text', 'srsearch': query.encode('utf-8')}
     url = "http://en.wikipedia.org/w/api.php?" + urllib.parse.urlencode(query)
     data = read_url(url)
diff --git a/requirements.txt b/requirements.txt
index b7509ec..51c3f99 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
 chardet
 six>=1.5.2
+lxml