From 3ed213d6d7ac196d977247a290595dbafadfc822 Mon Sep 17 00:00:00 2001
From: j <0x006A@0x2620.org>
Date: Tue, 3 Nov 2015 23:16:34 +0100
Subject: [PATCH] update crawler

---
 ox/web/amazon.py       |   2 +-
 ox/web/thepiratebay.py |  10 +--
 ox/web/ubu.py          | 163 ++++++++++++++++++++++-------------
 3 files changed, 95 insertions(+), 80 deletions(-)

diff --git a/ox/web/amazon.py b/ox/web/amazon.py
index 920fe89..19a72c7 100644
--- a/ox/web/amazon.py
+++ b/ox/web/amazon.py
@@ -7,7 +7,7 @@ from six.moves.urllib.parse import quote
 
 from ox import find_re, strip_tags, decode_html
 from ox.cache import read_url
-import lxml
+import lxml.html
 
 
 def findISBN(title, author):
diff --git a/ox/web/thepiratebay.py b/ox/web/thepiratebay.py
index 7002ebc..125ce7d 100644
--- a/ox/web/thepiratebay.py
+++ b/ox/web/thepiratebay.py
@@ -25,7 +25,7 @@ def find_movies(query=None, imdb=None, max_results=10):
     if imdb:
         query = "tt" + normalize_imdbid(imdb)
     results = []
-    next = ["http://thepiratebay.org/search/%s/0/3/200" % quote(query), ]
+    next = ["https://thepiratebay.se/search/%s/0/3/200" % quote(query), ]
     page_count = 1
     while next and page_count < 4:
         page_count += 1
@@ -33,12 +33,12 @@ def find_movies(query=None, imdb=None, max_results=10):
         if not url.startswith('http'):
             if not url.startswith('/'):
                 url = "/" + url
-            url = "http://thepiratebay.org" + url
+            url = "https://thepiratebay.se" + url
         data = read_url(url, timeout=cache_timeout, unicode=True)
         regexp = '''(.*?).*?'''
         for row in re.compile(regexp, re.DOTALL).findall(data):
             torrentType = row[0]
-            torrentLink = "http://thepiratebay.org" + row[1]
+            torrentLink = "https://thepiratebay.se" + row[1]
             torrentTitle = decode_html(row[2])
             # 201 = Movies , 202 = Movie DVDR, 205 TV Shows
             if torrentType in ['201']:
@@ -61,7 +61,7 @@ def get_id(piratebayId):
 
 def exists(piratebayId):
     piratebayId = get_id(piratebayId)
-    return ox.net.exists("http://thepiratebay.org/torrent/%s" % piratebayId)
+    return ox.net.exists("https://thepiratebay.se/torrent/%s" % piratebayId)
 
 def get_data(piratebayId):
     _key_map = {
@@ -75,7 +75,7 @@ def get_data(piratebayId):
     torrent = dict()
     torrent[u'id'] = piratebayId
     torrent[u'domain'] = 'thepiratebay.org'
-    torrent[u'comment_link'] = 'http://thepiratebay.org/torrent/%s' % piratebayId
+    torrent[u'comment_link'] = 'https://thepiratebay.se/torrent/%s' % piratebayId
     data = read_url(torrent['comment_link'], unicode=True)
     torrent[u'title'] = find_re(data, '(.*?) \(download torrent\) - TPB')
diff --git a/ox/web/ubu.py b/ox/web/ubu.py
index ba05751..2d532f1 100644
--- a/ox/web/ubu.py
+++ b/ox/web/ubu.py
@@ -24,86 +24,92 @@ def get_data(url):
         'url': url,
         'type': re.compile('ubu.com/(.*?)/').findall(url)[0]
     }
-    for videourl, title in re.compile('href="(http://ubumexico.centro.org.mx/.*?)">(.*?)').findall(data):
-        if videourl.endswith('.srt'):
-            m['srt'] = videourl
-        elif not 'video' in m:
-            m['video'] = videourl
-            m['video'] = m['video'].replace('/video/ ', '/video/').replace(' ', '%20')
-            if m['video'] == 'http://ubumexico.centro.org.mx/video/':
-                del m['video']
-        if not 'title' in m:
-            m['title'] = strip_tags(decode_html(title)).strip()
-    if not 'url' in m:
-        print(url, 'missing')
-    if 'title' in m:
-        m['title'] = re.sub('(.*?) \(\d{4}\)$', '\\1', m['title'])
-
-    if not 'title' in m:
-        match = re.compile('(.*?)').findall(data)
-        if match:
-            m['title'] = strip_tags(decode_html(match[0])).strip()
-    if not 'title' in m:
-        match = re.compile(".*?&(.*?)", re.DOTALL).findall(data)
-        if match:
-            m['title'] = re.sub('\s+', ' ', match[0]).strip()
-            if ' - ' in m['title']:
-                m['title'] = m['title'].split(' - ', 1)[-1]
-    if 'title' in m:
-        m['title'] = strip_tags(decode_html(m['title']).strip())
-    match = re.compile("flashvars','file=(.*?.flv)'").findall(data)
-    if match:
-        m['flv'] = match[0]
-        m['flv'] = m['flv'].replace('/video/ ', '/video/').replace(' ', '%20')
-
-    match = re.compile('''src=(.*?) type="video/mp4"''').findall(data)
-    if match:
-        m['mp4'] = match[0].strip('"').strip("'").replace(' ', '%20')
-        if not m['mp4'].startswith('http'):
-            m['mp4'] = 'http://ubumexico.centro.org.mx/video/' + m['mp4']
-    elif 'video' in m and (m['video'].endswith('.mp4') or m['video'].endswith('.m4v')):
-        m['mp4'] = m['video']
-
-    doc = lxml.html.document_fromstring(read_url(url))
-    desc = doc.xpath("//div[contains(@id, 'ubudesc')]")
-    if len(desc):
-        txt = []
-        for part in desc[0].text_content().split('\n\n'):
-            if part == 'RESOURCES:':
-                break
-            if part.strip():
-                txt.append(part)
-        if txt:
-            if len(txt) > 1 and txt[0].strip() == m.get('title'):
-                txt = txt[1:]
-            m['description'] = '\n\n'.join(txt).split('RESOURCES')[0].split('RELATED')[0].strip()
-    y = re.compile('\((\d{4})\)').findall(data)
-    if y:
-        m['year'] = int(y[0])
-    d = re.compile('Director: (.+)').findall(data)
-    if d:
-        m['director'] = strip_tags(decode_html(d[0])).strip()
-
-    a = re.compile('Back to (.*?)', re.DOTALL).findall(data)
-    if a:
-        m['artist'] = strip_tags(decode_html(a[0][1])).strip()
-    else:
-        a = re.compile('(.*?)\(b\..*?\d{4}\)').findall(data)
-        if a:
-            m['artist'] = strip_tags(decode_html(a[0])).strip()
-        elif m['id'] == 'film/lawder_color':
-            m['artist'] = 'Standish Lawder'
-
-    if 'artist' in m:
-        m['artist'] = m['artist'].replace('in UbuWeb Film', '')
-        m['artist'] = m['artist'].replace('on UbuWeb Film', '').strip()
-    if m['id'] == 'film/coulibeuf':
-        m['title'] = 'Balkan Baroque'
-        m['year'] = 1999
+    if m['type'] == 'sound':
+        m['tracks'] = [{
+            'title': strip_tags(decode_html(t[1])).strip(),
+            'url': t[0]
+        } for t in re.compile('"(http.*?.mp3)"[^>]*>(.+)').findall(data)]
+    else:
+        for videourl, title in re.compile('href="(http://ubumexico.centro.org.mx/.*?)">(.*?)').findall(data):
+            if videourl.endswith('.srt'):
+                m['srt'] = videourl
+            elif not 'video' in m:
+                m['video'] = videourl
+                m['video'] = m['video'].replace('/video/ ', '/video/').replace(' ', '%20')
+                if m['video'] == 'http://ubumexico.centro.org.mx/video/':
+                    del m['video']
+            if not 'title' in m:
+                m['title'] = strip_tags(decode_html(title)).strip()
+        if not 'url' in m:
+            print(url, 'missing')
+        if 'title' in m:
+            m['title'] = re.sub('(.*?) \(\d{4}\)$', '\\1', m['title'])
+
+        if not 'title' in m:
+            match = re.compile('(.*?)').findall(data)
+            if match:
+                m['title'] = strip_tags(decode_html(match[0])).strip()
+        if not 'title' in m:
+            match = re.compile(".*?&(.*?)", re.DOTALL).findall(data)
+            if match:
+                m['title'] = re.sub('\s+', ' ', match[0]).strip()
+                if ' - ' in m['title']:
+                    m['title'] = m['title'].split(' - ', 1)[-1]
+        if 'title' in m:
+            m['title'] = strip_tags(decode_html(m['title']).strip())
+        match = re.compile("flashvars','file=(.*?.flv)'").findall(data)
+        if match:
+            m['flv'] = match[0]
+            m['flv'] = m['flv'].replace('/video/ ', '/video/').replace(' ', '%20')
+
+        match = re.compile('''src=(.*?) type="video/mp4"''').findall(data)
+        if match:
+            m['mp4'] = match[0].strip('"').strip("'").replace(' ', '%20')
+            if not m['mp4'].startswith('http'):
+                m['mp4'] = 'http://ubumexico.centro.org.mx/video/' + m['mp4']
+        elif 'video' in m and (m['video'].endswith('.mp4') or m['video'].endswith('.m4v')):
+            m['mp4'] = m['video']
+
+        doc = lxml.html.document_fromstring(read_url(url))
+        desc = doc.xpath("//div[contains(@id, 'ubudesc')]")
+        if len(desc):
+            txt = []
+            for part in desc[0].text_content().split('\n\n'):
+                if part == 'RESOURCES:':
+                    break
+                if part.strip():
+                    txt.append(part)
+            if txt:
+                if len(txt) > 1 and txt[0].strip() == m.get('title'):
+                    txt = txt[1:]
+                m['description'] = '\n\n'.join(txt).split('RESOURCES')[0].split('RELATED')[0].strip()
+        y = re.compile('\((\d{4})\)').findall(data)
+        if y:
+            m['year'] = int(y[0])
+        d = re.compile('Director: (.+)').findall(data)
+        if d:
+            m['director'] = strip_tags(decode_html(d[0])).strip()
+
+        a = re.compile('Back to (.*?)', re.DOTALL).findall(data)
+        if a:
+            m['artist'] = strip_tags(decode_html(a[0][1])).strip()
+        else:
+            a = re.compile('(.*?) in UbuWeb Film').findall(data)
+            if a:
+                m['artist'] = strip_tags(decode_html(a[0][1])).strip()
+            else:
+                a = re.compile('(.*?)\(b\..*?\d{4}\)').findall(data)
+                if a:
+                    m['artist'] = strip_tags(decode_html(a[0])).strip()
+                elif m['id'] == 'film/lawder_color':
+                    m['artist'] = 'Standish Lawder'
+
+        if 'artist' in m:
+            m['artist'] = m['artist'].replace('in UbuWeb Film', '')
+            m['artist'] = m['artist'].replace('on UbuWeb Film', '').strip()
+        if m['id'] == 'film/coulibeuf':
+            m['title'] = 'Balkan Baroque'
+            m['year'] = 1999
     return m
 
 def get_films():
@@ -135,3 +141,12 @@ def get_ids():
         ids.append(u)
     ids = [get_id(url) for url in list(set(ids))]
     return ids
+
+def get_sound_ids():
+    data = read_url('http://www.ubu.com/sound/')
+    ids = []
+    for url, author in re.compile('(.*?)').findall(data):
+        url = 'http://www.ubu.com/sound' + url[1:]
+        ids.append(url)
+    ids = [get_id(url) for url in sorted(set(ids))]
+    return ids