filter results, return more results from TPB, filter HD content in filter
parent 472c99240b
commit ee5864a99d
3 changed files with 49 additions and 21 deletions
@@ -15,9 +15,14 @@ def torrentsWeLike(link):
         if word in text:
             return False
     #no dubbed versions
-    for word in ('italian', 'german', 'spanish', 'french'):
+    for word in ('italian', 'german', 'spanish', 'french', 'nl sub'):
         if word in text:
             return False
+
+    #no blu-ray or HD-DVD versions right now, or even DVDRs
+    for word in ('chd', 'hd ', 'hd-', 'dvdr-', 'dvdr.', 'dvdr '):
+        if word in text:
+            return False
     #only dvdrips or dvdscrs
     for word in ('dvdrip', 'dvdscr', 'dvd screener'):
         if word in text:
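The updated torrentsWeLike() is a chain of keyword blacklists ending in a whitelist over the torrent title: dubbed releases and 'nl sub' are rejected, HD and DVDR releases are now rejected too, and only DVDRip/DVDScr titles pass. A minimal standalone sketch of that logic, assuming the function ultimately tests lowercased title text (the real function receives a BeautifulSoup link node, and the lines above this hunk are not shown):

    def torrents_we_like(text):
        # hypothetical standalone version of torrentsWeLike, for illustration only
        text = text.lower()
        #no dubbed versions
        for word in ('italian', 'german', 'spanish', 'french', 'nl sub'):
            if word in text:
                return False
        #no blu-ray or HD-DVD versions right now, or even DVDRs
        for word in ('chd', 'hd ', 'hd-', 'dvdr-', 'dvdr.', 'dvdr '):
            if word in text:
                return False
        #only dvdrips or dvdscrs
        for word in ('dvdrip', 'dvdscr', 'dvd screener'):
            if word in text:
                return True
        return False

The trailing space and hyphen in 'hd ' and 'hd-' keep the filter from rejecting every title that merely contains the letters 'hd' inside a longer word.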
@@ -12,7 +12,7 @@ from btutils import torrentsWeLike

 socket.setdefaulttimeout(10.0)

-def search(query):
+def search(query, filterResult = False):
     '''search for torrents on mininova
     '''
     torrents = []
@@ -21,9 +21,14 @@ def search(query):
     soup = BeautifulSoup(page)
     for row in soup('tr'):
         links = row('a', {'href':re.compile('/tor')})
-        if links and torrentsWeLike(links[0]):
+        if links:
             torrent_url = "http://www.mininova.org%s" % links[0].get('href').replace('/tor', '/get')
-            torrents.append(torrent_url)
+            if filterResult:
+                if torrentsWeLike(links[0]):
+                    torrents.append(torrent_url)
+            else:
+                torrents.append(torrent_url)

     return torrents

 def searchByImdb(imdb):
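filterResult defaults to False, so existing callers of the Mininova search keep the old unfiltered behaviour and opt in to filtering explicitly. A hypothetical call site (the query string is invented):

    all_links = search('some movie')                      # every /tor link, rewritten to /get
    dvd_links = search('some movie', filterResult=True)   # only titles torrentsWeLike() accepts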
@@ -33,8 +38,12 @@ def searchByImdb(imdb):
     page = read_url("http://www.mininova.org/imdb/?imdb=%s" % imdb)
     soup = BeautifulSoup(page)
     for row in soup('tr'):
-        links = row('a', {'href':re.compile('/get')})
-        if links:
-            torrent_url = "http://www.mininova.org%s" % links[0].get('href')
-            torrents.append(torrent_url)
+        #filter private trackers
+        private_tracker = row('a', {'href':re.compile('/faq/#pt')})
+        links = row('a', {'href':re.compile('/tor')})
+        if not private_tracker and links:
+            torrent = links[0]
+            if torrentsWeLike(unicode(torrent.contents[0])):
+                torrent_url = "http://www.mininova.org%s" % torrent.get('href').replace('/tor', '/get')
+                torrents.append(torrent_url)
     return torrents
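searchByImdb() now filters titles unconditionally and skips private trackers. The private-tracker test relies on Mininova marking such rows with a help link to /faq/#pt; in this version of BeautifulSoup, row('a', {...}) is shorthand for row.findAll('a', {...}) and returns a list, so an empty result is falsy. A rough sketch of the test in isolation (the HTML snippet is invented):

    import re
    from BeautifulSoup import BeautifulSoup

    row = BeautifulSoup('<tr><td><a href="/faq/#pt">private tracker</a></td></tr>')
    private_tracker = row('a', {'href': re.compile('/faq/#pt')})
    if not private_tracker:
        print('public tracker, keep this row')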
@@ -8,6 +8,7 @@ from urllib import quote

 from BeautifulSoup import BeautifulSoup

+from btutils import torrentsWeLike
 from google import google
 from utils import read_url, read_url_utf8

@@ -85,9 +86,17 @@ def get_episodes(id):
     episodes = re.compile('<nobr><a href="(.*?)">(.*?)</a></nobr>').findall(data)
     return episodes

-def search(query):
+def search(query, filterResult = False):
     torrents = []
-    url = "http://thepiratebay.org/search.php?video=on&q=%s" % quote(query)
-    page = read_url(url)
-    soup = BeautifulSoup(page)
-    for row in soup('tr'):
+    next = ["http://thepiratebay.org/search/%s/0/3/200" % quote(query), ]
+    page_count = 1
+    while next and page_count < 4:
+        page_count += 1
+        url = next[0]
+        if not url.startswith('http'):
+            if not url.startswith('/'):
+                url = "/" + url
+            url = "http://thepiratebay.org" + url
+        page = read_url(url)
+        soup = BeautifulSoup(page)
+        for row in soup('tr'):
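The Pirate Bay search now walks the result pager instead of fetching a single page: next starts as a one-element list holding the first results URL, each pass fetches next[0], and page_count caps the walk at three pages (it starts at 1 and the loop runs while page_count < 4). Relative pager hrefs are made absolute before fetching; the same normalization written as a hypothetical standalone helper:

    def absolutize(url, base='http://thepiratebay.org'):
        # join a relative 'next page' href onto the site root;
        # absolute URLs pass through untouched
        if url.startswith('http'):
            return url
        if not url.startswith('/'):
            url = '/' + url
        return base + url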
@@ -97,7 +106,12 @@ def search(query):
             # 201 = Movies , 202 = Movie DVDR
             if torrentType in ['201']:
                 torrent = row.findAll('a', {'href':re.compile('.torrent$')})[0].get('href')
-                torrents.append(torrent)
+                if filterResult:
+                    if torrentsWeLike(torrent):
+                        torrents.append(torrent)
+                else:
+                    torrents.append(torrent)
+        next = re.compile('<a.*?href="(.*?)".*?>.*?next.gif.*?</a>').findall(page)
     return torrents

 def searchByImdb(imdb):
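At the bottom of each pass the loop re-scans the page for the pager link, i.e. the anchor wrapping next.gif; findall() returns the captured hrefs, so next becomes an empty, falsy list on the last results page and the while loop stops. A quick check of the regex against an invented pager snippet:

    import re

    page = '<a href="/search/matrix/1/3/200"><img src="next.gif" /></a>'
    next = re.compile('<a.*?href="(.*?)".*?>.*?next.gif.*?</a>').findall(page)
    print(next)  # ['/search/matrix/1/3/200'], normalized and fetched on the next pass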