cache tpb search only for 24 hours

j 2008-12-29 17:08:02 +05:30
parent 30afb98645
commit 5e224d3cdb
2 changed files with 5 additions and 4 deletions

README

@@ -2,7 +2,7 @@ python-oxweb the internet is a dict
 Depends:
   python2.5
-  python-oxutils
+  python-oxutils (bzr branch http://code.0xdb.org/python-oxutils)
   python-beautifulsoup (http://www.crummy.com/software/BeautifulSoup/)
   python-feedparser (http://www.feedparser.org/)
     (there seem to be some issues if not using the one from ubuntu/debian)


@@ -13,6 +13,7 @@ import oxlib
 from torrent import Torrent
+cache_timeout = 24*60*60 # cache search only for 24 hours
 season_episode = re.compile("S..E..", re.IGNORECASE)
@@ -22,8 +23,8 @@ def _getUrl(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_t
     headers['Cookie'] = 'language=en_EN'
     return cache.getUrl(url, data, headers, timeout)
 
-def _getUrlUnicode(url):
-    return cache.getUrlUnicode(url, _getUrl=_getUrl)
+def _getUrlUnicode(url, timeout=cache.cache_timeout):
+    return cache.getUrlUnicode(url, _getUrl=_getUrl, timeout=timeout)
 
 def findMovies(query, max_results=10):
     results = []
@@ -36,7 +37,7 @@ def findMovies(query, max_results=10):
     if not url.startswith('/'):
         url = "/" + url
     url = "http://thepiratebay.org" + url
-    data = _getUrlUnicode(url)
+    data = _getUrlUnicode(url, timeout=cache_timeout)
     regexp = '''<tr.*?<td class="vertTh"><a href="/browse/(.*?)".*?<td><a href="(/torrent/.*?)" class="detLink".*?>(.*?)</a>.*?</tr>'''
     for row in re.compile(regexp, re.DOTALL).findall(data):
         torrentType = row[0]
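
Taken together, the diff threads a per-call timeout from findMovies down to the cache layer: _getUrlUnicode now forwards a timeout argument to cache.getUrlUnicode, and the search fetch passes the module-level cache_timeout of 24 hours, so stale Pirate Bay result pages are re-fetched after a day while other pages keep the default lifetime. Below is a minimal, self-contained sketch of that expire-after-timeout caching pattern; it is not the real oxlib cache implementation, and the in-memory store plus the get_url_cached and fake_fetch names are purely illustrative assumptions.

import time

CACHE_TIMEOUT = 24 * 60 * 60  # keep search pages for at most 24 hours

# hypothetical in-memory store; the real cache module persists pages elsewhere
_store = {}

def get_url_cached(url, fetch, timeout=CACHE_TIMEOUT):
    # return the cached body if it is younger than `timeout` seconds,
    # otherwise fetch a fresh copy and remember when it was stored
    now = time.time()
    cached = _store.get(url)
    if cached is not None and now - cached[0] < timeout:
        return cached[1]
    data = fetch(url)
    _store[url] = (now, data)
    return data

if __name__ == "__main__":
    def fake_fetch(url):
        # stand-in for the real HTTP request
        return "<html>results for %s</html>" % url

    url = "http://thepiratebay.org/search/big+buck+bunny/0/3/200"
    first = get_url_cached(url, fake_fetch)   # fetches and stores the page
    second = get_url_cached(url, fake_fetch)  # served from the cache
    print(first == second)

Passing the timeout per call, as findMovies now does, keeps the 24-hour expiry local to search results instead of changing the default lifetime for every page the module fetches.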