# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import re

from six.moves import urllib

import ox
from ox import strip_tags, decode_html
from ox.cache import read_url
import lxml.html


def find(query, timeout=ox.cache.cache_timeout):
    """
    Returns a list of (title, url, description) tuples for the given query.
    """
    if not isinstance(query, bytes):
        query = query.encode('utf-8')
    params = urllib.parse.urlencode({'q': query})
    url = 'http://duckduckgo.com/html/?' + params
    data = read_url(url, timeout=timeout).decode('utf-8')
    doc = lxml.html.document_fromstring(data)
    results = []
    for e in doc.xpath("//a[contains(@class, 'result__a')]"):
        url = e.attrib['href']
        # result links may be DuckDuckGo redirects; the actual target is
        # URL-encoded in the 'uddg' parameter
        if 'uddg=' in url:
            url = urllib.parse.unquote(url.split('&uddg=')[-1])
        title = e.text_content()
        # only the result anchors are parsed, so no description is extracted
        description = ''
        results.append((title, url, description))
    return results
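
# Illustrative usage (a minimal sketch, not part of the original module): it
# assumes the ox package and its cache are importable, and that duckduckgo.com
# still serves the legacy /html/ endpoint with 'result__a' anchors, which may
# change over time.
if __name__ == '__main__':
    # print the first few results for a sample query
    for title, url, description in find('python lxml')[:5]:
        print('%s\n  %s\n' % (title, url))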