# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import re

from six.moves import urllib

import ox
from ox import strip_tags, decode_html
from ox.cache import read_url
import lxml.html


def find(query, timeout=ox.cache.cache_timeout):
    """
    Returns a list of (title, url, description) tuples.
    The description is currently always empty.
    """
    if not isinstance(query, bytes):
        query = query.encode('utf-8')
    params = urllib.parse.urlencode({'q': query})
    url = 'http://duckduckgo.com/html/?' + params
    data = read_url(url, timeout=timeout).decode('utf-8')
    doc = lxml.html.document_fromstring(data)
    results = []
    # result links on the HTML-only interface are <a> elements with the result__a class
    for e in doc.xpath("//a[contains(@class, 'result__a')]"):
        url = e.attrib['href']
        # result hrefs are redirect links; the target url is percent-encoded
        # in the uddg query parameter
        if 'uddg=' in url:
            url = urllib.parse.unquote(url.split('&uddg=')[-1])
        title = e.text_content()
        # the result snippet is not parsed, so the description stays empty
        description = ''
        results.append((title, url, description))
    return results
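

if __name__ == '__main__':
    # Minimal usage sketch, assuming network access and that DuckDuckGo's
    # HTML interface still marks result links with the result__a class:
    # run this module with a query argument and print title and url for
    # each result returned by find().
    import sys
    query = sys.argv[1] if len(sys.argv) > 1 else 'python'
    for title, url, description in find(query):
        print('%s\n%s\n' % (title, url))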