# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import re
import urllib
import ox
from ox import strip_tags, decode_html
from ox.utils import json
from ox.cache import read_url
def find(query, timeout=ox.cache.cache_timeout):
    """Search DuckDuckGo by scraping its HTML results page.

    query   -- search terms; a ``unicode`` value is UTF-8 encoded before
               being URL-quoted (Python 2 ``urlencode`` needs bytes).
    timeout -- cache timeout passed through to ``ox.cache.read_url``.

    Returns a list of ``(title, url, description)`` tuples, one per
    result anchor found on the page; title and description have HTML
    entities decoded and tags stripped.
    """
    if isinstance(query, unicode):
        query = query.encode('utf-8')
    url = 'http://duckduckgo.com/html/?' + urllib.urlencode({'q': query})
    html = read_url(url, timeout=timeout, unicode=True)
    # DOTALL so the pattern can span the newlines between the result
    # anchor and its snippet <div>.
    pattern = re.compile(
        '<a .*?class="large" href="(.+?)">(.*?)</a>.*?<div class="snippet">(.*?)</div>',
        re.DOTALL)
    return [
        (strip_tags(decode_html(title)), link, strip_tags(decode_html(snippet)))
        for link, title, snippet in pattern.findall(html)
    ]