# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import re
import time
import urllib
import urllib2
import weakref
import threading
import Queue
import simplejson

import oxlib
from oxlib import stripTags

'''
usage:
import google
google.find(query)

for result in google.find(query): result

result is title, url, description

google.find(query, max_results)

FIXME: how to search deeper than the first page?
       (a hedged find_page() sketch follows find() below)
'''

DEFAULT_MAX_RESULTS = 10
DEFAULT_TIMEOUT = 24*60*60

def getUrl(url, data=None, headers=oxlib.net.DEFAULT_HEADERS, timeout=DEFAULT_TIMEOUT):
    return oxlib.cache.getUrl(url, data, headers, timeout)
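
# _find() below fetches JSON and needs a unicode response. A minimal wrapper,
# assuming oxlib.cache also exposes getUrlUnicode() with a timeout keyword
# next to getUrl(); adjust the call if your oxlib version differs.
def getUrlUnicode(url, timeout=DEFAULT_TIMEOUT):
    return oxlib.cache.getUrlUnicode(url, timeout=timeout)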

def quote_plus(s):
    return urllib.quote_plus(s.encode('utf-8'))

def find(query, max_results=DEFAULT_MAX_RESULTS, timeout=DEFAULT_TIMEOUT):
    url = "http://www.google.com/search?q=%s" % quote_plus(query)
    data = getUrl(url, timeout=timeout)
    # one result: the link with class=l, skip to the following <br> or <table>,
    # then take the snippet up to the green URL line (<font color=#008000>)
    # or the next link
    link_re = r'<a href="(?P<url>[^"]*?)" class=l.*?>(?P<name>.*?)</a>' + \
              r'.*?(?:<br>|<table.*?>)' + \
              r'(?P<desc>.*?)' + '(?:<font color=#008000>|<a)'
    results = []
    for match in re.compile(link_re, re.DOTALL).finditer(data):
        (name, url, desc) = match.group('name', 'url', 'desc')
        results.append((stripTags(name), url, stripTags(desc)))
    if len(results) > max_results:
        results = results[:max_results]
    return results
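
# Re the FIXME in the docstring: Google pages its organic results with a
# 'start' offset (10 per page), so reading past the first page could look
# like the helper below. find_page() is hypothetical, not part of the
# original module; it simply repeats find()'s fetch-and-scrape steps with
# the offset added -- an illustration under that assumption, not a tested
# scraper.
def find_page(query, page=0, max_results=DEFAULT_MAX_RESULTS, timeout=DEFAULT_TIMEOUT):
    # page 0 -> start=0, page 1 -> start=10, ...
    url = "http://www.google.com/search?q=%s&start=%d" % (quote_plus(query), page * 10)
    data = getUrl(url, timeout=timeout)
    link_re = r'<a href="(?P<url>[^"]*?)" class=l.*?>(?P<name>.*?)</a>' + \
              r'.*?(?:<br>|<table.*?>)' + \
              r'(?P<desc>.*?)' + '(?:<font color=#008000>|<a)'
    results = []
    for match in re.compile(link_re, re.DOTALL).finditer(data):
        (name, url, desc) = match.group('name', 'url', 'desc')
        results.append((stripTags(name), url, stripTags(desc)))
    return results[:max_results]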

def _find(query):
    # alternative lookup that queries the Google AJAX Search API and returns
    # its raw result dicts instead of scraping the HTML result page
    url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=%s' % quote_plus(query)
    results = simplejson.loads(getUrlUnicode(url))['responseData']['results']
    return results
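
# A small usage example matching the docstring above, runnable as a script.
# It assumes network access and a configured oxlib cache; nothing here is
# part of the original module API.
if __name__ == '__main__':
    for title, url, desc in find('python', max_results=3):
        print '%s\n  %s\n  %s\n' % (title, url, desc)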