use copy of default headers before changing it, use ~/.ox/auth.json for cookies and other user credentials

This commit is contained in:
j 2009-08-21 15:04:28 +02:00
parent 9a6b6c8c49
commit 5109243163
4 changed files with 41 additions and 5 deletions

9
README
View file

@ -7,5 +7,14 @@ Depends:
python-feedparser (http://www.feedparser.org/) python-feedparser (http://www.feedparser.org/)
(there seem to be some issues if not using the one from ubuntu/debian) (there seem to be some issues if not using the one from ubuntu/debian)
Install:
python setup.py install
some modules require user account information or cookies to work,
those are saved in ~/.ox/auth.json, most basic form looks like this:
{
"key": "value"
}
Test: Test:
nosetests --with-doctest oxweb nosetests --with-doctest oxweb

22
oxweb/auth.py Normal file
View file

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2009
import os
import json
import simplejson

import oxlib
def get(key):
    """Return the credential stored under *key* in the user's auth file.

    The auth file location is taken from the ``oxAUTH`` environment
    variable, falling back to ``~/.ox/auth.json``.  The file is a flat
    JSON object mapping keys to values.  If the file is missing or the
    key is absent, a hint is printed and ``""`` is returned so callers
    can keep working with an empty credential.
    """
    user_auth = os.environ.get('oxAUTH', os.path.expanduser('~/.ox/auth.json'))
    auth = {}
    if os.path.exists(user_auth):
        # try/finally guarantees the handle is closed even if read() fails
        f = open(user_auth, "r")
        try:
            data = f.read()
        finally:
            f.close()
        auth = json.loads(data)
    if key in auth:
        return auth[key]
    # parenthesized single-argument print behaves identically on Python 2 and 3
    print("please add key %s to json file '%s'" % (key, user_auth))
    return ""

View file

@ -1,11 +1,17 @@
import re import re
from oxlib.cache import getUrlUnicode, DEFAULT_HEADERS from oxlib import cache
from oxlib.html import stripTags from oxlib.html import stripTags
from oxlib.text import findRe from oxlib.text import findRe
import auth
headers = DEFAULT_HEADERS
headers["Cookie"] = "uid=9829; pass=cd08329f960450b32218bd73a39f90f1" def _getUrl(url, data=None, headers=cache.DEFAULT_HEADERS.copy(), timeout=cache.cache_timeout, valid=None):
headers["Cookie"] = auth.get("karagarga.cookie")
return cache.getUrl(url, data, headers, timeout)
def getUrlUnicode(url, timeout=cache.cache_timeout):
return cache.getUrlUnicode(url, _getUrl=_getUrl, timeout=timeout)
def getData(id): def getData(id):
data = { data = {

View file

@ -18,8 +18,7 @@ cache_timeout = 24*60*60 # cache search only for 24 hours
season_episode = re.compile("S..E..", re.IGNORECASE) season_episode = re.compile("S..E..", re.IGNORECASE)
def _getUrl(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None): def _getUrl(url, data=None, headers=cache.DEFAULT_HEADERS.copy(), timeout=cache.cache_timeout, valid=None):
headers = cache.DEFAULT_HEADERS
headers['Cookie'] = 'language=en_EN' headers['Cookie'] = 'language=en_EN'
return cache.getUrl(url, data, headers, timeout) return cache.getUrl(url, data, headers, timeout)