spider now also collects js/css files and puts them in the template

This commit is contained in:
j 2007-04-03 13:18:22 +00:00
parent 2a6ec2987c
commit 036f03a265
6 changed files with 90 additions and 30 deletions

View file

@ -5,7 +5,7 @@
# oilarchive/config/app.cfg # oilarchive/config/app.cfg
# DATABASE # DATABASE
sqlobject.dburi="notrans_mysql://root@localhost/oil" sqlobject.dburi="notrans_mysql://root@localhost/oil?sqlobject_encoding=utf-8&use_unicode=1&charset=utf8"
# SERVER # SERVER

View file

@ -1,6 +1,7 @@
# -*- Mode: Python; -*- # -*- Mode: Python; -*-
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# vi:si:et:sw=2:sts=2:ts=2 # vi:si:et:sw=2:sts=2:ts=2
import time
from turbogears import controllers, expose, validate, error_handler from turbogears import controllers, expose, validate, error_handler
from model import * from model import *
@ -99,10 +100,38 @@ class Api:
sname = sortname(name) sname = sortname(name)
return dict(sortname = sname, name = name) return dict(sortname = sname, name = name)
class ArchiveJavascript:
  """Serves the per-archive JavaScript blob stored on the Archive row."""

  @expose()
  def default(self, name):
    """Return the js for the archive identified by its hash id.

    *name* arrives as ``<hashid>.js``; everything after the first dot
    is stripped before the lookup.
    """
    name = name.split('.')[0]
    archive = Archive.byHashId(name)
    response.headerMap['Content-Type'] = "application/x-javascript"
    # Cache for 60 seconds.  NOTE(review): a 30-day value (60*60*24*30)
    # was assigned here and immediately shadowed -- removed as dead code;
    # confirm which TTL is actually intended.
    secs = 60
    expires = cherrypy.lib.httptools.HTTPDate(time.gmtime(time.mktime(time.gmtime()) + secs))
    cherrypy.response.headerMap["Expires"] = expires
    return archive.js
class ArchiveStyleSheet:
  """Serves the per-archive CSS blob stored on the Archive row."""

  @expose()
  def default(self, name):
    """Return the css for the archive identified by its hash id.

    *name* arrives as ``<hashid>.css``; everything after the first dot
    is stripped before the lookup.
    """
    name = name.split('.')[0]
    archive = Archive.byHashId(name)
    response.headerMap['Content-Type'] = "text/css"
    # Cache for 60 seconds.  NOTE(review): a 30-day value (60*60*24*30)
    # was assigned here and immediately shadowed -- removed as dead code;
    # confirm which TTL is actually intended.
    secs = 60
    expires = cherrypy.lib.httptools.HTTPDate(time.gmtime(time.mktime(time.gmtime()) + secs))
    cherrypy.response.headerMap["Expires"] = expires
    return archive.css
class Root(controllers.RootController): class Root(controllers.RootController):
view = View() view = View()
admin = Admin() admin = Admin()
api = Api() api = Api()
js = ArchiveJavascript()
css = ArchiveStyleSheet()
@expose(template=".templates.login") @expose(template=".templates.login")
def login(self, forward_url=None, previous_url=None, *args, **kw): def login(self, forward_url=None, previous_url=None, *args, **kw):

View file

@ -5,6 +5,7 @@
from datetime import datetime from datetime import datetime
import time import time
from urllib import quote from urllib import quote
import urlparse
import md5 import md5
from turbogears.database import PackageHub from turbogears.database import PackageHub
@ -118,8 +119,8 @@ class ArchiveItem(SQLObject):
self.updateHashID() self.updateHashID()
def updateHashID(self): def updateHashID(self):
salt = '%s/%s/%s' % (self.archive.archiveName, self.author, self.title) salt = u'%s/%s' % (self.archive.archiveName, self.archiveItemId)
self.hashID = md5.new(salt).hexdigest() self.hashID = md5.new(salt.encode('utf-8')).hexdigest()
class Archive(SQLObject): class Archive(SQLObject):
@ -130,10 +131,17 @@ class Archive(SQLObject):
pubDate = DateTimeCol(default=datetime.now) pubDate = DateTimeCol(default=datetime.now)
modDate = DateTimeCol(default=datetime.now) modDate = DateTimeCol(default=datetime.now)
created = DateTimeCol(default=datetime.now) created = DateTimeCol(default=datetime.now)
initialized = BoolCol(default = False)
def _get_pubDateTimestamp(self): css = UnicodeCol(default='')
return int(time.mktime(self.pubDate.timetuple())) js = UnicodeCol(default='')
hashId = UnicodeCol(alternateID = True, length=128)
def _get_pubDateTimestamp(self):
if self.initialized:
return int(time.mktime(self.pubDate.timetuple()))
return -1
def _query_url(self, query): def _query_url(self, query):
url = "%s?" % self.archiveUrl url = "%s?" % self.archiveUrl
url += "&".join(["%s=%s" % (key, quote("%s" % query[key])) for key in query]) url += "&".join(["%s=%s" % (key, quote("%s" % query[key])) for key in query])
@ -142,6 +150,9 @@ class Archive(SQLObject):
def _get_update_url(self): def _get_update_url(self):
return self._query_url({'modDate': self.pubDateTimestamp}) return self._query_url({'modDate': self.pubDateTimestamp})
def _get_files_url(self):
return self._query_url({'files': '1'})
def data_url(self, id): def data_url(self, id):
return self._query_url({'id': id}) return self._query_url({'id': id})
@ -149,14 +160,26 @@ class Archive(SQLObject):
if url.find('://') > 0: if url.find('://') > 0:
return url return url
if url.startswith('/'): if url.startswith('/'):
url = "%s/%s" % (self.archiveUrl.split('/')[0], url) domain = "://".join(urlparse.urlsplit(self.archiveUrl)[0:2])
url = "%s%s" % (domain, url)
else: else:
url = "%s/%s" % (self.archiveUrl, url) url = "%s/%s" % (self.archiveUrl, url)
return url
def update(self): def update(self):
result = simplejson.loads(read_url(self.files_url))
if result.has_key('css'):
self.css = read_url(self.full_url(result['css']))
else:
self.css = ''
if result.has_key('js'):
self.js = read_url(self.full_url(result['js']))
else:
self.js = ''
result = simplejson.loads(read_url(self.update_url)) result = simplejson.loads(read_url(self.update_url))
items = result.get('items', []) items = result.get('items', [])
for id in items: for id in items:
print "updating / adding ", id
data = jsonLoadArchiveItem(read_url(self.data_url(id))) data = jsonLoadArchiveItem(read_url(self.data_url(id)))
q = ArchiveItem.select(AND( q = ArchiveItem.select(AND(
ArchiveItem.q.archiveItemId == id, ArchiveItem.q.archiveItemId == id,
@ -165,6 +188,7 @@ class Archive(SQLObject):
jsonImportArchiveItem(self, id, data) jsonImportArchiveItem(self, id, data)
else: else:
q[0].update(data) q[0].update(data)
self.initialized = True
''' '''
get list of all items from archive and remove those from ArchiveItem that get list of all items from archive and remove those from ArchiveItem that
@ -174,9 +198,11 @@ class Archive(SQLObject):
url = self._query_url({'modDate': -1}) url = self._query_url({'modDate': -1})
result = simplejson.loads(read_url(url)) result = simplejson.loads(read_url(url))
archiveItems = result.get('items', []) archiveItems = result.get('items', [])
archivedItems = [i.archiveItemId for i in ArchiveItem.select(ArchiveItem.q.archiveID == self.id)] archivedItems = {}
removeItems = filter(lambda i: i not in archiveItems, archivedItems) for i in ArchiveItem.select(ArchiveItem.q.archiveID == self.id):
for i in removeItems: ArchiveItem.delete(i) archivedItems[i.archiveItemId] = i.id
removeItems = filter(lambda i: i not in archiveItems, archivedItems.keys())
for i in removeItems: ArchiveItem.delete(archivedItems[i])
class SortName(SQLObject): class SortName(SQLObject):
name =UnicodeCol(length=1000, alternateID=True) name =UnicodeCol(length=1000, alternateID=True)

View file

@ -13,6 +13,10 @@ import md5
def jsonLoadArchiveItem(data): def jsonLoadArchiveItem(data):
json_array = simplejson.loads(data) json_array = simplejson.loads(data)
json_array.pop('tg_flash', None) json_array.pop('tg_flash', None)
if json_array.has_key('archiveURL'):
json_array['archiveUrl'] = json_array.pop('archiveURL')
if json_array.has_key('downloadURL'):
json_array['downloadUrl'] = json_array.pop('downloadURL')
for key in ('relDate', 'pubDate', 'modDate'): for key in ('relDate', 'pubDate', 'modDate'):
json_array[key] = datetime.utcfromtimestamp(float(json_array[key])) json_array[key] = datetime.utcfromtimestamp(float(json_array[key]))
for key in ('rights', 'size'): for key in ('rights', 'size'):
@ -24,9 +28,8 @@ def jsonLoadArchiveItem(data):
def jsonImportArchiveItem(archive, archiveItemId, json_array): def jsonImportArchiveItem(archive, archiveItemId, json_array):
if isinstance(json_array, basestring): if isinstance(json_array, basestring):
json_array = jsonLoadArchiveItem(json_array) json_array = jsonLoadArchiveItem(json_array)
salt = '%s/%s/%s' % (archive.archiveName, json_array['author'], json_array['title']) salt = u'%s/%s' % (archive.archiveName, archiveItemId)
hashID = md5.new(salt).hexdigest() hashID = md5.new(salt.encode('utf-8')).hexdigest()
i = model.ArchiveItem( i = model.ArchiveItem(
archiveID=archive.id, archiveID=archive.id,
hashId = hashID, hashId = hashID,

View file

@ -5,21 +5,20 @@
<head py:match="item.tag=='{http://www.w3.org/1999/xhtml}head'" py:attrs="item.items()"> <head py:match="item.tag=='{http://www.w3.org/1999/xhtml}head'" py:attrs="item.items()">
<meta content="text/html; charset=UTF-8" http-equiv="content-type" py:replace="''"/> <meta content="text/html; charset=UTF-8" http-equiv="content-type" py:replace="''"/>
<title py:replace="''">Your title goes here</title> <title py:replace="''">Your title goes here</title>
<meta py:replace="item[:]"/> <style type="text/css">
<style type="text/css"> #pageLogin
#pageLogin {
{ font-size: 10px;
font-size: 10px; font-family: verdana;
font-family: verdana; text-align: right;
text-align: right; }
} </style>
<style type="text/css" media="screen">
@import "/static/css/style.css";
@import "/static/css/archive.css";
</style> </style>
<style type="text/css" media="screen"> <script src="/static/js/archive.js" />
@import "/static/css/style.css"; <meta py:replace="item[:]"/>
@import "/static/css/archive.css";
</style>
<script src="/static/js/archive.js" />
</head> </head>
<body py:match="item.tag=='{http://www.w3.org/1999/xhtml}body'" py:attrs="item.items()"> <body py:match="item.tag=='{http://www.w3.org/1999/xhtml}body'" py:attrs="item.items()">

View file

@ -2,11 +2,14 @@
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://purl.org/kid/ns#" <html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://purl.org/kid/ns#"
py:extends="'master.kid'"> py:extends="'master.kid'">
<head> <head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type" py:replace="''"/> <meta content="text/html; charset=utf-8" http-equiv="Content-Type" py:replace="''"/>
<title>Oil Archive</title> <title>Oil21 - ${item.title}</title>
<style type="text/css" media="screen" py:if="item.archive.css">
@import "/css/${item.archive.hashId}.css";
</style>
<script py:if="item.archive.js" src="/js/${item.archive.hashId}.js" />
</head> </head>
<body> <body>
<div class="item"> <div class="item">
<img class="itemIcon" src="/view/${item.hashId}/icon.png" /> <img class="itemIcon" src="/view/${item.hashId}/icon.png" />
<div class="author">${item.author}</div> <div class="author">${item.author}</div>