- storeUrl

- sort / search
- show sort by field
j 2007-04-04 14:07:16 +00:00
parent 9022ed674b
commit 2644f0fccf
10 changed files with 349 additions and 51 deletions

View file

@@ -18,6 +18,9 @@ import oilcache
from forms import forms
from sortname import sortname
+def httpExpires(sec):
+    return cherrypy.lib.httptools.HTTPDate(time.gmtime(time.mktime(time.gmtime()) + sec))
class View:
    @expose(template=".templates.view")
    def view(self, item):
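The httpExpires() helper added here just turns a max-age in seconds into an absolute RFC 1123 date for the Expires header. A standalone sketch of the same idea using only the standard library (the name http_expires is illustrative, not part of the codebase):

import time
from email.utils import formatdate

def http_expires(seconds):
    # absolute expiry date "seconds" from now, e.g. "Fri, 04 May 2007 14:07:16 GMT"
    return formatdate(time.time() + seconds, usegmt=True)

print(http_expires(60 * 60 * 24 * 30))  # icons below are cached for roughly 30 days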
@@ -25,10 +28,12 @@ class View:
    def icon(self, item):
        response.headerMap['Content-Type'] = "image/png"
+        cherrypy.response.headerMap["Expires"] = httpExpires(60*60*24*30)
        return oilcache.loadIcon(item)
    def icon_reflection(self, item):
        response.headerMap['Content-Type'] = "image/png"
+        cherrypy.response.headerMap["Expires"] = httpExpires(60*60*24*30)
        return oilcache.loadIconReflection(item)
    @expose()
@@ -108,10 +113,7 @@ class ArchiveJavascript:
        name = name.split('.')[0]
        archive = Archive.byHashId(name)
        response.headerMap['Content-Type'] = "application/x-javascript"
-        secs = 60*60*24*30
-        secs = 60
-        expires = cherrypy.lib.httptools.HTTPDate(time.gmtime(time.mktime(time.gmtime()) + secs))
-        cherrypy.response.headerMap["Expires"] = expires
+        cherrypy.response.headerMap["Expires"] = httpExpires(60) #(60*60*24*30)
        return archive.js
class ArchiveStyleSheet:
@@ -120,10 +122,7 @@ class ArchiveStyleSheet:
        name = name.split('.')[0]
        archive = Archive.byHashId(name)
        response.headerMap['Content-Type'] = "text/css"
-        secs = 60*60*24*30
-        secs = 60
-        expires = cherrypy.lib.httptools.HTTPDate(time.gmtime(time.mktime(time.gmtime()) + secs))
-        cherrypy.response.headerMap["Expires"] = expires
+        cherrypy.response.headerMap["Expires"] = httpExpires(60) #(60*60*24*30)
        return archive.css
class Root(controllers.RootController):
@@ -168,42 +167,27 @@ class Root(controllers.RootController):
        return dict(q = '', f = 'all', s = 'title', o = 0, n = 60, l = 'all', v = 'icon', length = 0)
    _sort_map = {
-        'id': 'imdb',
-        'director': 'director_html',
-        'writer': 'writer_html',
-        'language': 'language_html',
-        'releasedate': 'release_date',
-        'cast': 'cast_html',
-        'genre': 'genre_html',
-        'keywords': 'keywords_html',
-        'connections': 'connections_sort',
+        'id': 'hashId',
+        'author': 'author_sort',
+        'releasedate': 'rel_date',
        'title': 'title_sort',
-        'country': 'country_html',
-        'producer': 'producer_html',
-        'summary': 'plot',
-        'trivia': 'plot',
-        'date': 'latest_file_date',
-        'year': 'release_date',
+        'date': 'rel_date',
    }
    def get_sort(self, s):
        s = str(self._sort_map.get(s, s))
-        if s in ('release_date', 'size', 'pub_date'):
+        if s in ('rel_date', 'size', 'pub_date'):
            s = '-%s' % s
        return s
    _field_map = {
        'title': ArchiveItem.q.title,
        'author': ArchiveItem.q.author,
+        'genre': ArchiveItem.q.genre,
    }
    _search_map = {
-        'summary': 'plot',
-        'trivia': 'plot',
-        'releasedate': 'release_date',
-        'script': 'year',
-        'title': 'year',
-        'director': 'year'
+        'releasedate': 'rel_date',
    }
    @expose(template=".templates.iconview")
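The sort handling reduces to mapping a UI sort key onto a database column and prefixing date or size columns with '-' so they come out descending. A minimal standalone sketch of that logic (the map mirrors the new values above; get_sort_column is an illustrative name):

SORT_MAP = {
    'id': 'hashId',
    'author': 'author_sort',
    'releasedate': 'rel_date',
    'title': 'title_sort',
    'date': 'rel_date',
}

def get_sort_column(key):
    # fall back to the raw key if it is not in the map
    column = SORT_MAP.get(key, key)
    # newest / largest first for dates and sizes
    if column in ('rel_date', 'size', 'pub_date'):
        column = '-' + column
    return column

assert get_sort_column('date') == '-rel_date'
assert get_sort_column('title') == 'title_sort'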
@@ -233,9 +217,13 @@ class Root(controllers.RootController):
        if v == 'quote':
            tg_template = ".templates.quoteview"
-        orderBy = [self.get_sort(s), 'title_sort', 'title']
+        orderBy = [self.get_sort(s), 'title_sort', 'rel_date']
        if q:
-            items = queryArchive(q)
+            if f=='all':
+                items = queryArchive(q, s)
+            elif f in ('title', 'author', 'genre'):
+                q = q.encode('utf-8')
+                items = ArchiveItem.select(LIKE(self._field_map[f], '%'+q+'%') , orderBy=orderBy)
        else:
            items = ArchiveItem.select(orderBy = orderBy)
        sort = s

View file

@@ -12,7 +12,7 @@ from sortname import sortname
update authorSort for better(tm) sorting
'''
def updateSortAuthorNames():
-    for i in ArchiveItems.select():
+    for i in ArchiveItem.select():
        i.authorSort = sortname(i.author)
'''
@@ -20,10 +20,11 @@ def updateSortAuthorNames():
'''
def spiderArchives():
    for archive in Archive.select(Archive.q.initialized == True):
-        if archive.pubDate - datetime.now() < timedelta(minutes = archive.ttl):
-            print archive.archiveName
+        if archive.modDate - datetime.now() < timedelta(minutes = archive.ttl):
+            print "updating", archive.archiveName
            archive.update()
+        else:
+            print "skipping", archive.archiveName
def runCron():
    spiderArchives()

View file

@@ -18,6 +18,7 @@ from scrapeit.utils import read_url
import simplejson
from oilspider import jsonLoadArchiveItem, jsonImportArchiveItem
+import utils
hub = PackageHub("oilarchive")
__connection__ = hub
@@ -25,11 +26,18 @@ __connection__ = hub
def queryArchive(query, orderBy="score", offset = 0, count = 100):
    query = MySQLdb.escape_string(query)
+    orderBy = orderBy.encode('utf-8')
+    print orderBy
+    if orderBy not in ('score', 'size', 'title', 'description'):
+        orderBy = 'score'
+    if orderBy == 'size':
+        orderBy = "size DESC"
    match = "MATCH (title, description, text) AGAINST ('%s')" % query
-    sql = """SELECT id, %s AS score FROM archive_item
+    sql = """SELECT id, %s AS score, title, size, description FROM archive_item
        WHERE %s ORDER BY %s""" % \
        (match, match, orderBy) #, offset, count)
    result = []
+    print sql
    matches = ArchiveItem._connection.queryAll(sql)
    if len(matches) > offset:
        matches = matches[offset:]
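queryArchive() now whitelists the ORDER BY column before interpolating it into the full-text query, while the search term itself goes through MySQLdb.escape_string(). A sketch of that pattern in isolation (build_search_sql is an illustrative name; table and columns follow the schema above):

ALLOWED_ORDER = ('score', 'size', 'title', 'description')

def build_search_sql(escaped_query, order_by):
    # never interpolate an arbitrary column name; fall back to relevance
    if order_by not in ALLOWED_ORDER:
        order_by = 'score'
    if order_by == 'size':
        order_by = 'size DESC'
    match = "MATCH (title, description, text) AGAINST ('%s')" % escaped_query
    return ("SELECT id, %s AS score, title, size, description FROM archive_item "
            "WHERE %s ORDER BY %s" % (match, match, order_by))

print(build_search_sql("oil", "size"))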
@@ -57,6 +65,7 @@ class ArchiveItem(SQLObject):
    modDate = DateTimeCol() #timestamp (item published)
    archiveUrl = UnicodeCol() # -> url (link to archive page)
    downloadUrl = UnicodeCol() # -> url (link to item)
+    storeUrl = UnicodeCol() # -> url (link to store)
    size = IntCol() #bytes
    rights = IntCol(default = 5) #-> int: 0 (free) - 5 (unfree)
    itemType = UnicodeCol() #string (Text, Pictures, Music, Movies, Software)
@@ -71,6 +80,10 @@ class ArchiveItem(SQLObject):
    #Fulltext search
    #ALTER TABLE archive_item ADD FULLTEXT (title, description, text);
+    def getPreview(self, sort):
+        if sort == 'size':
+            return utils.formatFileSize(self.size)
+        return self.relDateFormated
    def _set_author(self, value):
        self._SO_set_author(value)
@@ -80,6 +93,11 @@ class ArchiveItem(SQLObject):
    def _get_year(self):
        return self.relDate.strftime('%Y')
+    def _get_relDateFormated(self):
+        if self.itemType in ('Movie', 'Book'):
+            return self.year
+        else:
+            return self.relDate.strftime('%Y-%m-%d')
    #expand urls in case they are relative to the archive
    def _get_archiveUrl(self):
@@ -140,9 +158,9 @@ class Archive(SQLObject):
    def setHashId(self):
        self.hashId = md5.new("%s" % self.id).hexdigest()
-    def _get_pubDateTimestamp(self):
+    def _get_modDateTimestamp(self):
        if self.initialized:
-            return int(time.mktime(self.pubDate.timetuple()))
+            return int(time.mktime(self.modDate.timetuple()))
        return -1
    def _query_url(self, query):
@@ -151,7 +169,7 @@ class Archive(SQLObject):
        return url
    def _get_update_url(self):
-        return self._query_url({'modDate': self.pubDateTimestamp})
+        return self._query_url({'modDate': self.modDateTimestamp})
    def _get_files_url(self):
        return self._query_url({'files': '1'})
@@ -181,11 +199,12 @@ class Archive(SQLObject):
        self.js = ''
        result = simplejson.loads(read_url(self.update_url))
        items = result.get('items', [])
-        print len(items)
+        print "importing", len(items), "items"
        for id in items:
            try:
                data = read_url(self.data_url(id))
                data = jsonLoadArchiveItem(data)
+                print data['title'].encode('utf-8')
            except:
                print "failed to load ", id, "from ", self.data_url(id)
                continue
@@ -197,6 +216,7 @@ class Archive(SQLObject):
            else:
                q[0].update(data)
        self.initialized = True
+        self.modDate = datetime.now()
'''
get list of all items from archive and remove those from ArchiveItem that

View file

@@ -17,6 +17,8 @@ def jsonLoadArchiveItem(data):
        json_array['archiveUrl'] = json_array.pop('archiveURL')
    if json_array.has_key('downloadURL'):
        json_array['downloadUrl'] = json_array.pop('downloadURL')
+    if json_array.has_key('storeURL'):
+        json_array['storeUrl'] = json_array.pop('storeURL')
    for key in ('relDate', 'pubDate', 'modDate'):
        json_array[key] = datetime.utcfromtimestamp(float(json_array[key]))
    for key in ('rights', 'size'):
@@ -43,6 +45,7 @@ def jsonImportArchiveItem(archive, archiveItemId, json_array):
        modDate=json_array['modDate'],
        archiveUrl=json_array['archiveUrl'],
        downloadUrl=json_array['downloadUrl'],
+        storeUrl=json_array['storeUrl'],
        html=json_array['html'],
        genre=json_array['genre'],
        title=json_array['title'],
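jsonLoadArchiveItem() normalizes the camel-case *URL keys coming from the remote archive into the *Url column names used by the model, and storeURL now gets the same treatment as archiveURL and downloadURL. A small sketch of that renaming step on a plain dict (normalize_keys is an illustrative name, the sample URL is made up):

def normalize_keys(item):
    # remote JSON uses fooURL, the SQLObject columns are fooUrl
    for remote, local in (('archiveURL', 'archiveUrl'),
                          ('downloadURL', 'downloadUrl'),
                          ('storeURL', 'storeUrl')):
        if remote in item:
            item[local] = item.pop(remote)
    return item

print(normalize_keys({'storeURL': 'http://example.com/store/1', 'title': 'x'}))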

View file

@@ -127,3 +127,71 @@ input {
.item .textIconLarge {
color: rgb(0, 0, 0);
}
table {
border-collapse: collapse;
border-spacing: 0px;
}
td {
padding: 0px;
}
#itemPageIcon {
width: 128px;
padding-left: 8px;
padding-right: 8px;
}
#itemPageText {
padding-left: 8px;
padding-right: 8px;
}
#itemPageTextLeftTop {
width: 8px;
height: 8px;
background: url(/static/images/itemPageTextLeftTop.png)
}
#itemPageTextCenterTop {
height: 8px;
background: url(/static/images/itemPageTextCenterTop.png);
}
#itemPageTextRightTop {
width: 8px;
height: 8px;
background: url(/static/images/itemPageTextRightTop.png)
}
#itemPageTextLeftMiddle {
width: 8px;
background: url(/static/images/itemPageTextLeftMiddle.png)
}
#itemPageTextCenterMiddle {
background: url(/static/images/itemPageTextCenterMiddle.png);
}
#itemPageTextRightMiddle {
width: 8px;
background: url(/static/images/itemPageTextRightMiddle.png)
}
#itemPageTextLeftBottom {
width: 8px;
height: 8px;
background: url(/static/images/itemPageTextLeftBottom.png)
}
#itemPageTextCenterBottom {
height: 8px;
background: url(/static/images/itemPageTextCenterBottom.png);
}
#itemPageTextRightBottom {
width: 8px;
height: 8px;
background: url(/static/images/itemPageTextRightBottom.png)
}

View file

@@ -0,0 +1,31 @@
#head {
position: fixed;
top: 0px;
width: 100%;
height: 64px;
background: rgb(64, 64, 64);
text-align: center;
z-index: 1;
}
#headList {
position: relative;
margin-left: auto;
margin-right: auto;
margin-top: 8px;
width: 808px;
height: 48px;
}
.headTop {
position: absolute;
left: 0px;
top: 0px;
width: 128px;
}
.headBottom {
position: absolute;
left: 0px;
bottom: 0px;
width: 128px;
}

View file

@@ -1,3 +1,28 @@
function changeList() {
submitFind();
}
function changeView() {
submitFind();
}
function changeSort() {
submitFind();
}
function changeFind() {
}
function submitFind() {
var l = document.getElementById('selectList').value;
var v = document.getElementById('selectView').value;
var s = document.getElementById('selectSort').value;
var f = document.getElementById('selectFind').value;
var q = document.getElementById('inputFind').value;
document.location.href = '/search?l=' + l + '&v=' + v + '&s=' + s + '&f=' + f + '&q=' + q;
}
function mouseOver(id, view) {
if (view == 'IconLarge' || view == 'IconSmall')
document.getElementById(id).style.background = 'url(/static/images/item' + view + 'MouseOver.png)';
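submitFind() assembles the /search URL by hand, so the Python handler receives l, v, s, f and q as plain query parameters. For reference, a sketch of building the same URL server-side with proper escaping of the search term (the parameter values are examples):

try:
    from urllib import urlencode          # Python 2
except ImportError:
    from urllib.parse import urlencode    # Python 3

params = {'l': 'all', 'v': 'icon', 's': 'title', 'f': 'all', 'q': 'crude oil'}
print('/search?' + urlencode(params))     # spaces and '&' in q are escaped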

View file

@@ -1,10 +1,136 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://purl.org/kid/ns#" py:extends="'master.kid'">
<?python
selectView = [
    ('icon', 'View: Icon'),
    ('list', 'View: List'),
    ('quote', 'View: Quotes'),
]
selectSort = [
    ('title', 'Sort: Title'),
    ('date', 'Sort: Date'),
    ('size', 'Sort: Size'),
    ('relevance', 'Sort: Relevance'),
]
selectFind = [
    ('all', 'Find: All'),
    ('title', 'Find: Title'),
    ('author', 'Find: Author'),
    ('date', 'Find: Date'),
    ('genre', 'Find: Genre'),
]
selectList = [
    ('all', 'List: All'),
    ('Screenings', 'List: Screenings'),
]
def search_link(search, n = None, o = None):
    link = "/search?"
    if n:
        o = search['o'] - (search['o'] % n) + 1
    for key in search:
        value = search[key]
        if key == 'o' and o:
            value = o -1
        if key == 'n' and n:
            value = n
        if key not in ['length']:
            link += "%s=%s&" %(key, value)
    return link
?>
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type" py:replace="''"/>
<title>Oil of the 21st Century Archive</title>
</head>
<body>
<div id="head">
<div id="headList">
<div class="headTop">
</div>
<div class="headTop" style="left: 136px">
<select id="selectList" onChange="changeList()">
<option py:for="value, content in selectList"
py:content="content"
py:attrs="dict(value=value, selected=(value==search['l'] and 'selected' or None))" />
</select>
</div>
<div class="headTop" style="left: 272px">
<select id="selectView" onChange="changeView()">
<option py:for="value, content in selectView"
py:content="content"
py:attrs="dict(value=value, selected=(value==search['v'] and 'selected' or None))" />
</select>
</div>
<div class="headTop" style="left: 408px">
<select id="selectSort" onChange="changeSort()">
<option py:for="value, content in selectSort"
py:content="content"
py:attrs="dict(value=value, selected=(value==search['s'] and 'selected' or None))" />
</select>
</div>
<div class="headTop" style="left: 544px">
<select id="selectFind" onChange="changeFind()">
<option py:for="value, content in selectFind"
py:content="content"
py:attrs="dict(value=value, selected=(value==search['f'] and 'selected' or None))" />
</select>
</div>
<div class="headTop" style="left: 680px">
<input id="inputFind" type="search" placeholder="Find" autosave="find" results="10" onBlur="submitFind()" value="${search['q']}"/>
</div>
<div py:if="search['length'] > 30" id="numberDiv" class="headBottom textSmall">
Items per Page<br/> <span py:for="n in [30, 60, 90, 120]">
<a py:if="n != search['n']" href="${search_link(search, n=n)}">${n}</a>
<span py:if="n == search['n']" py:replace="n" />
</span>
</div>
<?python
number_pages = search['length'] / search['n']
if search['length'] % search['n']:
    number_pages += 1
current_page = search['o'] / search['n'] + 1
current_page_start = search['o'] + 1
current_page_end = min(search['o']+search['n'], search['length'])
previous_page = current_page - 1
previous_page_start = None
if current_page > 1:
    previous_page_end = current_page_start - 1
    previous_page_start = current_page_start - search['n']
next_page = current_page + 1
next_page_start = None
if current_page < number_pages:
    next_page_start = current_page_end + 1
    next_page_end = min(next_page_start + search['n'] -1, search['length'])
last_page = number_pages
last_page_start = None
if search['length'] > search['o'] + search['n']:
    last_page_start = search['n'] * (number_pages -1) + 1
    last_page_end = search['length']
?>
<div py:if="search['length'] > search['n'] and current_page > 1" id="firstDiv" class="headBottom textSmall" style="left: 136px">
<a href="${search_link(search, o=1)}">First Page<br/>1 (1-${search['n']})</a>
</div>
<div py:if="previous_page_start" id="previousDiv" class="headBottom textSmall" style="left: 272px">
<a href="${search_link(search, o= previous_page_start)}">Previous Page<br/>${previous_page} (${previous_page_start}-${previous_page_end})</a>
</div>
<div id="currentDiv" class="headBottom textSmall" style="left: 408px">
Current Page<br/>${current_page} (${current_page_start}-${current_page_end})
</div>
<div py:if="next_page_start" id="nextDiv" class="headBottom textSmall" style="left: 544px">
<a href="${search_link(search, o= next_page_start)}">Next Page<br/>${next_page} (${next_page_start}-${next_page_end})</a>
</div>
<div py:if="last_page_start" id="lastDiv" class="headBottom textSmall" style="left: 680px">
<a href="${search_link(search, o= last_page_start)}">Last Page<br/>${last_page} (${last_page_start}-${last_page_end})</a>
</div>
</div>
</div>
<div id="shadowTop"></div>
<div id="listBody">
<div py:for="item in items" id="${item.hashId}" class="inline listItem">
<div class="table">
@@ -18,7 +144,7 @@
${item.title}
</div>
<div class="link textIconLarge textGrey" onMouseOver="mouseOver('${item.hashId}','IconLarge')" onMouseOut="mouseOut('${item.hashId}','IconLarge')">
-${item.relDate}
+${item.getPreview(sort)}
</div>
</div>
<div class="itemText">
@@ -26,7 +152,7 @@
<span class="textIconLarge">${item.title}</span>
</div>
<div>
-<span class="textIconLarge">${item.relDate}</span>
+<span class="textIconLarge">${item.getPreview(sort)}</span>
</div>
</div>
</div>
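The pagination block earlier in this template derives the page count and the current window from the offset o, the page size n and the result length. A worked sketch of that arithmetic (page_window is an illustrative name; // makes the Python 2 integer division explicit):

def page_window(length, n, o):
    # number of pages, rounding up
    number_pages = length // n + (1 if length % n else 0)
    current_page = o // n + 1
    start = o + 1
    end = min(o + n, length)
    return number_pages, current_page, start, end

# 150 results, 60 per page, offset 60 -> page 2 of 3, showing items 61-120
assert page_window(150, 60, 60) == (3, 2, 61, 120)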

View file

@@ -4,17 +4,37 @@
<head>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type" py:replace="''"/>
<title>Oil21 - ${item.title}</title>
+<!--
<style type="text/css" media="screen" py:if="item.archive.css">
@import "/css/${item.archive.hashId}.css";
</style>
<script py:if="item.archive.js" src="/js/${item.archive.hashId}.js" />
+-->
</head>
<body>
-<div class="item">
+<div class="itemPageIcon">
<img class="itemIcon" src="/view/${item.hashId}/icon.png" />
-<div class="author">${item.author}</div>
-<div class="title">${item.title}</div>
-<div class="description">${XML(item.html)}</div>
-</div>
+</div>
+<div class="itemPageText">
+<table>
+<tr>
<td id="itemPageTextLeftTop"></td>
<td id="itemPageTextCenterTop"></td>
<td id="itemPageTextRightTop"></td>
</tr>
<tr>
<td id="itemPageTextLeftMiddle"></td>
<td id="itemPageTextCenterMiddle">
x${XML(item.html)}
</td>
<td id="itemPageTextRightMiddle"></td>
</tr>
<tr>
<td id="itemPageTextLeftBottom"></td>
<td id="itemPageTextCenterBottom"></td>
<td id="itemPageTextRightBottom"></td>
</tr>
</table>
</div>
</body>
</html>

View file

@@ -29,3 +29,19 @@ def highlightText(text, term):
    else:
        output = text
    return output
+'''
+Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102
+bytes, etc).
+'''
+def formatFileSize(bytes):
+    bytes = float(bytes)
+    if bytes < 1024:
+        return "%d byte%s" % (bytes, bytes != 1 and 's' or '')
+    if bytes < 1024 * 1024:
+        return "%d KB" % (bytes / 1024)
+    if bytes < 1024 * 1024 * 1024:
+        return "%.1f MB" % (bytes / (1024 * 1024))
+    if bytes < 1024 * 1024 * 1024 * 1024:
+        return "%.2f GB" % (bytes / (1024 * 1024 * 1024))
+    return "%.3f TB" % (bytes / (1024 * 1024 * 1024 * 1024))
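Assuming the helper above ships in the utils module that model.py now imports (import utils, utils.formatFileSize), a quick usage check of the thresholds with example inputs:

from utils import formatFileSize

assert formatFileSize(1) == "1 byte"
assert formatFileSize(512) == "512 bytes"
assert formatFileSize(2048) == "2 KB"
assert formatFileSize(3.5 * 1024 * 1024) == "3.5 MB"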