The software powering 0xdb.org and Pad.ma is now called "pandora"
This commit is contained in:
parent
e724c67f05
commit
7c0e365a0a
46 changed files with 30 additions and 21 deletions
0
pandora/backend/__init__.py
Normal file
0
pandora/backend/__init__.py
Normal file
43
pandora/backend/admin.py
Normal file
43
pandora/backend/admin.py
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
|
||||
from django.contrib import admin
|
||||
|
||||
from forms import FileAdminForm, MovieAdminForm, ArchiveFileAdminForm
|
||||
import models
|
||||
|
||||
|
||||
#class MovieImdbAdmin(admin.ModelAdmin):
#    search_fields = ['imdbId', 'title']
#admin.site.register(models.MovieImdb, MovieImdbAdmin)

class MovieImdbInline(admin.StackedInline):
    # edit the IMDb metadata record inline on the Movie page
    model = models.MovieImdb


class MovieOxdbInline(admin.StackedInline):
    # edit the 0xdb metadata record inline on the Movie page
    model = models.MovieOxdb


class MovieAdmin(admin.ModelAdmin):
    """Admin for movies, searchable by id and by imdb/oxdb title."""
    search_fields = ['movieId', 'imdb__title', 'oxdb__title']
    form = MovieAdminForm
    #inlines = [MovieImdbInline, MovieOxdbInline]

admin.site.register(models.Movie, MovieAdmin)


class FileAdmin(admin.ModelAdmin):
    """Admin for files, searchable by path and video codec."""
    search_fields = ['path', 'video_codec']

    form = FileAdminForm

admin.site.register(models.File, FileAdmin)


class ArchiveFileAdmin(admin.ModelAdmin):
    """Admin for archive files, searchable by path and archive name."""
    search_fields = ['path', 'archive__name']
    form = ArchiveFileAdminForm

admin.site.register(models.ArchiveFile, ArchiveFileAdmin)


class ArchiveAdmin(admin.ModelAdmin):
    """Admin for archives, searchable by name."""
    search_fields = ['name']

admin.site.register(models.Archive, ArchiveAdmin)
|
||||
|
||||
33
pandora/backend/daemon.py
Normal file
33
pandora/backend/daemon.py
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
from carrot.connection import DjangoBrokerConnection
|
||||
from carrot.messaging import Consumer, Publisher
|
||||
from django.conf import settings
|
||||
|
||||
import load
|
||||
import models
|
||||
|
||||
def send_bg_message(msg):
    """Publish *msg* on the "oxdb-bg" exchange for the background daemon."""
    connection = DjangoBrokerConnection()
    bg_publisher = Publisher(connection=connection, exchange="oxdb-bg",
                             routing_key="oxdb-bg")
    bg_publisher.send(msg)
    bg_publisher.close()
|
||||
|
||||
def run():
|
||||
conn = DjangoBrokerConnection()
|
||||
|
||||
consumer = Consumer(connection=conn, queue="oxdb-bg",
|
||||
exchange="oxdb-bg",
|
||||
routing_key="oxdb-bg")
|
||||
def handle_background_tasks_callback(data, message):
|
||||
print("Got bg message")
|
||||
print data
|
||||
if 'loadIMDb' in data:
|
||||
imdbId = data['loadIMDb']
|
||||
load.loadIMDb(imdbId)
|
||||
elif 'findMovie' in data:
|
||||
f = models.File.objects.get(pk=data['findMovie'])
|
||||
f.findMovie()
|
||||
message.ack()
|
||||
consumer.register_callback(handle_background_tasks_callback)
|
||||
consumer.wait() # Go into the consumer loop.
|
||||
|
||||
44
pandora/backend/encoder.py
Normal file
44
pandora/backend/encoder.py
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
from carrot.connection import DjangoBrokerConnection
|
||||
from carrot.messaging import Consumer, Publisher
|
||||
from django.conf import settings
|
||||
|
||||
import load
|
||||
import models
|
||||
|
||||
|
||||
def send_encoder_message(msg):
    """Publish *msg* on the "oxdb-encoder" exchange for the encoder daemon."""
    connection = DjangoBrokerConnection()
    enc_publisher = Publisher(connection=connection, exchange="oxdb-encoder",
                              routing_key="oxdb-encoder")
    enc_publisher.send(msg)
    enc_publisher.close()
|
||||
|
||||
def run():
|
||||
conn = DjangoBrokerConnection()
|
||||
|
||||
consumer = Consumer(connection=conn, queue="oxdb-encoder",
|
||||
exchange="oxdb-encoder",
|
||||
routing_key="oxdb-encoder")
|
||||
def handle_background_tasks_callback(data, message):
|
||||
print("Got encoder message")
|
||||
print data
|
||||
if 'extract' in data:
|
||||
'''
|
||||
update file stuff
|
||||
create derivates and other related stuff for a file
|
||||
'''
|
||||
fileId = data['fileId']
|
||||
f = models.File.objects.get(pk=fileId)
|
||||
f.extract()
|
||||
elif 'updateMovie' in data:
|
||||
'''
|
||||
update movie
|
||||
create proxy stream and other related files extracted from movieFiles
|
||||
'''
|
||||
movieId = data['movieId']
|
||||
m = models.Movie.objects.get(pk=fileId)
|
||||
m.extract()
|
||||
message.ack()
|
||||
consumer.register_callback(handle_background_tasks_callback)
|
||||
consumer.wait() # Go into the consumer loop.
|
||||
|
||||
72
pandora/backend/extract.py
Normal file
72
pandora/backend/extract.py
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
# GPL 2010
|
||||
from __future__ import division
|
||||
import re
|
||||
import os
|
||||
from os.path import abspath, join, dirname, exists
|
||||
import shutil
|
||||
import time
|
||||
import warnings
|
||||
import subprocess
|
||||
|
||||
import oxlib
|
||||
import Image
|
||||
import simplejson as json
|
||||
|
||||
|
||||
img_extension='jpg'
|
||||
|
||||
def run_command(cmd, timeout=10):
    """Run *cmd* (an argv list) and wait for it, polling every 0.2s.

    Returns the process returncode.  If the command has not finished
    after *timeout* seconds it is killed with SIGKILL and reaped.
    NOTE(review): after the kill path, os.waitpid() is used directly so
    p.returncode stays None -- callers should treat None as "timed out".
    """
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while timeout > 0:
        time.sleep(0.2)
        timeout -= 0.2
        # IDIOM FIX: compare with None via `is`, not `!=`/`==`
        if p.poll() is not None:
            return p.returncode
    if p.poll() is None:
        os.kill(p.pid, 9)
        killedpid, stat = os.waitpid(p.pid, os.WNOHANG)
    return p.returncode
|
||||
|
||||
def frame(videoFile, position, baseFolder, width=128, redo=False):
    '''
    Extract one frame from a video via oggThumb and return its path.

    params:
      videoFile
      position as float in seconds
      baseFolder to write frames to
      width of frame
      redo boolean to extract file even if it exists
    '''
    #not using input file, to slow to extract frame right now
    base_size = 320
    frame_path = os.path.join(baseFolder,
                              "%f.%s.%s" % (position, base_size, img_extension))

    if exists(videoFile):
        # extract the base-size frame first (only if missing, unless redo)
        if redo or not exists(frame_path):
            if not exists(baseFolder):
                os.makedirs(baseFolder)
            cmd = ['oggThumb', '-t', str(position), '-n', frame_path,
                   '-s', '%dx0' % base_size, videoFile]
            run_command(cmd)
        # then derive the requested width from the base-size frame
        if width != base_size:
            source_frame = frame_path
            frame_path = os.path.join(baseFolder,
                                      "%f.%s.%s" % (position, width, img_extension))
            if not exists(frame_path):
                resize_image(source_frame, frame_path, width)
    return frame_path
|
||||
|
||||
def resize_image(image_source, image_output, width):
    """Scale image_source to *width* pixels wide (even height) and save it."""
    if exists(image_source):
        source = Image.open(image_source)
        source_width, source_height = source.size

        # preserve aspect ratio; force the height to an even number
        height = int(width / (float(source_width) / source_height))
        height = height - height % 2

        # downscaling looks best with ANTIALIAS, upscaling with BICUBIC
        if width < source_width:
            resample = Image.ANTIALIAS
        else:
            resample = Image.BICUBIC
        source.resize((width, height), resample).save(image_output)
|
||||
42
pandora/backend/forms.py
Normal file
42
pandora/backend/forms.py
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
from ajax_filtered_fields.forms import AjaxManyToManyField, ForeignKeyByLetter
|
||||
from django.conf import settings
|
||||
from django import forms
|
||||
|
||||
import models
|
||||
|
||||
# media required by the ajax_filtered_fields admin widgets
ajax_filtered_js = (
    settings.ADMIN_MEDIA_PREFIX + "js/SelectBox.js",
    settings.ADMIN_MEDIA_PREFIX + "js/SelectFilter2.js",
    settings.STATIC_URL + 'js/jquery/jquery.js',
    settings.STATIC_URL + 'js/ajax_filtered_fields.js',
)


class FileAdminForm(forms.ModelForm):
    """Admin form for File; movie chosen letter-by-letter via its IMDb title."""
    movie = ForeignKeyByLetter(models.Movie, field_name='imdb__title')

    class Meta:
        model = models.File

    class Media:
        js = ajax_filtered_js


class ArchiveFileAdminForm(forms.ModelForm):
    """Admin form for ArchiveFile; file chosen letter-by-letter via its path."""
    file = ForeignKeyByLetter(models.File, field_name='path')

    class Meta:
        model = models.ArchiveFile

    class Media:
        js = ajax_filtered_js


class MovieAdminForm(forms.ModelForm):
    """Admin form for Movie; imdb/oxdb records chosen letter-by-letter via title."""
    imdb = ForeignKeyByLetter(models.MovieImdb, field_name='title')
    oxdb = ForeignKeyByLetter(models.MovieOxdb, field_name='title')

    class Meta:
        model = models.Movie

    class Media:
        js = ajax_filtered_js
|
||||
|
||||
175
pandora/backend/load.py
Normal file
175
pandora/backend/load.py
Normal file
|
|
@ -0,0 +1,175 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
import random
|
||||
import os.path
|
||||
|
||||
from django.db import models
|
||||
from django.contrib.auth.models import User
|
||||
|
||||
from oxlib import stripTags, findRe
|
||||
import oxweb.imdb
|
||||
|
||||
import models
|
||||
|
||||
|
||||
def debug(*msgs):
|
||||
for m in msgs:
|
||||
print m,
|
||||
print
|
||||
|
||||
def loadIMDb(imdbId):
    '''Import data from imdb into database.

    Creates the Movie/MovieImdb rows when missing, then refreshes all
    non-manual related data (alternative titles, countries, languages,
    locations, genres, keywords, trivia, cast, connections, reviews).

    param: impdb id (seven characters)
    return: Movie Object, None if failed
    '''
    if len(imdbId) != 7:
        debug("IMDb ID not valid")
        return None
    try:
        movie = models.Movie.byImdbId(imdbId)
    except models.Movie.DoesNotExist:
        #this shound not happen, just in case previous imports failed
        try:
            imdb = models.MovieImdb.objects.get(imdbId=imdbId)
        except models.MovieImdb.DoesNotExist:
            imdb = models.MovieImdb()
            imdb.imdbId = imdbId
            imdb.save()
        movie = models.Movie()
        movie.imdb = imdb

    info = oxweb.imdb.getMovieInfo(imdbId)
    # plain attributes copied verbatim from the info dict
    for key in ('title',
                'tagline',
                'year',
                'release_date',
                'rating',
                'votes',
                'series_imdb',
                'season',
                'episode'):
        if key in info:
            setattr(movie.imdb, key, info[key])
            debug(key, info[key])
    # info keys whose names differ from the model field names
    _info_map = {
        'episode title': 'episode_title',
        'series title': 'series_title',
    }
    for key in _info_map.keys():
        if key in info:
            setattr(movie.imdb, _info_map.get(key, key), info[key])

    movie.imdb.plot = oxweb.imdb.getMoviePlot(imdbId)
    debug("plot", movie.imdb.plot)

    movie.imdb.runtime = oxweb.imdb.getMovieRuntimeSeconds(imdbId)
    business = oxweb.imdb.getMovieBusinessSum(imdbId)
    for key in ('gross', 'profit', 'budget'):
        # BUG FIX: guard against missing keys -- a partial business dict
        # used to raise KeyError and abort the entire import
        if key in business:
            setattr(movie.imdb, key, business[key])

    movie.imdb.save()
    # temporary unique placeholder; the real oxdbId is computed at the end
    movie.oxdbId = "__init__%s" % random.randint(0, 100000)
    movie.save()
    models.AlternativeTitle.objects.filter(movie=movie, manual=False).delete()
    for i in oxweb.imdb.getMovieAKATitles(imdbId):
        t = models.AlternativeTitle()
        t.movie = movie
        t.title = i[0]
        t.type = i[1]
        t.save()

    #FIXME: related tables should be cleaned to not accumulate cruft
    #Country
    models.MovieCountry.objects.filter(movie=movie, manual=False).delete()
    position = 0
    if 'country' in info:
        for i in info['country']:
            debug("add country", i)
            country, created = models.Country.objects.get_or_create(name=i)
            models.MovieCountry.link(movie, country, position)
            position += 1

    #Language
    models.MovieLanguage.objects.filter(movie=movie, manual=False).delete()
    position = 0
    if 'language' in info:
        for i in info['language']:
            debug("add language", i)
            language, created = models.Language.objects.get_or_create(name=i)
            models.MovieLanguage.link(movie, language, position)
            position += 1

    #Location
    movie.locations_all.filter(manual=False).delete()
    locations = oxweb.imdb.getMovieLocations(imdbId)
    for i in locations:
        debug("add location", i)
        location, created = models.Location.objects.get_or_create(name=i)
        location.movies.add(movie)

    #Genre
    movie.genres_all.filter(manual=False).delete()
    if 'genre' in info:
        for i in info['genre']:
            debug("add genre", i)
            genre, created = models.Genre.objects.get_or_create(name=i)
            genre.movies.add(movie)

    #Keyword
    movie.keywords_all.filter(manual=False).delete()
    keywords = oxweb.imdb.getMovieKeywords(imdbId)
    for g in keywords:
        debug("add keyword", g)
        keyword, created = models.Keyword.objects.get_or_create(name=g)
        keyword.movies.add(movie)

    #Trivia
    movie.trivia_all.filter(manual=False).delete()
    position = 0
    trivia = oxweb.imdb.getMovieTrivia(imdbId)
    for i in trivia:
        debug("add trivia", i)
        t = models.Trivia()
        t.movie = movie
        t.trivia = i
        t.position = position
        t.save()
        position += 1

    #Cast
    position = 0
    models.Cast.objects.filter(movie=movie).filter(manual=False).delete()
    credits = oxweb.imdb.getMovieCredits(imdbId)
    for role in credits:
        for p in credits[role]:
            name = stripTags(p[0])
            imdb_id = findRe(p[0], 'nm(\d{7})')
            debug("add cast", name)
            #FIXME: we could save character information here
            character = stripTags(p[1])
            person = models.Person.get_or_create(name, imdb_id)
            models.Cast.link(movie, person, role, character, position)
            position += 1

    #Connections (skip those pointing at movies we do not have)
    movie.connections_all.filter(manual=False).delete()
    connections = oxweb.imdb.getMovieConnections(imdbId)
    for relation in connections:
        for otherId in connections[relation]:
            try:
                # IDIOM FIX: renamed `object` -- it shadowed the builtin
                other = models.Movie.objects.get(imdb__imdbId=otherId)
                debug("add connection", relation, other)
                models.Connection.get_or_create(movie, relation, other)
            except models.Movie.DoesNotExist:
                pass

    #Reviews
    reviews = oxweb.imdb.getMovieExternalReviews(imdbId)
    movie.reviews_all.filter(manual=False).delete()
    for r in reviews:
        debug("add review", r)
        review = models.Review.get_or_create(movie, r)
        review.title = reviews[r]
        review.save()

    # now that the metadata is in place, compute the permanent oxdb id
    movie.oxdbId = movie.oxid()
    movie.save()
    return movie
|
||||
|
||||
0
pandora/backend/management/__init__.py
Normal file
0
pandora/backend/management/__init__.py
Normal file
0
pandora/backend/management/commands/__init__.py
Normal file
0
pandora/backend/management/commands/__init__.py
Normal file
22
pandora/backend/management/commands/backgroundtasks.py
Normal file
22
pandora/backend/management/commands/backgroundtasks.py
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
|
||||
import os
|
||||
from os.path import join, dirname, basename, splitext, exists
|
||||
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.conf import settings
|
||||
|
||||
from ... import daemon
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """
    listen to rabbitmq and execute background task.
    """
    help = 'listen to rabbitmq and execute background task.'
    args = ''

    def handle(self, **options):
        # blocks forever in the daemon's consumer loop
        daemon.run()
|
||||
|
||||
22
pandora/backend/management/commands/encoder.py
Normal file
22
pandora/backend/management/commands/encoder.py
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
|
||||
import os
|
||||
from os.path import join, dirname, basename, splitext, exists
|
||||
|
||||
from django.core.management.base import BaseCommand, CommandError
|
||||
from django.conf import settings
|
||||
|
||||
from ... import encoder
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """
    listen to rabbitmq and execute encoding tasks.
    """
    help = 'listen to rabbitmq and execute encoding tasks.'
    args = ''

    def handle(self, **options):
        # blocks forever in the encoder's consumer loop
        encoder.run()
|
||||
|
||||
158
pandora/backend/managers.py
Normal file
158
pandora/backend/managers.py
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
import re
|
||||
from datetime import datetime
|
||||
from urllib2 import unquote
|
||||
import json
|
||||
|
||||
from django.contrib.auth.models import User
|
||||
from django.core.exceptions import ObjectDoesNotExist
|
||||
from django.db.models import Q, Manager
|
||||
|
||||
import models
|
||||
|
||||
|
||||
def keyType(key):
    """Map a search key to the value type used when building query filters.

    Returns one of "date", "int", "float" or "string" (the default).
    """
    # BUG FIX: the original tested `key in ('released')` -- parentheses
    # around a single string are not a tuple, so it was a substring test
    # and keys like 'release' or 'se' were wrongly classified as dates.
    if key in ('released',):
        return "date"
    if key in ('year', 'cast.length'):
        return "int"
    if key in ('rating', 'votes'):
        return "float"
    return "string"
|
||||
|
||||
class MovieManager(Manager):
|
||||
def get_query_set(self):
|
||||
return super(MovieManager, self).get_query_set()
|
||||
|
||||
def find(self, request):
|
||||
'''
|
||||
construct query set from q value in request,
|
||||
also checks for lists.
|
||||
range and order must be applied later
|
||||
'''
|
||||
'''
|
||||
q = ''
|
||||
for i in request.META['QUERY_STRING'].split('&'):
|
||||
if i.startswith('q='):
|
||||
q = i[2:]
|
||||
'''
|
||||
q = json.loads(request.POST['data'])['q']
|
||||
print q
|
||||
op = ','
|
||||
if '|' in q:
|
||||
op = '|'
|
||||
conditions = []
|
||||
for e in q.split(op):
|
||||
e = e.split(':')
|
||||
if len(e) == 1: e = ['all'] + e
|
||||
k, v = e
|
||||
exclude = False
|
||||
if v.startswith('!'):
|
||||
v = v[1:]
|
||||
exclude = True
|
||||
if keyType(k) == "string":
|
||||
startswith = v.startswith('^')
|
||||
endswith = v.endswith('$')
|
||||
if startswith and endswith:
|
||||
v = v[1:-1]
|
||||
k = '%s__iexact' % k
|
||||
elif startswith:
|
||||
v = v[1:]
|
||||
k = '%s__istartswith' % k
|
||||
elif v.endswith('$'):
|
||||
v = v[:-1]
|
||||
k = '%s__iendswith' % k
|
||||
else:
|
||||
k = '%s__icontains' % k
|
||||
k = 'find__%s' % k
|
||||
v = unquote(v)
|
||||
if exclude:
|
||||
conditions.append(~Q(**{k:v}))
|
||||
else:
|
||||
conditions.append(Q(**{k:v}))
|
||||
else:
|
||||
def parseDate(d):
|
||||
while len(d) < 3:
|
||||
d.append(1)
|
||||
return datetime(*[int(i) for i in d])
|
||||
#1960-1970
|
||||
match = re.compile("(-?[\d\.]+?)-(-?[\d\.]+$)").findall(v)
|
||||
if match:
|
||||
v1 = match[0][0]
|
||||
v2 = match[0][1]
|
||||
if keyType(k) == "date":
|
||||
v1 = parseDate(v1.split('.'))
|
||||
v2 = parseDate(v2.split('.'))
|
||||
if exclude: #!1960-1970
|
||||
k1 = str('%s__lt' % k)
|
||||
k2 = str('%s__gte' % k)
|
||||
conditions.append(Q(**{k1:v1})|Q(**{k2:v2}))
|
||||
else: #1960-1970
|
||||
k1 = str('%s__gte' % k)
|
||||
k2 = str('%s__lt' % k)
|
||||
conditions.append(Q(**{k1:v1})&Q(**{k2:v2}))
|
||||
else:
|
||||
if keyType(k) == "date":
|
||||
v = parseDate(v.split('.'))
|
||||
k = str('%s' % k)
|
||||
if exclude: #!1960
|
||||
conditions.append(~Q(**{k:v}))
|
||||
else: #1960
|
||||
conditions.append(Q(**{k:v}))
|
||||
|
||||
#join query with operator
|
||||
qs = self.get_query_set()
|
||||
#only include movies that have hard metadata
|
||||
qs = qs.filter(available=True)
|
||||
if conditions:
|
||||
q = conditions[0]
|
||||
for c in conditions[1:]:
|
||||
if op == '|':
|
||||
q = q | c
|
||||
else:
|
||||
q = q & c
|
||||
qs = qs.filter(q)
|
||||
|
||||
# filter list, works for own or public lists
|
||||
l = request.GET.get('l', 'all')
|
||||
if l != "all":
|
||||
l = l.split(":")
|
||||
only_public = True
|
||||
if not request.user.is_anonymous():
|
||||
if len(l) == 1: l = [request.user.username] + l
|
||||
if request.user.username == l[0]:
|
||||
only_public = False
|
||||
if len(l) == 2:
|
||||
lqs = models.List.objects.filter(name=l[1], user__username=l[0])
|
||||
if only_public:
|
||||
lqs = qls.filter(public=True)
|
||||
if lqs.count() == 1:
|
||||
qs = qs.filter(listitem__list__id=lqs[0].id)
|
||||
return qs
|
||||
|
||||
class FileManager(Manager):
    """Manager for File objects."""

    def get_query_set(self):
        return super(FileManager, self).get_query_set()

    def movie_files(self, movie):
        """All files of type 1 that belong to *movie*."""
        return self.get_query_set().filter(type=1, movie=movie)
|
||||
|
||||
class ArchiveFileManager(Manager):
    """Manager for ArchiveFile objects."""

    def get_query_set(self):
        return super(ArchiveFileManager, self).get_query_set()

    def movie_files(self, movie):
        """All archive entries whose file is a video belonging to *movie*."""
        return self.get_query_set().filter(file__is_video=True,
                                           file__movie=movie)

    def by_oshash(self, oshash):
        """Return the single ArchiveFile matching *oshash*.

        Raises models.ArchiveFile.DoesNotExist when no entry matches.
        """
        matches = self.get_query_set().filter(file__oshash=oshash)
        if matches.count() == 0:
            raise models.ArchiveFile.DoesNotExist(
                "%s matching oshash %s does not exist." %
                (models.ArchiveFile._meta.object_name, oshash))
        return matches[0]
|
||||
|
||||
0
pandora/backend/migrations/__init__.py
Normal file
0
pandora/backend/migrations/__init__.py
Normal file
1268
pandora/backend/models.py
Normal file
1268
pandora/backend/models.py
Normal file
File diff suppressed because it is too large
Load diff
11
pandora/backend/urls.py
Normal file
11
pandora/backend/urls.py
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
|
||||
from django.conf.urls.defaults import *
|
||||
|
||||
|
||||
urlpatterns = patterns("backend.views",
    (r'^upload/$', 'firefogg_upload'),  # chunked video upload endpoint
    (r'^$', 'api'),                     # JSON API dispatcher
)
|
||||
|
||||
127
pandora/backend/utils.py
Normal file
127
pandora/backend/utils.py
Normal file
|
|
@ -0,0 +1,127 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
#
|
||||
import errno
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import hashlib
|
||||
|
||||
import oxlib
|
||||
import oxlib.iso
|
||||
from oxlib.normalize import normalizeName
|
||||
|
||||
def oxid(title, director, year='', seriesTitle='', episodeTitle='', season=0, episode=0):
    """Compute the hex '0x...' identifier of a movie or episode.

    Plain movies hash title/director/year; episodes instead concatenate
    two 20-char half digests: one over the series (title + zero-padded
    season) and one over the episode (number, title, director, year).
    """
    def digest(value):
        return hashlib.sha1(value.encode('utf-8')).hexdigest()

    result = digest(u"\n".join([title, director, year]))
    if seriesTitle:
        series_half = digest(u"\n".join([seriesTitle, "%02d" % season]))[:20]
        episode_half = digest(u"\n".join(["%02d" % episode, episodeTitle,
                                          director, year]))[:20]
        result = series_half + episode_half
    return u"0x" + result
|
||||
|
||||
def oxdb_director(director):
    """Derive a normalized director string from a path's parent directory."""
    director = os.path.basename(os.path.dirname(director))
    # a trailing '_' in the folder name stands in for a '.'
    if director.endswith('_'):
        director = "%s." % director[:-1]
    director = ", ".join([normalizeName(d) for d in director.split('; ')])
    # strip placeholder folder names
    for noise in ('Series', 'Unknown Director', 'Various Directors'):
        director = director.replace(noise, '')
    return director
|
||||
|
||||
def oxdb_title(_title, searchTitle=False):
    '''
    normalize filename to get movie title

    Episodes ('Season X.Episode Y' in the name) are rendered as
    '"Series" Episode Title' when searchTitle is True, otherwise as
    'Series (SxxEyy) Episode Title'.
    '''
    _title = os.path.basename(_title)
    # protect dot/space combinations from the '.'-splits below
    _title = _title.replace('... ', '_dot_dot_dot_')
    _title = _title.replace('. ', '_dot__space_')
    _title = _title.replace(' .', '_space__dot_')
    title = _title.split('.')[0]
    # 'foo_ bar' encodes 'foo: bar'
    title = re.sub('([a-z0-9])_ ', '\\1: ', title)
    se = re.compile('Season (\d+).Episode (\d+)').findall(_title)
    if se:
        se = "S%02dE%02d" % (int(se[0][0]), int(se[0][1]))
        parts = _title.split('.')
        # the episode title is the second-to-last dot component, unless a
        # 'Part N' suffix pushes it one further back
        if 'Part' in parts[-2] and 'Episode' not in parts[-3]:
            stitle = parts[-3]
        else:
            stitle = parts[-2]
        if stitle.startswith('Episode '):
            stitle = ''
        if searchTitle:
            title = '"%s" %s' % (title, stitle)
        else:
            title = '%s (%s) %s' % (title, se, stitle)
    title = title.strip()
    # undo the protection replacements
    title = title.replace('_dot_dot_dot_', '... ')
    title = title.replace('_dot__space_', '. ')
    title = title.replace('_space__dot_', ' .')
    return title
|
||||
|
||||
def oxdb_year(data):
    """Extract a four-digit year delimited by dots, e.g. '.2004.'."""
    return oxlib.findRe(data, '\.(\d{4})\.')
|
||||
|
||||
def oxdb_series_title(path):
    """Return the series title for *path*, or u'' if it is not an episode."""
    # 'Series/...' layout: the series name is the parent directory
    if path.startswith('Series'):
        return os.path.basename(os.path.dirname(path))
    # otherwise fall back to the '(Sxx...' marker in the normalized title
    normalized = oxdb_title(path)
    if " (S" in normalized:
        return normalized.split(" (S")[0]
    return u''
|
||||
|
||||
def oxdb_episode_title(path):
    """Extract the episode title that follows '.Episode N.' in *path* (u'' if none)."""
    found = re.compile('.Episode \d+?\.(.*?)\.[a-zA-Z]').findall(path)
    return found[0] if found else u''
|
||||
|
||||
def oxdb_season_episode(path):
    """Parse (season, episode) numbers from a file name.

    Recognizes 'Season X.Episode Y', a bare 'Episode Y', and 'SxxEyy';
    returns (0, 0) when nothing matches.
    """
    season = 0
    episode = 0
    path = os.path.basename(path)
    se = re.compile('Season (\d+).Episode (\d+)').findall(path)
    if se:
        season = int(se[0][0])
        episode = int(se[0][1])
    else:
        # BUG FIX: the original used a non-greedy '(\d+?)' group and then
        # int(ep[0][0]) -- the first character only -- so 'Episode 12'
        # parsed as episode 1.  Match all digits, convert the whole group.
        ep = re.compile('.Episode (\d+)').findall(path)
        if ep:
            episode = int(ep[0])
    if season == 0 and episode == 0:
        se = re.compile('S(\d\d)E(\d\d)').findall(path)
        if se:
            season = int(se[0][0])
            episode = int(se[0][1])
    return (season, episode)
|
||||
|
||||
def oxdb_part(path):
    """Return the part/CD number found in *path*.

    Matches 'part N.' then 'cd N.' (case-insensitive).  The number is
    returned as a string; the int 1 is returned when no marker matches.
    """
    path = path.lower()
    for pattern in ('part\s*?(\d+)\.', 'cd\s*?(\d+)\.'):
        found = re.compile(pattern).findall(path)
        if found:
            return found[0]
    return 1
|
||||
|
||||
def parsePath(path):
    """Parse *path* into a metadata dict (title, director, episode info,
    part, series) and guess its imdbId via oxweb.imdb."""
    import oxweb.imdb
    search_title = oxdb_title(path, True)
    result = {
        'title': oxdb_title(path),
        'director': oxdb_director(path),
        'episode_title': oxdb_episode_title(path),
        'series': oxdb_series_title(path),
        'part': oxdb_part(path),
    }
    result['season'], result['episode'] = oxdb_season_episode(path)
    result['imdbId'] = oxweb.imdb.guess(search_title, result['director'], timeout=-1)
    return result
|
||||
|
||||
706
pandora/backend/views.py
Normal file
706
pandora/backend/views.py
Normal file
|
|
@ -0,0 +1,706 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
import os.path
|
||||
import re
|
||||
from datetime import datetime
|
||||
from urllib2 import unquote
|
||||
import json
|
||||
|
||||
from django import forms
|
||||
from django.core.paginator import Paginator
|
||||
from django.contrib.auth.decorators import login_required
|
||||
from django.contrib.auth.models import User
|
||||
from django.db.models import Q, Avg, Count
|
||||
from django.http import HttpResponse
|
||||
from django.shortcuts import render_to_response, get_object_or_404, get_list_or_404
|
||||
from django.template import RequestContext
|
||||
from django.conf import settings
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
from django.utils import simplejson as json
|
||||
|
||||
from oxdjango.decorators import login_required_json
|
||||
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
|
||||
|
||||
import models
|
||||
import utils
|
||||
from daemon import send_bg_message
|
||||
|
||||
from oxuser.models import getUserJSON
|
||||
from oxuser.views import api_login, api_logout, api_register, api_contact, api_recover, api_preferences
|
||||
|
||||
|
||||
def api(request):
    """JSON API dispatcher: routes the POST 'action' to an api_* function."""
    # CORS preflight
    if request.META['REQUEST_METHOD'] == "OPTIONS":
        response = HttpResponse('')
        #response = render_to_json_response({'status': {'code': 200, 'text': 'please use POST'}})
        response['Access-Control-Allow-Origin'] = '*'
        return response
    if 'action' not in request.POST:
        return apidoc(request)
    function = request.POST['action']
    #FIXME: possible to do this in f
    #data = json.loads(request.POST['data'])

    #FIXME: security considerations, web facing api should not call anything in globals!!!
    f = globals().get('api_' + function, None)
    if f:
        response = f(request)
    else:
        response = render_to_json_response(json_response(status=400,
                        text='Unknown function %s' % function))
    #response['Access-Control-Allow-Origin'] = '*'
    return response
|
||||
|
||||
def api_hello(request):
    '''
    return {'status': {'code': int, 'text': string},
            'data': {user: object}}
    '''
    response = json_response({})
    if request.user.is_authenticated():
        user_json = getUserJSON(request.user)
    else:
        # anonymous visitors get a static guest profile
        user_json = {'name': 'Guest', 'group': 'guest', 'preferences': {}}
    response['data']['user'] = user_json
    return render_to_json_response(response)
|
||||
|
||||
def api_error(request):
    '''
    throws a 503 error
    '''
    # deliberate NameError: error_is_success is undefined, so calling this
    # endpoint exercises the server's error handling
    success = error_is_success
    return render_to_json_response({})
|
||||
|
||||
def _order_query(qs, s, prefix='sort__'):
|
||||
order_by = []
|
||||
for e in s.split(','):
|
||||
o = e.split(':')
|
||||
if len(o) == 1: o.append('asc')
|
||||
order = {'id': 'movieId'}.get(o[0], o[0])
|
||||
order = '%s%s' % (prefix, order)
|
||||
if o[1] == 'desc':
|
||||
order = '-%s' % order
|
||||
order_by.append(order)
|
||||
if order_by:
|
||||
qs = qs.order_by(*order_by)
|
||||
return qs
|
||||
|
||||
def _parse_query(request):
    """Parse the JSON 'data' payload of an api_find request.

    Returns a dict with defaults i=0, o=100 (result range) and
    s='title:asc' (sort), plus any of s/p/g/l/n from the request and
    'q', the movie queryset built by MovieManager.find().
    """
    get = json.loads(request.POST['data'])
    query = {}
    query['i'] = 0
    query['o'] = 100
    query['s'] = 'title:asc'
    def parse_dict(s):
        d = s.split(",")
        return [i.strip() for i in d]
    _dicts = ['p', ]
    _ints = ['n', ]
    for key in ('s', 'p', 'g', 'l', 'n'):
        if key in get:
            if key in _ints:
                query[key] = int(get[key])
            elif key in _dicts:
                query[key] = parse_dict(get[key])
            else:
                query[key] = get[key]
    query['q'] = models.Movie.objects.find(request)
    if 'r' in get:
        r = get['r'].split(':')
        if len(r) == 1: r.append(0)
        if r[0] == '': r[0] = 0
        # BUG FIX: an open-ended range like '10:' used to overwrite r[0]
        # with -1 instead of setting r[1], losing the range start
        if r[1] == '': r[1] = -1
        query['i'] = int(r[0])
        query['o'] = int(r[1])
    #group by only allows sorting by name or number of itmes
    return query
|
||||
|
||||
def api_find(request):
    '''
    param data
        {'q': query, 's': sort, 'r': range}

        q: query string, can contain field:search more on query syntax at
           http://wiki.0xdb.org/wiki/QuerySyntax
        s: comma separated list of field:order, default: director:asc,year:desc
        r: result range, from:to or from
        p: properties to return, if omitted stats are returned
        g: group elements by, country, genre, director...

    with p, items is list of dicts with requested properties:
    return {'status': {'code': int, 'text': string},
            'data': {items: array}}

    with g, items contains list of {'title': string, 'items': int}:
    return {'status': {'code': int, 'text': string},
            'data': {items: array}}

    with g + n=1, return number of items in given query
    return {'status': {'code': int, 'text': string},
            'data': {items: int}}

    without p or g, return stats about query:
    return {'status': {'code': int, 'text': string},
            'data': {items=int, files=int, pixels=int, size=int, duration=int}}
    '''
    query = _parse_query(request)
    response = json_response({})
    if 'p' in query:
        # property mode: return the requested fields of each matching movie
        response['data']['items'] = []
        qs = _order_query(query['q'], query['s'])
        if 'n' in query:
            response = {'items': qs.count()}
        else:
            wanted = query['p']
            def only_p(m):
                # project the cached json blob down to the wanted keys
                r = {}
                if m:
                    m = json.loads(m)
                    for p in wanted:
                        r[p] = m[p]
                return r
            qs = qs[query['i']:query['o']]

            response['data']['items'] = [only_p(m['json']) for m in qs.values('json')]

    elif 'g' in query:
        # group mode: aggregate matching movies by country/genre/... or year
        if query['s'].split(':')[0] not in ('name', 'items'):
            query['s'] = 'name'
        #FIXME: also filter lists here
        response['data']['items'] = []
        name = 'name'
        items = 'movies'
        movie_qs = query['q']
        _objects = {
            'country': models.Country.objects,
            'genre': models.Genre.objects,
            'language': models.Language.objects,
            'director': models.Person.objects.filter(cast__role='directors'),
        }
        if query['g'] in _objects:
            qs = _objects[query['g']].filter(movies__id__in=movie_qs).values('name').annotate(movies=Count('movies'))
        elif query['g'] == "year":
            qs = movie_qs.values('imdb__year').annotate(movies=Count('id'))
            name = 'imdb__year'
        if 'n' in query:
            response['data']['items'] = qs.count()
        else:
            #replace normalized items/name sort with actual db value
            order_by = query['s'].split(":")
            if len(order_by) == 1:
                order_by.append('desc')
            if order_by[0] == 'name':
                order_by = "%s:%s" % (name, order_by[1])
            else:
                order_by = "%s:%s" % (items, order_by[1])
            qs = _order_query(qs, order_by, '')
            qs = qs[query['i']:query['o']]

            response['data']['items'] = [{'title': i[name], 'items': i[items]} for i in qs]

    else:
        # stats mode: overall counts for the whole collection
        #FIXME: also filter lists here
        movies = models.Movie.objects.filter(available=True)
        files = models.File.objects.all()
        response['data']['items'] = movies.count()
        response['data']['files'] = files.count()
        # NOTE(review): Count() tallies rows, it does not sum the values --
        # if these stats are meant to be totals, Sum() is probably intended;
        # confirm against the client.
        r = files.aggregate(Count('size'), Count('pixels'), Count('duration'))
        response['data']['pixels'] = r['pixels__count']
        response['data']['size'] = r['size__count']
        response['data']['duration'] = r['duration__count']
    return render_to_json_response(response)
|
||||
|
||||
def api_getItem(request):
    '''
    Return the JSON representation of a single movie.

    param data
        string id

    return item array
    '''
    movie_id = json.loads(request.POST['data'])
    movie = get_object_or_404_json(models.Movie, movieId=movie_id)
    #FIXME: check permissions
    response = json_response({})
    response['data'] = {'item': movie.json}
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def api_editItem(request):
    '''
    Apply key/value edits to a movie the current user may edit.

    param data
        {id: string, key: value,..}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    data = json.loads(request.POST['data'])
    item = get_object_or_404_json(models.Movie, movieId=data['id'])
    if item.editable(request.user):
        #edit() is applied even though the reply still reports 501
        response = json_response(status=501, text='not implemented')
        item.edit(data)
    else:
        #was: 'permissino denied' -- fixed typo in user-facing message
        response = json_response(status=403, text='permission denied')
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def api_removeItem(request):
    '''
    Remove a movie the current user may edit (not yet implemented).

    param data
        string id

    return {'status': {'code': int, 'text': string}}
    '''
    response = json_response({})
    itemId = json.loads(request.POST['data'])
    item = get_object_or_404_json(models.Movie, movieId=itemId)
    if item.editable(request.user):
        response = json_response(status=501, text='not implemented')
    else:
        #was: 'permissino denied' -- fixed typo in user-facing message
        response = json_response(status=403, text='permission denied')
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def api_addLayer(request):
    '''
    Add an annotation layer (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    status = {'code': 501, 'text': 'not implemented'}
    return render_to_json_response({'status': status})
|
||||
|
||||
@login_required_json
def api_removeLayer(request):
    '''
    Remove an annotation layer (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    not_implemented = {'status': {'code': 501, 'text': 'not implemented'}}
    return render_to_json_response(not_implemented)
|
||||
|
||||
@login_required_json
def api_editLayer(request):
    '''
    Edit an annotation layer the current user may edit (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    response = json_response({})
    data = json.loads(request.POST['data'])
    layer = get_object_or_404_json(models.Layer, pk=data['id'])
    if layer.editable(request.user):
        response = json_response(status=501, text='not implemented')
    else:
        response = json_response(status=403, text='permission denied')
    return render_to_json_response(response)
    #removed: two unreachable statements that followed the return above
    #(a duplicate 501 response and a second return).
|
||||
|
||||
@login_required_json
def api_addListItem(request):
    '''
    Add an item to a list (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    return render_to_json_response(json_response(status=501, text='not implemented'))
|
||||
|
||||
@login_required_json
def api_removeListItem(request):
    '''
    Remove an item from a list (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    stub = json_response(status=501, text='not implemented')
    return render_to_json_response(stub)
|
||||
|
||||
@login_required_json
def api_addList(request):
    '''
    Create a new list (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    return render_to_json_response(json_response(status=501, text='not implemented'))
|
||||
|
||||
@login_required_json
def api_editList(request):
    '''
    Edit a list (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    stub = json_response(status=501, text='not implemented')
    return render_to_json_response(stub)
|
||||
|
||||
#was missing @login_required_json -- every other list/item mutation endpoint
#in this module requires a logged-in user; an anonymous caller should not
#reach a list-removal view even while it is unimplemented.
@login_required_json
def api_removeList(request):
    '''
    Remove a list (not yet implemented).

    param data
        {key: value}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    response = json_response(status=501, text='not implemented')
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def api_addArchive(request):
    '''
    Create a new archive and add the current user to its users.

    ARCHIVE API NEEDS CLEANUP
    param data
        {name: string}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    data = json.loads(request.POST['data'])
    try:
        #archive names are unique -- refuse to create a duplicate
        archive = models.Archive.objects.get(name=data['name'])
        #NOTE(review): 401 normally means 'unauthorized'; 409 (conflict) would
        #be conventional for 'already exists' -- confirm client expectations.
        response = {'status': {'code': 401, 'text': 'archive with this name exists'}}
    except models.Archive.DoesNotExist:
        archive = models.Archive(name=data['name'])
        archive.save()
        #the creator becomes the first user of the archive
        archive.users.add(request.user)
        response = json_response({})
        response['status']['text'] = 'archive created'
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def api_editArchive(request):
    '''
    Apply key/value edits to an archive the current user may edit.

    ARCHIVE API NEEDS CLEANUP
    param data
        {id: string, key: value,..}
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    data = json.loads(request.POST['data'])
    #NOTE(review): docstring says 'id' but the lookup uses data['name'];
    #archives appear to be addressed by name -- confirm what the client sends.
    item = get_object_or_404_json(models.Archive, name=data['name'])
    if item.editable(request.user):
        #edit() is applied even though the reply still reports 501
        response = json_response(status=501, text='not implemented')
        item.edit(data)
    else:
        response = json_response(status=403, text='permission denied')
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def api_removeArchive(request):
    '''
    Remove an archive the current user may edit (not yet implemented).

    ARCHIVE API NEEDS CLEANUP
    param data
        string id

    return {'status': {'code': int, 'text': string}}
    '''
    response = json_response({})
    itemId = json.loads(request.POST['data'])
    #was: get_object_or_404_json(models.Archive, movieId=itemId) --
    #movieId is a Movie field, not an Archive field (copy/paste from the
    #item endpoints); api_editArchive addresses archives by name.
    item = get_object_or_404_json(models.Archive, name=itemId)
    if item.editable(request.user):
        response = json_response(status=501, text='not implemented')
    else:
        response = json_response(status=403, text='permission denied')
    return render_to_json_response(response)
|
||||
|
||||
|
||||
|
||||
#@login_required_json
def api_update(request):
    '''
    Sync a client's file listing for an archive with the server.

    For each posted file: unknown oshashes are registered (and movie
    matching is queued in the background daemon); known oshashes get their
    client-side path refreshed.  The reply tells the client which files it
    still needs to send data for and which it should rename.

    param data
        {archive: string, files: json}
    return {'status': {'code': int, 'text': string},
            'data': {info: object, rename: object}}
    '''
    data = json.loads(request.POST['data'])
    archive = data['archive']
    files = data['files']
    archive = get_object_or_404_json(models.Archive, name=archive)
    if archive.editable(request.user):
        needs_data = []   # oshashes the client should upload info for
        rename = {}       # oshash -> canonical path the client should rename to
        for oshash in files:
            #per-file info dict; note this shadows the outer 'data'
            data = files[oshash]
            q = models.ArchiveFile.objects.filter(archive=archive, file__oshash=oshash)
            if q.count() == 0:
                #print "adding file", oshash, data['path']
                f = models.ArchiveFile.get_or_create(archive, oshash)
                f.update(data)
                #file not yet linked to a movie: ask the daemon to match it
                if not f.file.movie:
                    send_bg_message({'findMovie': f.file.id})
                #FIXME: only add if it was not in File
            else:
                #known file: just keep its client-side path current
                f = q[0]
                if data['path'] != f.path:
                    f.path = data['path']
                    f.save()
            if f.file.needs_data:
                needs_data.append(oshash)
            #client path differs from the canonical File path: request rename
            if f.path != f.file.path:
                rename[oshash] = f.file.path
        #print "processed files for", archive.name
        #remove all files not in files.keys() from ArchiveFile
        response = json_response({'info': needs_data, 'rename': rename})
    else:
        response = json_response(status=403, text='permission denied')
    return render_to_json_response(response)
|
||||
|
||||
def api_encodingSettings(request):
    '''
    returns Firefogg encoding settings as specified by site
    return {'status': {'code': int, 'text': string},
            'data': {'options': {'videoQuality':...}}}
    '''
    profile = settings.VIDEO_ENCODING[settings.VIDEO_PROFILE]
    return render_to_json_response(json_response({'options': profile}))
|
||||
|
||||
class UploadForm(forms.Form):
    #was: data = forms.TextInput() -- TextInput is a *widget*, not a field,
    #so 'data' was ignored by form validation and never appeared in
    #cleaned_data.  CharField keeps the same POST parameter name.
    data = forms.CharField()
    file = forms.FileField()
|
||||
|
||||
class VideoChunkForm(forms.Form):
    #one chunk of a Firefogg chunked video upload
    chunk = forms.FileField()
    #set (truthy) on the final chunk of the upload
    done = forms.IntegerField(required=False)
|
||||
|
||||
@login_required_json
def api_upload(request): #video, timeline, frame
    '''
    upload video, timeline or frame

    The file being annotated is addressed by its oshash; data['item']
    selects what is being uploaded ('frame' or 'timeline').

    param data
    param file
    return {'status': {'code': int, 'text': string},
            'data': {}}
    '''
    form = UploadForm(request.POST, request.FILES)
    if form.is_valid():
        data = json.loads(request.POST['data'])
        oshash = data['oshash']
        f = get_object_or_404(models.File, oshash=oshash)
        if data['item'] == 'frame':
            ff = form.cleaned_data['file']
            position = data['position']
            #one Frame row per (file, position); replace any existing image
            frame, created = models.Frame.objects.get_or_create(file=f, position=position)
            if not created and frame.frame:
                frame.frame.delete()
            frame.frame.save(ff.name, ff)
            frame.save()
            response = json_response({'url': frame.frame.url})
            return render_to_json_response(response)
        if data['item'] == 'timeline':
            pass
            #print "not implemented"

    #invalid form, or an item type we do not handle yet
    response = json_response(status=501, text='not implemented')
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def firefogg_upload(request):
    '''
    Two-phase Firefogg video upload: an init POST with 'oshash' returns the
    chunk upload URL; subsequent POSTs deliver chunks to that URL.
    '''
    #handle video upload
    if request.method == 'POST':
        #init upload
        if 'oshash' in request.POST:
            #FIXME: what to do if requested oshash is not in db?
            #FIXME: should existing data be reset here? or better, should this fail if an upload was there
            f = get_object_or_404(models.File, oshash=request.POST['oshash'])
            #drop any previously uploaded stream for the configured profile
            stream = getattr(f, 'stream_%s'%settings.VIDEO_UPLOAD)
            if stream:
                stream.delete()
            f.available = False
            f.save()
            #tell Firefogg where to POST the chunks ('result': 1 == OK)
            response = {
                'uploadUrl': request.build_absolute_uri('/api/upload/?oshash=%s' % f.oshash),
                'result': 1
            }
            return render_to_json_response(response)
        #post next chunk
        if 'chunk' in request.FILES and 'oshash' in request.GET:
            print "all chunk now"
            f = get_object_or_404(models.File, oshash=request.GET['oshash'])
            form = VideoChunkForm(request.POST, request.FILES)
            #FIXME:
            if form.is_valid() and f.editable(request.user):
                c = form.cleaned_data['chunk']
                response = {
                    'result': 1,
                    'resultUrl': request.build_absolute_uri('/')
                }
                #result -1 tells Firefogg to abort the upload
                if not f.save_chunk(c, c.name):
                    response['result'] = -1
                elif form.cleaned_data['done']:
                    #FIXME: send message to encode deamon to create derivates instead
                    f.available = True
                    f.save()
                    response['result'] = 1
                    response['done'] = 1
                return render_to_json_response(response)
    #anything else (GET, malformed POST) is rejected
    print request.GET, request.POST
    response = json_response(status=400, text='this request requires POST')
    return render_to_json_response(response)
|
||||
|
||||
@login_required_json
def api_editFile(request): #FIXME: should this be file.files. or part of update
    '''
    change file / imdb link
    '''
    return render_to_json_response(json_response(status=501, text='not implemented'))
|
||||
|
||||
def api_parse(request): #parse path and return info
    '''
    param data
        {path: string}
    return {'status': {'code': int, 'text': string},
            data: {imdb: string}}
    '''
    data = json.loads(request.POST['data'])
    info = utils.parsePath(data['path'])
    return render_to_json_response(json_response(info))
|
||||
|
||||
def api_getImdbId(request):
    '''
    Guess the IMDb id for a title/director pair.

    param data
        {title: string, director: string, year: string}
    return {'status': {'code': int, 'text': string},
            'data': {imdbId:string }}
    '''
    #was: oxweb.imdb.guess(search_title, r['director'], ...) -- both
    #'search_title' and 'r' were undefined names (NameError on every call);
    #read the POSTed payload instead, as every other api_* endpoint does.
    data = json.loads(request.POST['data'])
    imdbId = oxweb.imdb.guess(data['title'], data['director'], timeout=-1)
    if imdbId:
        response = json_response({'imdbId': imdbId})
    else:
        response = json_response(status=404, text='not found')
    return render_to_json_response(response)
|
||||
|
||||
def api_fileInfo(request):
    '''
    Return metadata for a single file, looked up by oshash.

    param data
        oshash string
    return {'status': {'code': int, 'text': string},
            'data': {imdbId:string }}
    '''
    #oshash may arrive JSON-encoded in POST['data'] or raw in the query string
    if 'data' in request.POST:
        oshash = json.loads(request.POST['data'])
    elif 'oshash' in request.GET:
        oshash = request.GET['oshash']
    #NOTE(review): if neither parameter is present, oshash is unbound and
    #the next line raises NameError -- confirm callers always send one.
    #NOTE(review): other endpoints use models.File; MovieFile may be a
    #stale model name -- verify it still exists in models.py.
    f = models.MovieFile.objects.get(oshash=oshash)
    response = {'data': f.json()}
    return render_to_json_response(response)
|
||||
|
||||
def api_subtitles(request):
    '''
    Read or write subtitles for a file, addressed by oshash.

    param data
        oshash string
        language string
        subtitle string
    return
        if no language is provided:
            {data: {languages: array}}
        if language is set:
            {data: {subtitle: string}}
        if subtitle is set:
            saves subtitle for given language
    '''
    if 'data' in request.POST:
        data = json.loads(request.POST['data'])
        oshash = data['oshash']
        language = data.get('language', None)
        srt = data.get('subtitle', None)
        if srt:
            #write path: store the posted srt for this user/file/language
            user = request.user
            #NOTE(review): Django's get_or_create returns an (obj, created)
            #tuple -- unless this is a custom manager method, 'sub' would be
            #a tuple here and sub.srt would fail; confirm the manager.
            sub = models.Subtitles.objects.get_or_create(user, oshash, language)
            sub.srt = srt
            sub.save()
        else:
            response = json_response({})
            if language:
                #read path: return the stored srt for this language, if any
                q = models.Subtitles.objects.filter(movie_file__oshash=oshash, language=language)
                if q.count() > 0:
                    response['data']['subtitle'] = q[0].srt
                return render_to_json_response(response)
            #no language given: list the languages subtitles exist for
            l = models.Subtitles.objects.filter(movie_file__oshash=oshash).values('language')
            response['data']['languages'] = [f['language'] for f in l]
    #NOTE(review): on the save path (srt set) and when 'data' is absent,
    #'response' is never assigned -- this return raises NameError; confirm
    #intended behavior for those cases.
    return render_to_json_response(response)
|
||||
|
||||
'''
|
||||
GET list
|
||||
> {
|
||||
"files": {
|
||||
"a41cde31c581e11d": {"path": "E/Example, The/An Example.avi", "size":1646274},
|
||||
}
|
||||
}
|
||||
'''
|
||||
@login_required_json
def list_files(request):
    '''
    Return all files of the current user as
    {'files': {oshash: {'path': string, 'size': int}, ...}}.
    '''
    response = {}
    response['files'] = {}
    #was: models.UserFile.filter(...) -- querysets come from the manager,
    #i.e. UserFile.objects.filter(...); the old call raised AttributeError.
    qs = models.UserFile.objects.filter(user=request.user)
    #page through the queryset to keep memory bounded for large collections
    p = Paginator(qs, 1000)
    for i in p.page_range:
        page = p.page(i)
        for f in page.object_list:
            response['files'][f.movie_file.oshash] = {'path': f.path, 'size': f.movie_file.size}
    return render_to_json_response(response)
|
||||
|
||||
def find_files(request):
    '''
    Like list_files, but restricted to the user's files whose movies match
    the query in the request.
    '''
    response = {}
    query = _parse_query(request)
    response['files'] = {}
    #was: models.UserFile.filter(...) -- querysets come from the manager,
    #i.e. UserFile.objects.filter(...); the old call raised AttributeError.
    qs = models.UserFile.objects.filter(user=request.user).filter(movie_file__movie__id__in=query['q'])
    #page through the queryset to keep memory bounded for large result sets
    p = Paginator(qs, 1000)
    for i in p.page_range:
        page = p.page(i)
        for f in page.object_list:
            response['files'][f.movie_file.oshash] = {'path': f.path, 'size': f.movie_file.size}
    return render_to_json_response(response)
|
||||
|
||||
|
||||
def apidoc(request):
    '''
    this is used for online documentation at http://127.0.0.1:8000/api/

    Collects every api_* view in this module and renders its docstring
    into the api.html template.
    '''
    import sys
    def trim(docstring):
        #standard docstring-dedent algorithm from PEP 257
        if not docstring:
            return ''
        # Convert tabs to spaces (following the normal Python rules)
        # and split into a list of lines:
        lines = docstring.expandtabs().splitlines()
        # Determine minimum indentation (first line doesn't count):
        indent = sys.maxint
        for line in lines[1:]:
            stripped = line.lstrip()
            if stripped:
                indent = min(indent, len(line) - len(stripped))
        # Remove indentation (first line is special):
        trimmed = [lines[0].strip()]
        if indent < sys.maxint:
            for line in lines[1:]:
                trimmed.append(line[indent:].rstrip())
        # Strip off trailing and leading blank lines:
        while trimmed and not trimmed[-1]:
            trimmed.pop()
        while trimmed and not trimmed[0]:
            trimmed.pop(0)
        # Return a single string:
        return '\n'.join(trimmed)

    #every module-level name starting with 'api_' is an endpoint
    functions = filter(lambda x: x.startswith('api_'), globals().keys())
    api = []
    for f in sorted(functions):
        api.append({
            #strip the 'api_' prefix for display
            'name': f[4:],
            'doc': trim(globals()[f].__doc__).replace('\n', '<br>\n')
        })
    context = RequestContext(request, {'api': api,
               'sitename': settings.SITENAME,})
    return render_to_response('api.html', context)
|
||||
Loading…
Add table
Add a link
Reference in a new issue