update files archive api

j 2010-08-07 16:31:20 +02:00
parent 7e789242a1
commit d4e1dd85d5
7 changed files with 335 additions and 306 deletions

View file

@@ -30,25 +30,35 @@ def parse_decimal(string):
     d = string.split('/')
     return Decimal(d[0]) / Decimal(d[1])

-#ARCHIVE stuff
-class Volume(models.Model):
-    start = models.CharField(max_length=1)
-    end = models.CharField(max_length=1)
-    name = models.CharField(max_length=255)
+def stream_path(f):
+    h = f.oshash
+    return os.path.join('stream', h[:2], h[2:4], h[4:6], h[6:], f.profile)

-class Archive(models.Model):
-    created = models.DateTimeField(auto_now_add=True)
-    modified = models.DateTimeField(auto_now=True)
-    published = models.DateTimeField(default=datetime.now, editable=False)
-    name = models.CharField(max_length=255)
-    user = models.ForeignKey(User, related_name='owned_archives')
-    users = models.ManyToManyField(User, related_name='archives')
-    volumes = models.ManyToManyField(Volume, related_name='archives')
+class Stream(models.Model):
+    file = models.ForeignKey(File, related_name='streams')
+    profile = models.CharField(max_length=255, default='96p.webm')
+    video = models.FileField(default=None, blank=True, upload_to=lambda f, x: stream_path(f))
+    source = models.ForeignKey(Stream, related_name='derivatives', default=None, blank=True)
+    available = models.BooleanField(default=False)
+
+    def extract_derivates(self):
+        #here based on settings derivates like smaller versions or other formats would be created

     def editable(self, user):
-        return self.users.filter(username=user.username).count() > 0
+        #FIXME: possibly needs user setting for stream
+        return True
+
+    def save_chunk(self, chunk, chunk_id=-1):
+        if not self.available:
+            if not self.video:
+                self.video.save(self.profile, ContentFile(chunk))
+            else:
+                f = open(self.file.path, 'a')
+                #FIXME: should check that chunk_id/offset is right
+                f.write(chunk)
+                f.close()
+            return True
+        return False

 class File(models.Model):
     created = models.DateTimeField(auto_now_add=True)
@@ -88,6 +98,9 @@ class File(models.Model):
     bits_per_pixel = models.FloatField(default=-1)
     pixels = models.BigIntegerField(default=0)

+    #This is true if derivative is available or subtitles where uploaded
+    available = models.BooleanField(default = False)
+
     is_audio = models.BooleanField(default = False)
     is_video = models.BooleanField(default = False)
     is_extra = models.BooleanField(default = False)
@@ -95,6 +108,7 @@ class File(models.Model):
     is_subtitle = models.BooleanField(default = False)
     is_version = models.BooleanField(default = False)

     def __unicode__(self):
         return self.name
@@ -152,28 +166,36 @@ class File(models.Model):

 class FileInstance(models.Model):
     created = models.DateTimeField(auto_now_add=True)
     modified = models.DateTimeField(auto_now=True)
-    published = models.DateTimeField(default=datetime.now, editable=False)
-    accessed = models.DateTimeField(default=datetime.now, editable=False)
+    ctime = models.DateTimeField(default=datetime.now, editable=False)
+    mtime = models.DateTimeField(default=datetime.now, editable=False)
+    atime = models.DateTimeField(default=datetime.now, editable=False)

     path = models.CharField(max_length=2048)
     folder = models.CharField(max_length=255)

     file = models.ForeignKey(File, related_name='instances')
-    archive = models.ForeignKey(Archive, related_name='files')
+    user = models.ForeignKey(User, related_name='files')

     def __unicode__(self):
-        return u'%s <%s> in %s'% (self.path, self.oshash, self.archive.name)
+        return u"%s's %s <%s>"% (self.user, self.path, self.oshash)

     @property
     def movieId(self):
         return File.objects.get(oshash=self.oshash).movieId

+def frame_path(f, name):
+    ext = os.path.splitext(name)
+    name = "%s.%s" % (f.position, ext)
+    h = f.file.oshash
+    return os.path.join('frame', h[:2], h[2:4], h[4:6], name)
+
 class Frame(models.Model):
     created = models.DateTimeField(auto_now_add=True)
     modified = models.DateTimeField(auto_now=True)
     file = models.ForeignKey(File, related_name="frames")
     position = models.FloatField()
-    frame = models.ImageField(default=None, null=True, upload_to=lambda f, x: frame_path(f))
+    frame = models.ImageField(default=None, null=True, upload_to=frame_path)
     #FIXME: frame path should be renamed on save to match current position
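For illustration, the new stream_path() above shards encoded streams under stream/ by oshash; a minimal sketch of the resulting layout, using a hypothetical stand-in object with just the attributes the function reads (the hash is the sample value that appears later in this commit):

    import os

    class FakeStream:
        #hypothetical stand-in: only oshash and profile are read by stream_path()
        oshash = 'a41cde31c581e11d'
        profile = '96p.webm'

    def stream_path(f):
        h = f.oshash
        return os.path.join('stream', h[:2], h[2:4], h[4:6], h[6:], f.profile)

    print stream_path(FakeStream())
    #-> stream/a4/1c/de/31c581e11d/96p.webm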

View file

@@ -29,136 +29,265 @@ import ox

 import models
-from backend.utils import oxid, parsePath
+from backend.utils import oxid, parse_path
 import backend.models

 #@login_required_json
 def api_update(request):
     '''
+    //both are optional, idea is to have 2 requests first with files, and
+    after that with info for the requested oshashes
     param data
-        {archive: string, files: json}
+        files: [
+            {oshash:, name:, folder:, oshash:, ctime:, atime:, mtime:, }
+        ]
+        info: {oshash: object}
     return {'status': {'code': int, 'text': string},
-            'data': {info: object, rename: object}}
+            'data': {info: list, data: list, file: list}}
     '''
     data = json.loads(request.POST['data'])
-    archive = data['archive']
-    folder = data['folder']
-    files = data['files']
-    needs_data = []
-    rename = []
-    archive, created = models.Archive.objects.get_or_create(name=archive, user=request.user)
-    if archive.editable(request.user):
-        print 'editing'
-        same_folder = models.FileInstance.objects.filter(folder=folder)
-        if same_folder.count() > 0:
-            movie = same_folder[0].file.movie
-        else:
-            movie = None
-        for filename in files:
-            data = files[filename]
-            oshash = data['oshash']
-            path = os.path.join(folder, filename)
-            instance = models.FileInstance.objects.filter(file__oshash=oshash)
+    user = request.user
+
+    response = json_response({'info': [], 'rename': [], 'file': []})
+
+    if 'files' in data:
+        all_files = []
+        for f in data['files']:
+            folder = f['folder']
+            name = f['name']
+            oshash = f['oshash']
+            all_files.append(oshash)
+            same_folder = models.FileInstance.objects.filter(folder=folder, user=user)
+            if same_folder.count() > 0:
+                movie = same_folder[0].file.movie
+            else:
+                movie = None
+
+            path = os.path.join(folder, name)
+            instance = models.FileInstance.objects.filter(file__oshash=oshash, user=user)
             if instance.count()>0:
                 instance = instance[0]
-                if path != instance.path: #file was movied
-                    instance.path = path
-                    instance.folder = folder
+                updated = False
+                for key in ('atime', 'mtime', 'ctime', 'name', 'folder'):
+                    if f[key] != getattr(instance, key):
+                        setattr(instance, key, f[key])
+                        updated=True
+                if updated:
                     f.save()
-                    print "file movied, so other shit"
             else:
                 #look if oshash is known
-                f = models.File.objects.filter(oshash=oshash)
-                if f.count() > 0:
-                    f = f[0]
+                file_object = models.File.objects.filter(oshash=oshash)
+                if file_object.count() > 0:
+                    file_object = file_object[0]
                     instance = models.FileInstance()
-                    instance.file = f
-                    instance.path=data['path']
-                    instance.folder=folder
+                    instance.file = file_object
+                    for key in ('atime', 'mtime', 'ctime', 'name', 'folder'):
+                        setattr(instance, key, f[key])
                     instance.save()
-                    movie = f.movie
                 #new oshash, add to database
                 else:
                     if not movie:
-                        movie_info = parsePath(folder)
+                        movie_info = parse_path(folder)
                         movie = backend.models.getMovie(movie_info)
                     f = models.File()
                     f.oshash = oshash
-                    f.info = data
-                    del f.info['oshash']
-                    f.name = filename
+                    f.name = name
                     f.movie = movie
                     f.save()
+                    response['info'].append(oshash)
                     instance = models.FileInstance()
-                    instance.archive = archive
+                    instance.user = user
                     instance.file = f
-                    instance.path = path
-                    instance.folder = folder
+                    for key in ('atime', 'mtime', 'ctime', 'name', 'folder'):
+                        setattr(instance, key, f[key])
                     instance.save()
-        response = json_response({'info': needs_data, 'rename': rename})
-    else:
-        response = json_response(status=403, text='permission denied')
+        #remove deleted files
+        #FIXME: can this have any bad consequences? i.e. on the selction of used movie files.
+        models.FileInstance.objects.filter(user=user).exclude(file__oshash__in=all_files).delete()
+        user_profile = user.get_profile()
+        user_profile.files_updated = datetime.now()
+        user_profile.save()
+
+    if 'info' in data:
+        for oshash in data['info']:
+            info = data['info'][oshash]
+            instance = models.FileInstance.objects.filter(file__oshash=oshash, user=user)
+            if instance.count()>0:
+                instance = instance[0]
+                if not instance.file.info:
+                    instance.file.info = info
+                    instance.file.save()
+
+    files = models.FileInstance.objects.filter(user=user, file__available=False)
+    response['data'] = [f.file.oshash for f in files.filter(file__is_video=True)]
+    response['files'] = [f.file.oshash for f in files.filter(file__is_subtitle=True)]
     return render_to_json_response(response)

-@login_required_json
-def api_addArchive(request):
+#@login_required_json
+#FIXME: is this part of the api or does it have to be outside due to multipart?
+def api_upload(request):
     '''
-        ARCHIVE API NEEDS CLEANUP
+        multipart
     param data
-        {name: string}
+        oshash: string
+        frame: [] //multipart frames
     return {'status': {'code': int, 'text': string},
-            'data': {}}
+            'data': {info: object, rename: object}}
     '''
-    data = json.loads(request.POST['data'])
-    try:
-        archive = models.Archive.objects.get(name=data['name'])
-        response = {'status': {'code': 401, 'text': 'archive with this name exists'}}
-    except models.Archive.DoesNotExist:
-        archive = models.Archive(name=data['name'])
-        archive.user = request.user
-        archive.save()
-        archive.users.add(request.user)
-        response = json_response({})
-        response['status']['text'] = 'archive created'
+    user = request.user
+    f = get_object_or_404(models.File, oshash=request.POST['oshash'])
+    if f.frames.count() == 0 and 'frame' in request.FILES:
+        for frame in request.FILES['frame']:
+            name = frame.name
+            position = float(os.path.splitext(name)[0])
+            fr = models.Frame(file=f, position=position)
+            fr.save()
+            fr.frame.save(frame, name)
+        response = json_response({})
+    else:
+        response = json_response(status=403, text='permissino denied')
     return render_to_json_response(response)

+class VideoChunkForm(forms.Form):
+    chunk = forms.FileField()
+    chunkId = forms.IntegerField(required=False)
+    done = forms.IntegerField(required=False)
+
 @login_required_json
-def api_editArchive(request):
-    '''
-        ARCHIVE API NEEDS CLEANUP
-    param data
-        {id: string, key: value,..}
-    return {'status': {'code': int, 'text': string},
-            'data': {}}
-    '''
-    data = json.loads(request.POST['data'])
-    item = get_object_or_404_json(models.Archive, name=data['name'])
-    if item.editable(request.user):
-        response = json_response(status=501, text='not implemented')
-        item.edit(data)
-    else:
-        response = json_response(status=403, text='permission denied')
-    return render_to_json_response(response)
-
-@login_required_json
-def api_removeArchive(request):
-    '''
-        ARCHIVE API NEEDS CLEANUP
-    param data
-        string id
-    return {'status': {'code': int, 'text': string}}
-    '''
-    response = json_response({})
-    itemId = json.loads(request.POST['data'])
-    item = get_object_or_404_json(models.Archive, movieId=itemId)
-    if item.editable(request.user):
-        response = json_response(status=501, text='not implemented')
-    else:
-        response = json_response(status=403, text='permission denied')
-    return render_to_json_response(response)
+def firefogg_upload(request):
+    #handle video upload
+    if request.method == 'POST':
+        #init upload
+        profile = request.POST.get('profile', request.GET['profile'])
+        #FIXME: check for valid profile
+        if 'oshash' in request.POST:
+            #404 if oshash is not know, files must be registered via update api first
+            f = get_object_or_404(models.File, oshash=request.POST['oshash'])
+            stream, created = models.Stream.objects.get_or_create(file=file, profile=profile)
+            if stream.video: #FIXME: check permission here instead of just starting over
+                stream.video.delete()
+                stream.available = False
+                stream.save()
+            response = {
+                #is it possible to no hardcode url here?
+                'uploadUrl': request.build_absolute_uri('/api/upload/?oshash=%s&profile=%s' % (f.oshash, profile)),
+                'result': 1
+            }
+            return render_to_json_response(response)
+        #post next chunk
+        if 'chunk' in request.FILES and 'oshash' in request.GET:
+            print "all chunk now"
+            stream = get_object_or_404(models.Stream, oshash=request.GET['oshash'], profile=profile)
+            form = VideoChunkForm(request.POST, request.FILES)
+            if form.is_valid() and stream.editable(request.user):
+                c = form.cleaned_data['chunk']
+                chunk_id = form.cleaned_data['chunkId']
+                response = {
+                    'result': 1,
+                    'resultUrl': request.build_absolute_uri('/')
+                }
+                if not stream.save_chunk(c, chunk_id):
+                    response['result'] = -1
+                elif form.cleaned_data['done']:
+                    #FIXME: send message to encode deamon to create derivates instead
+                    stream.available = True
+                    stream.save()
+                    response['result'] = 1
+                    response['done'] = 1
+                return render_to_json_response(response)
+    print request.GET, request.POST
+    response = json_response(status=400, text='this request requires POST')
+    return render_to_json_response(response)
+
+"""
+@login_required_json
+def list_files(request):
+    '''
+    GET list
+    > {
+        "files": {
+            "a41cde31c581e11d": {"path": "E/Example, The/An Example.avi", "size":1646274},
+        }
+    }
+    '''
+    response = {}
+    response['files'] = {}
+    qs = models.UserFile.filter(user=request.user)
+    p = Paginator(qs, 1000)
+    for i in p.page_range:
+        page = p.page(i)
+        for f in page.object_list:
+            response['files'][f.movie_file.oshash] = {'path': f.path, 'size': f.movie_file.size}
+    return render_to_json_response(response)
+
+def find_files(request):
+    response = {}
+    query = _parse_query(request)
+    response['files'] = {}
+    qs = models.UserFile.filter(user=request.user).filter(movie_file__movie__id__in=query['q'])
+    p = Paginator(qs, 1000)
+    for i in p.page_range:
+        page = p.page(i)
+        for f in page.object_list:
+            response['files'][f.movie_file.oshash] = {'path': f.path, 'size': f.movie_file.size}
+    return render_to_json_response(response)
+
+def api_fileInfo(request):
+    '''
+    param data
+        oshash string
+    return {'status': {'code': int, 'text': string},
+            'data': {imdbId:string }}
+    '''
+    if 'data' in request.POST:
+        oshash = json.loads(request.POST['data'])
+    elif 'oshash' in request.GET:
+        oshash = request.GET['oshash']
+    f = models.MovieFile.objects.get(oshash=oshash)
+    response = {'data': f.json()}
+    return render_to_json_response(response)
+
+def api_subtitles(request):
+    '''
+    param data
+        oshash string
+        language string
+        subtitle string
+    return
+        if no language is provided:
+            {data: {languages: array}}
+        if language is set:
+            {data: {subtitle: string}}
+        if subtitle is set:
+            saves subtitle for given language
+    '''
+    if 'data' in request.POST:
+        data = json.loads(request.POST['data'])
+        oshash = data['oshash']
+        language = data.get('language', None)
+        srt = data.get('subtitle', None)
+        if srt:
+            user = request.user
+            sub = models.Subtitles.objects.get_or_create(user, oshash, language)
+            sub.srt = srt
+            sub.save()
+        else:
+            response = json_response({})
+            if language:
+                q = models.Subtitles.objects.filter(movie_file__oshash=oshash, language=language)
+                if q.count() > 0:
+                    response['data']['subtitle'] = q[0].srt
+                return render_to_json_response(response)
+            l = models.Subtitles.objects.filter(movie_file__oshash=oshash).values('language')
+            response['data']['languages'] = [f['language'] for f in l]
+    return render_to_json_response(response)
+"""

View file

@@ -1,5 +1,7 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
+from __future__ import division, with_statement
+
 from datetime import datetime
 import os.path
 import random
@@ -67,7 +69,7 @@ def getMovie(info):
         movie.movieId = info['oxdbId']
     for key in ('episode_title', 'series_title', 'season', 'episode'):
-        if key in info:
+        if key in info and info[key]:
             movie.metadata[key] = info[key]
     movie.save()
     return movie
@@ -138,32 +140,20 @@ class Movie(models.Model):
             self.imdb = ox.web.imdb.Imdb(self.movieId)
         self.save()

-    #FIXME: use data.0xdb.org
-    '''
-    tpb_id = models.CharField(max_length=128, blank=True)
-    kg_id = models.CharField(max_length=128, blank=True)
-    open_subtitle_id = models.IntegerField(null=True, blank=True)
-    wikipedia_url = models.TextField(blank=True)
-
-    #FIXME: use data.0xdb.org/posters for that
-    #what of this is still required?
-    still_pos = models.IntegerField(null=True, blank=True)
-    poster = models.TextField(blank=True)
-    posters_disabled = models.TextField(blank=True)
-    posters_available = models.TextField(blank=True)
-    poster = models.ImageField(default=None, blank=True, upload_to=poster_path)
-    '''
+    poster = models.ImageField(default=None, blank=True, upload_to=lambda f, x: poster_path(f))
+    posters_url = models.TextField(blank=True)
     poster_height = models.IntegerField(default=0)
     poster_width = models.IntegerField(default=0)
+    poster_frame = models.FloatField(default=-1)
+
+    def get_poser(self):
+        url = self.poster_url
+        if not url:
+            url = self.poster.url
+        return url

     #stream related fields
-    '''
-    '''
-    stream_low = models.FileField(default=None, blank=True, upload_to=lambda f, x: movie_path(f, 'low'))
-    stream_mid = models.FileField(default=None, blank=True, upload_to=lambda f, x: movie_path(f, 'mid'))
-    stream_high = models.FileField(default=None, blank=True, upload_to=lambda f, x: movie_path(f, 'high'))
-    #FIXME: is this still required? should this not be aspect ratio? depends on stream???
-    scene_height = models.IntegerField(null=True, blank=True)
+    stream_aspect = models.FloatField(default=4/3)

     def __unicode__(self):
         return u'%s (%s)' % (self.get('title'), self.get('year'))

View file

@@ -10,7 +10,7 @@ import hashlib
 import ox
 import ox.iso
-from ox.normalize import normalizeName
+from ox.normalize import normalizeName, normalizeTitle

 def oxid(title, directors, year='', seriesTitle='', episodeTitle='', season=0, episode=0):
     director = ', '.join(directors)
@@ -64,6 +64,10 @@ def oxdb_title(_title, searchTitle = False):
     title = title.replace('_dot_dot_dot_', '... ')
     title = title.replace('_dot__space_', '. ')
     title = title.replace('_space__dot_', ' .')
+    year = ox.findRe(title, '(\(\d{4}\))')
+    if title.endswith(year):
+        title = title[:-len(year)].strip()
+    title = normalizeTitle(title)
     return title

 def oxdb_year(data):
@@ -117,16 +121,21 @@ def oxdb_part(path):
         part = p[0]
     return part

-def parsePath(path):
+def parse_path(path):
     import ox.web.imdb
     search_title = oxdb_title(path, True)
     r = {}
     r['title'] = oxdb_title(path)
     r['directors'] = oxdb_directors(path)
+    year = ox.findRe(path, '\((\d{4})\)')
+    if year:
+        r['year'] = year
+    #FIXME: only include it its actually a series
     r['episode_title'] = oxdb_episode_title(path)
     r['season'], r['episode'] = oxdb_season_episode(path)
     r['series_title'] = oxdb_series_title(path)
+    r['part'] = oxdb_part(path)
     r['imdbId'] = ox.web.imdb.guess(search_title, ', '.join(r['directors']), timeout=-1)
     r['oxdbId'] = oxid(r['title'], r['directors'],
         seriesTitle=r['series_title'],
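The year handling added above strips a trailing '(YYYY)' from the title and records it in parse_path(); a rough standalone sketch of the same pattern, using the stdlib re module as a stand-in for ox.findRe (assumed here to return the first captured group, or '' when nothing matches):

    import re

    def find_re(string, pattern):
        #minimal stand-in for ox.findRe, for illustration only
        m = re.search(pattern, string)
        if not m:
            return ''
        return m.group(1) if m.groups() else m.group(0)

    title = 'An Example (2004)'
    year = find_re(title, r'(\(\d{4}\))')    # '(2004)'
    if year and title.endswith(year):        # guard: endswith('') would always match
        title = title[:-len(year)].strip()   # 'An Example'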

View file

@@ -33,7 +33,8 @@ import tasks

 from oxuser.models import getUserJSON
 from oxuser.views import api_login, api_logout, api_register, api_contact, api_recover, api_preferences, api_findUser
-from archive.views import api_update, api_addArchive, api_editArchive, api_removeArchive
+from archive.views import api_update, api_upload
+
 from archive.models import File
@@ -406,88 +407,6 @@ def api_encodingSettings(request):
     response = json_response({'options': settings.VIDEO_ENCODING[settings.VIDEO_PROFILE]})
     return render_to_json_response(response)

-class UploadForm(forms.Form):
-    data = forms.TextInput()
-    file = forms.FileField()
-
-class VideoChunkForm(forms.Form):
-    chunk = forms.FileField()
-    done = forms.IntegerField(required=False)
-
-@login_required_json
-def api_upload(request): #video, timeline, frame
-    '''
-        upload video, timeline or frame
-    param data
-    param file
-    return {'status': {'code': int, 'text': string},
-            'data': {}}
-    '''
-    form = UploadForm(request.POST, request.FILES)
-    if form.is_valid():
-        data = json.loads(request.POST['data'])
-        oshash = data['oshash']
-        f = get_object_or_404(models.File, oshash=oshash)
-        if data['item'] == 'frame':
-            ff = form.cleaned_data['file']
-            position = data['position']
-            frame, created = models.Frame.objects.get_or_create(file=f, position=position)
-            if not created and frame.frame:
-                frame.frame.delete()
-            frame.frame.save(ff.name, ff)
-            frame.save()
-            response = json_response({'url': frame.frame.url})
-            return render_to_json_response(response)
-        if data['item'] == 'timeline':
-            pass
-            #print "not implemented"
-    response = json_response(status=501, text='not implemented')
-    return render_to_json_response(response)
-
-@login_required_json
-def firefogg_upload(request):
-    #handle video upload
-    if request.method == 'POST':
-        #init upload
-        if 'oshash' in request.POST:
-            #FIXME: what to do if requested oshash is not in db?
-            #FIXME: should existing data be reset here? or better, should this fail if an upload was there
-            f = get_object_or_404(models.File, oshash=request.POST['oshash'])
-            stream = getattr(f, 'stream_%s'%settings.VIDEO_UPLOAD)
-            if stream:
-                stream.delete()
-            f.available = False
-            f.save()
-            response = {
-                'uploadUrl': request.build_absolute_uri('/api/upload/?oshash=%s' % f.oshash),
-                'result': 1
-            }
-            return render_to_json_response(response)
-        #post next chunk
-        if 'chunk' in request.FILES and 'oshash' in request.GET:
-            print "all chunk now"
-            f = get_object_or_404(models.File, oshash=request.GET['oshash'])
-            form = VideoChunkForm(request.POST, request.FILES)
-            #FIXME:
-            if form.is_valid() and f.editable(request.user):
-                c = form.cleaned_data['chunk']
-                response = {
-                    'result': 1,
-                    'resultUrl': request.build_absolute_uri('/')
-                }
-                if not f.save_chunk(c, c.name):
-                    response['result'] = -1
-                elif form.cleaned_data['done']:
-                    #FIXME: send message to encode deamon to create derivates instead
-                    f.available = True
-                    f.save()
-                    response['result'] = 1
-                    response['done'] = 1
-                return render_to_json_response(response)
-    print request.GET, request.POST
-    response = json_response(status=400, text='this request requires POST')
-    return render_to_json_response(response)

 @login_required_json
 def api_editFile(request): #FIXME: should this be file.files. or part of update
@@ -505,7 +424,47 @@ def api_parse(request): #parse path and return info
         data: {imdb: string}}
     '''
     path = json.loads(request.POST['data'])['path']
-    response = json_response(utils.parsePath(path))
+    response = json_response(utils.parse_path(path))
+    return render_to_json_response(response)
+
+def api_setPosterFrame(request): #parse path and return info
+    '''
+    param data
+        {id: movieId, position: float}
+    return {'status': {'code': int, 'text': string},
+            data: {}}
+    '''
+    data = json.loads(request.POST['data'])
+    item = get_object_or_404_json(models.Movie, movieId=data['id'])
+    if item.editable(request.user):
+        #FIXME: some things need to be updated after changing this
+        item.poster_frame = data['position']
+        item.save()
+        response = json_response(status=200, text='ok')
+    else:
+        response = json_response(status=403, text='permissino denied')
+    return render_to_json_response(response)
+
+def api_setPoster(request): #parse path and return info
+    '''
+    param data
+        {id: movieId, url: string}
+    return {'status': {'code': int, 'text': string},
+            data: {poster: url}}
+    '''
+    data = json.loads(request.POST['data'])
+    item = get_object_or_404_json(models.Movie, movieId=data['id'])
+    if item.editable(request.user):
+        #FIXME: check that poster is from allowed url
+        item.poster_url = data['url']
+        if item.poster:
+            item.poster.delete()
+        item.save()
+        response = json_response(status=200, text='ok')
+        response['data']['poster'] = item.get_poster()
+    else:
+        response = json_response(status=403, text='permissino denied')
     return render_to_json_response(response)

 def api_getImdbId(request):
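For illustration, the 'data' payloads the two new endpoints above expect, following their docstrings; the movie id, position and url values are invented:

    import json

    #api_setPosterFrame: pick a float position within the movie as the poster frame
    data = json.dumps({'id': '0136244', 'position': 1242.5})

    #api_setPoster: point the poster at an external url
    data = json.dumps({'id': '0136244', 'url': 'http://example.com/poster.jpg'})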
@@ -522,56 +481,6 @@ def api_getImdbId(request):
         response = json_response(status=404, text='not found')
     return render_to_json_response(response)

-def api_fileInfo(request):
-    '''
-    param data
-        oshash string
-    return {'status': {'code': int, 'text': string},
-            'data': {imdbId:string }}
-    '''
-    if 'data' in request.POST:
-        oshash = json.loads(request.POST['data'])
-    elif 'oshash' in request.GET:
-        oshash = request.GET['oshash']
-    f = models.MovieFile.objects.get(oshash=oshash)
-    response = {'data': f.json()}
-    return render_to_json_response(response)
-
-def api_subtitles(request):
-    '''
-    param data
-        oshash string
-        language string
-        subtitle string
-    return
-        if no language is provided:
-            {data: {languages: array}}
-        if language is set:
-            {data: {subtitle: string}}
-        if subtitle is set:
-            saves subtitle for given language
-    '''
-    if 'data' in request.POST:
-        data = json.loads(request.POST['data'])
-        oshash = data['oshash']
-        language = data.get('language', None)
-        srt = data.get('subtitle', None)
-        if srt:
-            user = request.user
-            sub = models.Subtitles.objects.get_or_create(user, oshash, language)
-            sub.srt = srt
-            sub.save()
-        else:
-            response = json_response({})
-            if language:
-                q = models.Subtitles.objects.filter(movie_file__oshash=oshash, language=language)
-                if q.count() > 0:
-                    response['data']['subtitle'] = q[0].srt
-                return render_to_json_response(response)
-            l = models.Subtitles.objects.filter(movie_file__oshash=oshash).values('language')
-            response['data']['languages'] = [f['language'] for f in l]
-    return render_to_json_response(response)

 def video(request, id, quality):
     movie = get_object_or_404(models.Movie, movieId=id)
     if quality not in settings.VIDEO_ENCODING:
@@ -590,39 +499,6 @@ def frame(request, id, position, size):
         raise Http404
     return HttpFileResponse(frame, content_type='image/jpeg')

-'''
-    GET list
-    > {
-        "files": {
-            "a41cde31c581e11d": {"path": "E/Example, The/An Example.avi", "size":1646274},
-        }
-    }
-'''
-@login_required_json
-def list_files(request):
-    response = {}
-    response['files'] = {}
-    qs = models.UserFile.filter(user=request.user)
-    p = Paginator(qs, 1000)
-    for i in p.page_range:
-        page = p.page(i)
-        for f in page.object_list:
-            response['files'][f.movie_file.oshash] = {'path': f.path, 'size': f.movie_file.size}
-    return render_to_json_response(response)
-
-def find_files(request):
-    response = {}
-    query = _parse_query(request)
-    response['files'] = {}
-    qs = models.UserFile.filter(user=request.user).filter(movie_file__movie__id__in=query['q'])
-    p = Paginator(qs, 1000)
-    for i in p.page_range:
-        page = p.page(i)
-        for f in page.object_list:
-            response['files'][f.movie_file.oshash] = {'path': f.path, 'size': f.movie_file.size}
-    return render_to_json_response(response)

 def apidoc(request):
     '''
     this is used for online documentation at http://127.0.0.1:8000/api/

View file

@@ -12,6 +12,8 @@ class UserProfile(models.Model):
     recover_key = models.TextField()
     user = models.ForeignKey(User, unique=True)
+
+    files_updated = models.DateTimeField(default=None)

 def user_post_save(sender, instance, **kwargs):
     profile, new = UserProfile.objects.get_or_create(user=instance)

View file

@@ -11,6 +11,7 @@ urlpatterns = patterns('',
     # Example:
     (r'^ajax_filtered_fields/', include('ajax_filtered_fields.urls')),
     (r'^api/', include('backend.urls')),
+    (r'^api/upload/$', 'archive.views.firefogg_upload'),
    (r'^site.js$', 'app.views.site_js'),
     (r'^$', 'app.views.index'),
     (r'^r/(?P<key>.*)$', 'oxuser.views.recover'),
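The new /api/upload/ route points Firefogg-style chunked uploads at archive.views.firefogg_upload; below is a rough client-side sketch of the handshake that view expects. The host, file names, chunk size and the requests library are assumptions; the form fields (oshash, profile, chunk, chunkId, done) follow the view code above:

    import requests

    API = 'http://127.0.0.1:8000/api/upload/'

    #1) init: register the upload for a known oshash/profile and get the chunk url
    init = requests.post(API, data={'oshash': 'a41cde31c581e11d',
                                    'profile': '96p.webm'}).json()
    upload_url = init['uploadUrl']

    #2) post chunks; the last one also carries done=1 so the stream is marked available
    with open('example.96p.webm', 'rb') as video:
        chunk_id = 0
        chunk = video.read(1024 * 1024)
        while chunk:
            next_chunk = video.read(1024 * 1024)
            requests.post(upload_url,
                          data={'chunkId': chunk_id, 'done': 0 if next_chunk else 1},
                          files={'chunk': ('chunk.webm', chunk)})
            chunk, chunk_id = next_chunk, chunk_id + 1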