# pandora/pandora/item/models.py
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import json
import os
import re
import shutil
import subprocess
import tempfile
import unicodedata
import uuid
from datetime import datetime
from glob import glob
from six import PY2, string_types
from six.moves.urllib.parse import quote
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.files.temp import NamedTemporaryFile
from django.db import models, transaction, connection
from django.db.models import Q, Sum, Max
from django.db.models.signals import pre_delete
from django.utils.encoding import python_2_unicode_compatible
from django.utils import datetime_safe
import ox
from oxdjango.fields import JSONField, to_json
from oxdjango.sortmodel import get_sort_field
import ox.web.imdb
import ox.image
from . import managers
from . import utils
from . import tasks
from .timelines import join_tiles
from .data_api import external_data
from annotation.models import Annotation
from archive import extract
from clip.models import Clip, get_layers
from person.models import get_name_sort
from sequence.tasks import get_sequences
from title.models import get_title_sort
from user.utils import update_groups
from user.models import Group
import archive.models
User = get_user_model()
if not PY2:
unicode = str
def get_id(info):
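    '''
    return the public_id of the single existing item that matches
    title/director/year; otherwise, if a DATA_SERVICE is configured,
    return the id it reports, or None.
    '''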
q = Item.objects.all()
for key in ('title', 'director', 'year'):
# 'episodeTitle', 'episodeDirector', 'episodeYear', 'season', 'episode'):
if key in info and info[key]:
k = 'find__key'
v = 'find__value'
if key in Item.facet_keys + ['title']:
k = 'facets__key'
v = 'facets__value'
if isinstance(info[key], list):
for value in info[key]:
q = q.filter(**{k: key, v: value})
else:
q = q.filter(**{k: key, v: info[key]})
if q.count() == 1:
return q[0].public_id
if settings.DATA_SERVICE:
r = external_data('getId', info)
if r['status']['code'] == 200:
imdbId = r['data']['id']
return imdbId
return None
def get_item(info, user=None):
'''
info dict with:
imdbId, title, director, year,
season, episode, episodeTitle, episodeDirector, episodeYear
'''
item_data = {
'title': info.get('title', ''),
'director': info.get('director', []),
}
if list(filter(lambda k: k['id'] == 'year', settings.CONFIG['itemKeys'])):
item_data['year'] = info.get('year', '') or ''
# add additional item metadata parsed from path
ignore_keys = set(list(Item.base_keys) + ['language'] + list(item_data))
possible_keys = set([k['id'] for k in settings.CONFIG['itemKeys'] if k['id'] not in ignore_keys])
for key in info:
if key in possible_keys:
item_data[key] = info[key]
for key in ('episodeTitle', 'episodeDirector', 'episodeYear',
'season', 'episode', 'seriesTitle'):
if key in info and info[key]:
item_data[key] = info[key]
item_data = utils.normalize_dict('NFC', item_data)
if settings.USE_IMDB:
if 'imdbId' in info and info['imdbId']:
try:
item = Item.objects.get(public_id=info['imdbId'])
except Item.DoesNotExist:
item = Item(public_id=info['imdbId'])
if 'title' in info and 'director' in info:
item.external_data = item_data
item.user = user
item.oxdbId = item.public_id
if not item.update_external():
item.save(sync=True)
else:
public_id = get_id(info)
if public_id:
try:
item = Item.objects.get(public_id=public_id)
except Item.DoesNotExist:
info['imdbId'] = public_id
item = get_item(info, user)
return item
try:
item = Item.objects.get(public_id=info.get('oxdbId'))
except Item.DoesNotExist:
item = Item()
item.user = user
item.data = item_data
item.public_id = info.get('oxdbId', item.oxdb_id())
try:
existing_item = Item.objects.get(oxdbId=item.oxdb_id())
item = existing_item
except Item.DoesNotExist:
item.oxdbId = item.oxdb_id()
item.save(sync=True)
tasks.update_poster.delay(item.public_id)
else:
title = unicodedata.normalize('NFKD', info['title']).lower()
qs = Item.objects.filter(find__key='title', find__value=title)
if 'year' in info:
qs = qs.filter(find__key='year', find__value=str(info['year']))
if qs.count() == 1:
item = qs[0]
else:
item = Item()
item.data = item_data
item.user = user
item.save(sync=True)
tasks.update_poster.delay(item.public_id)
return item
def get_path(f, x):
return f.path(x)
def get_icon_path(f, x):
return get_path(f, 'icon.jpg')
def get_poster_path(f, x):
return get_path(f, 'poster.jpg')
def get_torrent_path(f, x):
return get_path(f, 'torrent.torrent')
@python_2_unicode_compatible
class Item(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, null=True, related_name='items')
groups = models.ManyToManyField(Group, blank=True, related_name='items')
# while metadata is updated, files are set to rendered=False
rendered = models.BooleanField(default=False, db_index=True)
# should be set based on user
level = models.IntegerField(db_index=True)
public_id = models.CharField(max_length=128, unique=True, blank=True)
oxdbId = models.CharField(max_length=42, unique=True, blank=True, null=True)
external_data = JSONField(default=dict, editable=False)
data = JSONField(default=dict, editable=False)
cache = JSONField(default=dict, editable=False)
poster = models.ImageField(default=None, blank=True, upload_to=get_poster_path)
poster_source = models.TextField(blank=True)
poster_height = models.IntegerField(default=0)
poster_width = models.IntegerField(default=0)
poster_frame = models.FloatField(default=-1)
icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path)
torrent = models.FileField(default=None, blank=True, max_length=1000, upload_to=get_torrent_path)
stream_info = JSONField(default=dict, editable=False)
# stream related fields
stream_aspect = models.FloatField(default=4/3)
objects = managers.ItemManager()
def get(self, key, default=None):
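        '''
        look up a metadata value: rightslevel/user/groups are special-cased,
        then self.data, then self.external_data, then values mapped from
        another key via an itemKeys 'map' definition.
        '''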
if key == 'rightslevel':
return self.level
if key == 'user':
return self.user and self.user.username or None
if key == 'groups':
return [g.name for g in self.groups.all()]
if self.data and key in self.data:
return self.data[key]
if self.external_data and key in self.external_data:
return self.external_data[key]
item_key = utils.get_by_id(settings.CONFIG['itemKeys'], key)
if item_key and 'value' in item_key \
and isinstance(item_key['value'], dict) \
and item_key['value'].get('type') == 'map' \
and self.get(item_key['value']['key']):
value = re.compile(item_key['value']['map']).findall(self.get(item_key['value']['key']))
return value[0] if value else default
return default
def access(self, user):
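        '''
        return True if user may see this item: editors always can, otherwise
        the item's rights level is checked against the canSeeItem capability
        (and itemRequiresVideo can hide unrendered items).
        '''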
if user.is_anonymous():
level = 'guest'
else:
level = user.profile.get_level()
editable = self.editable(user)
if editable:
return True
if not self.rendered and settings.CONFIG.get('itemRequiresVideo'):
return False
allowed_level = settings.CONFIG['capabilities']['canSeeItem'][level]
if self.level <= allowed_level:
return True
return False
def editable(self, user):
if user.is_anonymous():
return False
if user.profile.capability('canEditMetadata') or \
user.is_staff or \
self.user == user or \
self.groups.filter(id__in=user.groups.all()).count() > 0:
return True
return False
def edit(self, data):
data = data.copy()
# FIXME: how to map the keys to the right place to write them to?
if 'id' in data:
# FIXME: check if id is valid and exists and move/merge items accordingly
del data['id']
if 'groups' in data:
groups = data.pop('groups')
update_groups(self, groups)
keys = [k['id'] for k in
list(filter(lambda i: i.get('description'), settings.CONFIG['itemKeys']))]
for k in keys:
key = '%sdescription' % k
if key in data:
description = data.pop(key)
if isinstance(description, dict):
for value in description:
d, created = Description.objects.get_or_create(key=k, value=value)
d.description = ox.sanitize_html(description[value])
d.save()
else:
value = data.get(k, self.get(k, ''))
if not description:
description = ''
d, created = Description.objects.get_or_create(key=k, value=value)
d.description = ox.sanitize_html(description)
d.save()
for key in data:
if data[key] is None:
if key in self.data:
del self.data[key]
else:
k = list(filter(lambda i: i['id'] == key, settings.CONFIG['itemKeys']))
ktype = k and k[0].get('type') or ''
if ktype == 'text':
self.data[key] = ox.sanitize_html(data[key])
elif ktype == '[text]':
self.data[key] = [ox.sanitize_html(t) for t in data[key]]
elif ktype == '[string]':
self.data[key] = [ox.escape_html(t) for t in data[key]]
elif key in ('episodeTitle', 'seriesTitle', 'episodeDirector', 'seriesYear'):
self.data[key] = ox.escape_html(data[key])
elif isinstance(data[key], string_types):
self.data[key] = ox.escape_html(data[key])
elif isinstance(data[key], list):
def cleanup(i):
if isinstance(i, string_types):
i = ox.escape_html(i)
return i
self.data[key] = [cleanup(i) for i in data[key]]
elif isinstance(data[key], int) or isinstance(data[key], float):
self.data[key] = data[key]
else:
self.data[key] = ox.escape_html(data[key])
p = self.save()
if not settings.USE_IMDB and list(filter(lambda k: k in self.poster_keys, data)):
p = tasks.update_poster.delay(self.public_id)
return p
def update_external(self):
poster_url = self.prefered_poster_url()
if settings.DATA_SERVICE and not self.public_id.startswith('0x'):
response = external_data('getData', {'id': self.public_id})
if response['status']['code'] == 200:
self.external_data = response['data']
keys = [
k['id'] for k in settings.CONFIG['itemKeys']
] + settings.ADDITIONAL_IMDB_KEYS
for key in set(self.external_data) - set(keys):
del self.external_data[key]
self.save(sync=True)
if poster_url != self.prefered_poster_url():
self.remove_poster()
self.make_poster()
return True
return False
def add_default_data(self):
for k in settings.CONFIG['itemKeys']:
if 'default' in k and not k['id'] in self.data:
self.data[k['id']] = k['default']
def expand_connections(self):
c = self.get('connections')
if c:
for t in list(c):
if c[t]:
if isinstance(c[t][0], string_types):
c[t] = [{'id': i, 'title': None} for i in c[t]]
ids = [i['id'] for i in c[t]]
known = {}
for l in Item.objects.filter(public_id__in=ids):
known[l.public_id] = l.get('title')
for i in c[t]:
if i['id'] in known:
i['item'] = i['id']
i['title'] = known[i['id']]
c[t] = list(filter(lambda x: x['title'], c[t]))
if not c[t]:
del c[t]
return c
def __str__(self):
year = self.get('year')
if year:
string = u'%s (%s)' % (ox.decode_html(self.get('title', 'Untitled')), self.get('year'))
else:
string = self.get('title', u'Untitled')
return u'[%s] %s' % (self.public_id, string)
def get_absolute_url(self):
return '/%s' % self.public_id
def save(self, *args, **kwargs):
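        '''
        sync=True updates file paths and the poster inline,
        otherwise those updates are queued as background tasks.
        '''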
sync = kwargs.get('sync', False)
if 'sync' in kwargs:
del kwargs['sync']
update_poster = False
update_ids = False
if not self.id:
if self.user:
self.level = settings.CONFIG['rightsLevel'][self.user.profile.get_level()]
else:
self.level = settings.CONFIG['rightsLevel']['member']
if not self.public_id:
self.public_id = str(uuid.uuid1())
self.add_default_data()
super(Item, self).save(*args, **kwargs)
update_poster = True
if not settings.USE_IMDB:
self.public_id = ox.toAZ(self.id)
# avoid looking like an ad
if self.id == ox.fromAZ('AD') - 1:
cursor = connection.cursor()
sql = "SELECT nextval('%s_id_seq')" % self._meta.db_table
cursor.execute(sql)
# this does not work if another item without imdbid has the same metadata
oxdbId = self.oxdb_id()
if not settings.USE_IMDB:
self.oxdbId = None
elif oxdbId:
if self.oxdbId != oxdbId:
q = Item.objects.filter(oxdbId=oxdbId).exclude(id=self.id)
if q.count() != 0:
if utils.is_imdb_id(self.public_id):
self.oxdbId = None
self.update_sort()
q[0].merge_with(self, save=False)
else:
n = 1
key = 'episodeTitle' in self.data and 'episodeTitle' or 'title'
title = self.get(key, 'Untitled')
while q.count() != 0:
n += 1
self.data[key] = u'%s [%d]' % (title, n)
oxdbId = self.oxdb_id()
q = Item.objects.filter(oxdbId=oxdbId).exclude(id=self.id)
self.oxdbId = oxdbId
update_poster = True
if not utils.is_imdb_id(self.public_id):
update_ids = True
# id changed, what about existing item with new id?
if settings.USE_IMDB and not utils.is_imdb_id(self.public_id) and self.oxdbId != self.public_id:
self.public_id = self.oxdbId
# FIXME: move files to new id here
if settings.USE_IMDB and utils.is_imdb_id(self.public_id):
for key in ('title', 'year', 'director', 'season', 'episode',
'seriesTitle', 'episodeTitle'):
if key in self.data:
del self.data[key]
# update defaults
if settings.USE_IMDB:
defaults = list(filter(lambda k: 'default' in k, settings.CONFIG['itemKeys']))
for k in defaults:
if utils.is_imdb_id(self.public_id):
if k['id'] in self.data and self.data[k['id']] == k['default']:
del self.data[k['id']]
else:
if k['id'] not in self.data:
self.data[k['id']] = k['default']
if self.poster and os.path.exists(self.poster.path):
self.poster_height = self.poster.height
self.poster_width = self.poster.width
else:
self.poster_height = 128
self.poster_width = 80
self.update_sort()
self.update_languages()
self.cache = self.json()
self.cache['modified'] = datetime.now()
super(Item, self).save(*args, **kwargs)
self.update_find()
self.update_sort()
self.update_facets()
if update_ids:
for c in self.clips.all():
c.save()
for a in self.annotations.all():
public_id = a.public_id.split('/')[1]
public_id = "%s/%s" % (self.public_id, public_id)
if public_id != a.public_id:
a.public_id = public_id
a.save()
if sync:
self.update_file_paths()
if update_poster:
tasks.update_poster(self.public_id)
else:
tasks.update_file_paths.delay(self.public_id)
if update_poster:
return tasks.update_poster.delay(self.public_id)
return None
def delete_files(self):
path = os.path.join(settings.MEDIA_ROOT, self.path())
if not isinstance(path, bytes):
path = path.encode('utf-8')
if os.path.exists(path):
shutil.rmtree(path)
def merge_with(self, other, save=True):
'''
move all related tables to other and delete self
'''
for l in self.lists.all():
l.remove(self)
if l.items.filter(id=other.id).count() == 0:
l.add(other)
for a in self.annotations.all().order_by('id'):
a.item = other
a.set_public_id()
Annotation.objects.filter(id=a.id).update(item=other, public_id=a.public_id)
try:
other_sort = other.sort
except:
other_sort = None
self.clips.all().update(item=other, sort=other_sort)
if hasattr(self, 'files'):
for f in self.files.all():
f.item = other
f.save()
self.delete()
if save:
other.save()
# FIXME: update poster, stills and streams after this
if other_sort is None:
other.clips.all().update(sort=other.sort)
def merge_streams(self, output, resolution=None, format="webm"):
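        '''
        concatenate all streams into output (mkvmerge for webm, ffmpeg concat
        for mp4); returns True after merging, the single stream's path if
        there is only one stream, or None.
        '''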
streams = [s.get(resolution, format).media.path for s in self.streams()]
if len(streams) > 1:
if format == "webm":
cmd = ['mkvmerge', '-o', output]
cmd += [streams[0]] + ['+' + s for s in streams[1:]]
p = subprocess.Popen(cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True)
p.wait()
return True
elif format == "mp4":
fd, tmp_output_txt = tempfile.mkstemp('.txt')
with open(tmp_output_txt, 'w') as f:
f.write('\n'.join(["file '{}'".format(path) for path in streams]))
cmd = [
settings.FFMPEG,
'-nostats', '-loglevel', 'error',
'-y',
'-f', 'concat', '-safe', '0', '-i', tmp_output_txt,
'-c', 'copy',
output
]
p = subprocess.Popen(
cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True)
p.wait()
os.unlink(tmp_output_txt)
return True
else:
return None
return streams[0] if streams else None
def get_posters(self):
index = []
if settings.DATA_SERVICE:
url = self.prefered_poster_url()
external_posters = self.external_data.get('posters', {})
services = list(external_posters)
for service in settings.POSTER_PRECEDENCE:
if service in services:
index.append(service)
for service in services:
if service not in index:
index.append(service)
if settings.URL not in index:
index.append(settings.URL)
else:
external_posters = []
posters = []
poster = self.path('siteposter.jpg')
poster = os.path.abspath(os.path.join(settings.MEDIA_ROOT, poster))
if os.path.exists(poster):
posters.append({
'url': '/%s/siteposter.jpg' % self.public_id,
'width': 640,
'height': 1024,
'source': settings.URL,
'selected': url is None,
'index': index.index(settings.URL)
})
for service in external_posters:
p = external_posters[service][0]
selected = True if self.poster_source and self.poster_source == service or url == p['url'] else False
p['source'] = service
p['selected'] = selected
p['index'] = index.index(service)
posters.append(p)
posters.sort(key=lambda a: a['index'])
return posters
def get_frames(self):
frames = []
pframes = self.poster_frames()
if pframes:
pos = self.poster_frame
if pos < 0:
pos = int(len(pframes) / 2)
p = 0
for f in pframes:
frames.append({
'index': p,
'position': f['position'],
'selected': p == pos,
'url': '/%s/posterframe%d.jpg' % (self.public_id, p),
'height': f['height'],
'width': f['width']
})
p += 1
return frames
def get_stream(self):
for s in self.streams():
return s.json()
def get_layers(self, user=None):
return get_layers(item=self, user=user)
def get_documents(self, user=None):
qs = self.documents.all()
documents = [d.json(item=self) for d in qs]
return sorted(documents, key=lambda d: d['index'])
def json(self, keys=None):
i = {
'id': self.public_id,
'rendered': self.rendered,
'rightslevel': self.level
}
if self.user:
i['user'] = self.user.username
i['groups'] = [g.name for g in self.groups.all()]
i.update(self.external_data)
i.update(self.data)
if settings.USE_IMDB:
i['oxdbId'] = self.oxdbId or self.oxdb_id() or self.public_id
for k in settings.CONFIG['itemKeys']:
key = k['id']
if not keys or key in keys:
if key not in i:
value = self.get(key)
# also get values from sort table, i.e. numberof values
if not value:
try:
if self.sort and hasattr(self.sort, key):
value = getattr(self.sort, key)
except ItemSort.DoesNotExist:
pass
if value:
i[key] = value
if 'cast' in i and isinstance(i['cast'][0], string_types):
i['cast'] = [i['cast']]
if 'cast' in i and isinstance(i['cast'][0], list):
i['cast'] = [{'actor': x[0], 'character': x[1]} for x in i['cast']]
if 'connections' in i:
i['connections'] = self.expand_connections()
if not keys or 'posterRatio' in keys:
if self.poster_height:
i['posterRatio'] = self.poster_width / self.poster_height
        if keys and 'source' in keys:
i['source'] = self.files.filter(selected=True).exclude(data='').exists()
streams = self.streams()
i['durations'] = [s.duration for s in streams]
i['duration'] = sum(i['durations'])
i['audioTracks'] = self.audio_tracks()
if not i['audioTracks']:
del i['audioTracks']
i['parts'] = len(i['durations'])
if i['parts']:
i['videoRatio'] = streams[0].aspect_ratio
i['resolution'] = (streams[0].file.width, streams[0].file.height)
if i['resolution'] == (0, 0):
del i['resolution']
del i['videoRatio']
else:
i['duration'] = self.files.filter(
Q(selected=True) | Q(wanted=True)
).aggregate(Sum('duration'))['duration__sum']
videos = self.files.filter(selected=True, is_video=True)
if i['duration'] and videos.count():
i['resolution'] = (videos[0].width, videos[0].height)
if i['resolution'][1] != 0:
i['videoRatio'] = i['resolution'][0] / i['resolution'][1]
else:
for k in ('resolution', 'videoRatio'):
if k in i:
del i[k]
# only needed by admins
if keys and 'posters' in keys:
i['posters'] = self.get_posters()
frames = self.get_frames()
if keys and 'frames' in keys:
i['frames'] = frames
selected_frame = [f for f in frames if f['selected']]
if selected_frame:
i['posterFrame'] = selected_frame[0]['position']
elif self.poster_frame != -1.0:
i['posterFrame'] = self.poster_frame
dkeys = [k['id'] for k in
list(filter(lambda i: i.get('description'), settings.CONFIG['itemKeys']))]
if keys:
dkeys = list(filter(lambda k: k in keys, dkeys))
for key in dkeys:
k = list(filter(lambda i: i['id'] == key, settings.CONFIG['itemKeys']))
if isinstance((k and k[0].get('type') or ''), list):
i['%sdescription' % key] = {}
if key == 'name':
values = []
for ikey in list(filter(lambda i: i.get('sortType') == 'person', settings.CONFIG['itemKeys'])):
values += i.get(ikey['id'], [])
values = list(set(values))
else:
values = self.get(key)
if values:
for d in Description.objects.filter(key=key, value__in=values):
i['%sdescription' % key][d.value] = d.description
else:
qs = Description.objects.filter(key=key, value=self.get(key, ''))
i['%sdescription' % key] = '' if qs.count() == 0 else qs[0].description
if keys:
info = {}
for key in keys:
if key in i:
info[key] = i[key]
return info
return i
def get_item_description(self):
return ox.strip_tags(
self.get_item_description_html().replace(
'</div><div style="margin-top: 8px; text-align: justify">', '; '
)
)
def get_item_description_html(self):
description = ''
data = self.json()
info = []
for key in [
'director', 'writer', 'producer',
'cinematographer', 'editor', 'actor'
]:
value = data.get(key, [])
if value:
info.append('<b>%s:</b> %s' % (
'Cast' if key == 'actor' else key.capitalize(),
', '.join(value)
))
if info:
description += '<div style="margin-top: 8px; text-align: justify">%s</div>' % '; '.join(info)
info = []
for key in [
'duration', 'aspectratio',
'hue', 'saturation', 'lightness',
'volume', 'cutsperminute', 'wordsperminute'
]:
value = data.get(key, 0)
if value:
info.append('<b>%s:</b> %s' % (
'Aspect Ratio' if key == 'aspectratio'
else 'Cuts per Minute' if key == 'cutsperminute'
else 'Words per Minute' if key == 'wordsperminute'
else key.capitalize(),
ox.format_duration(value * 1000 if value else 0, milliseconds=False) if key == 'duration'
else '%.3f:1' % value if key == 'aspectratio'
else '%.3f' % value
))
if info:
description += '<div style="margin-top: 8px; text-align: justify">%s</div>' % '; '.join(info)
if not settings.USE_IMDB:
value = data.get('summary', '')
if value:
description += '<div style="margin-top: 8px; text-align: justify"><b style="display: none">Summary:</b> %s</div>' % value
return description
def oxdb_id(self):
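        '''
        compute the 0x-style id from (series)title, director, year and
        episode metadata via ox.get_oxid; falls back to public_id if the
        item has neither title nor director.
        '''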
if not self.get('title') and not self.get('director'):
return self.public_id
return ox.get_oxid(self.get('seriesTitle', self.get('title', '')),
self.get('director', []),
self.get('seriesYear', self.get('year', '')),
self.get('season', ''),
self.get('episode', ''),
self.get('episodeTitle', ''),
self.get('episodeDirector', []),
self.get('episodeYear', ''))
'''
Search related functions
'''
def update_languages(self):
languages = {}
for layer in settings.CONFIG['layers']:
l = layer['id']
ll = []
if self.annotations.filter(layer=l).count():
ll.append(settings.CONFIG['language'])
for a in self.annotations.filter(layer=l, value__contains='lang="'):
ll += re.compile('lang="(.*?)"').findall(a.value)
languages[l] = sorted(set(ll))
changed = languages != self.data.get('annotationLanguages')
self.data['annotationLanguages'] = languages
return changed
def update_find(self):
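        '''
        rebuild the ItemFind rows (full-text search values) for all
        configured item keys, facet keys and annotation layers.
        '''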
layer_keys = []
def save(key, value):
if value not in ('', None):
f, created = ItemFind.objects.get_or_create(item=self, key=key)
if isinstance(value, bool):
value = value and 'true' or 'false'
if isinstance(value, string_types):
2012-05-27 11:52:12 +00:00
value = ox.decode_html(ox.strip_tags(value.strip()))
value = unicodedata.normalize('NFKD', value).lower()
f.value = value
f.save()
else:
ItemFind.objects.filter(item=self, key=key).delete()
def get_titles():
titles = [self.get('title', 'Untitled')]
if self.get('originalTitle'):
titles.append(self.get('originalTitle'))
at = self.get('alternativeTitles')
if at:
titles += [a[0] for a in at]
return titles
with transaction.atomic():
for key in settings.CONFIG['itemKeys']:
i = key['id']
if i == 'title':
save(i, u'\n'.join(get_titles()))
elif i == 'rightslevel':
save(i, self.level)
elif i == 'filename':
save(i, '\n'.join(self.all_paths()))
elif i == 'annotations':
qs = Annotation.objects.filter(item=self)
qs = qs.filter(layer__in=Annotation.public_layers()).exclude(findvalue=None)
qs = qs.order_by('start')
save(i, u'\n'.join([l.findvalue for l in qs]))
elif key['type'] == 'layer':
qs = Annotation.objects.filter(item=self).exclude(findvalue=None)
qs = qs.filter(layer=i)
qs = qs.order_by('start')
save(i, u'\n'.join(list(filter(None, [l.findvalue for l in qs]))))
layer_keys.append(i)
elif i != '*' and i not in self.facet_keys:
value = self.get(i)
if isinstance(value, list):
value = u'\n'.join(value)
save(i, value)
for key in self.facet_keys:
if key in layer_keys:
continue
if key == 'title':
values = get_titles()
elif key == 'character':
values = self.get('cast', '')
if values:
values = list(filter(lambda x: x.strip(),
[f['character'] for f in values]))
values = list(set(values))
elif key == 'name':
values = []
for k in settings.CONFIG['itemKeys']:
if k.get('sortType') == 'person':
values += self.get(k['id'], [])
values = list(set(values))
else:
values = self.get(key, '')
if isinstance(values, list):
save(key, '\n'.join(values))
else:
save(key, values)
isSeries = self.get('series',
self.get('episodeTitle',
self.get('episode',
self.get('seriesTitle')))) is not None
save('series', isSeries)
base_keys = (
'aspectratio',
'bitrate',
'clips',
'created',
'cutsperminute',
'duration',
'hue',
'id',
'oxdbId',
'lightness',
'modified',
'numberofannotations',
'numberofcuts',
'numberofdocuments',
'numberoffiles',
'parts',
'pixels',
'random',
'timesaccessed',
'accessed',
'resolution',
'width',
'height',
'rendered',
'rightslevel',
'saturation',
'size',
'volume',
)
def update_sort(self):
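        '''
        rebuild the ItemSort row used for sorting, deriving values from
        files, streams, annotations and the sort keys in itemKeys.
        '''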
try:
s = self.sort
except ItemSort.DoesNotExist:
s = ItemSort(item=self)
def sortNames(values):
sort_value = u''
if values:
sort_value = u'; '.join([get_name_sort(name) for name in values])
if not sort_value:
sort_value = u''
return sort_value.lower()
def set_value(s, name, value):
if isinstance(value, string_types):
value = ox.decode_html(value.lower())
if not value:
value = None
setattr(s, name, value)
def get_value(source, key):
if 'value' in key and 'layer' in key['value']:
value = [a.value for a in self.annotations.filter(layer=key['value']['layer']).exclude(value='')]
else:
value = self.get(source)
return value
def get_words(source, key):
value = get_value(source, key)
if isinstance(value, list):
value = '\n'.join(value)
value = len(value.split(' ')) if value else 0
return value
# sort keys based on database, these will always be available
s.public_id = self.public_id.replace('0x', 'xx')
s.oxdbId = self.oxdbId
if not settings.USE_IMDB and s.public_id.isupper() and s.public_id.isalpha():
s.public_id = ox.sort_string(str(ox.fromAZ(s.public_id)))
else:
s.public_id = ox.sort_string(s.public_id)
s.modified = self.modified or datetime.now()
s.created = self.created or datetime.now()
s.rightslevel = self.level
s.aspectratio = self.get('aspectratio')
if self.id:
s.clips = self.clips.count()
s.numberoffiles = self.files.all().count()
videos = self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True))
if videos.count() > 0:
# s.duration = sum([v.duration for v in videos])
s.duration = sum([v.duration for v in self.streams()])
v = videos[0]
if v.is_audio or not v.info.get('video'):
s.resolution = None
s.width = None
s.height = None
s.aspectratio = None
else:
s.resolution = v.width * v.height
s.width = v.width
s.height = v.height
if not s.aspectratio and v.display_aspect_ratio:
s.aspectratio = float(utils.parse_decimal(v.display_aspect_ratio))
s.pixels = sum([v.pixels for v in videos])
s.parts = videos.count()
s.size = sum([v.size for v in videos]) # FIXME: only size of movies?
if s.duration:
s.bitrate = s.size * 8 / s.duration
else:
s.bitrate = 0
s.volume = self.data.get('volume', None)
else:
s.duration = None
s.resolution = None
s.bitrate = None
s.pixels = None
s.filename = None
s.files = None
s.size = None
s.volume = None
s.parts = 0
s.aspectratio = None
for key in ('hue', 'saturation', 'lightness'):
if key in self.data:
setattr(s, key, self.data.get(key, None))
s.numberofannotations = self.annotations.exclude(value='').count()
s.numberofcuts = len(self.data.get('cuts', []))
s.numberofdocuments = self.documents.count()
if s.duration:
2011-10-18 20:06:01 +00:00
s.cutsperminute = s.numberofcuts / (s.duration/60)
2010-12-25 14:00:48 +00:00
else:
s.cutsperminute = None
s.timesaccessed = self.accessed.aggregate(Sum('accessed'))['accessed__sum']
if not s.timesaccessed:
s.timesaccessed = 0
s.accessed = self.accessed.aggregate(Max('access'))['access__max']
for key in list(filter(lambda k: k.get('sort', False), settings.CONFIG['itemKeys'])):
name = key['id']
source = name
sort_type = key.get('sortType', key['type'])
if 'value' in key:
if 'key' in key['value']:
source = key['value']['key']
sort_type = key['value'].get('type', sort_type)
if isinstance(sort_type, list):
sort_type = sort_type[0]
if name not in self.base_keys:
if sort_type == 'title':
value = get_title_sort(self.get(source, u'Untitled'))
value = utils.sort_title(value)[:955]
set_value(s, name, value)
elif sort_type == 'person':
value = sortNames(self.get(source, []))
value = utils.sort_string(value)[:955]
set_value(s, name, value)
elif sort_type == 'string':
value = self.get(source, u'')
if isinstance(value, list):
value = u','.join(value)
value = utils.sort_string(value)[:955]
set_value(s, name, value)
elif sort_type == 'words':
value = get_words(source, key) if s.duration else None
set_value(s, name, value)
elif sort_type == 'wordsperminute':
value = get_words(source, key)
value = value / (s.duration / 60) if value and s.duration else None
set_value(s, name, value)
elif sort_type in ('length', 'integer', 'time', 'float'):
# can be length of strings or length of arrays, i.e. keywords
if 'layer' in key.get('value', []):
value = self.annotations.filter(layer=key['value']['layer']).count()
else:
value = self.get(source)
if isinstance(value, list):
value = len(value)
set_value(s, name, value)
elif sort_type == 'year':
value = self.get(source)
set_value(s, name, value)
elif sort_type == 'date':
value = value_ = self.get(source)
if isinstance(value, string_types):
value_ = None
for fmt in ('%Y-%m-%d', '%Y-%m', '%Y'):
try:
value_ = datetime_safe.datetime.strptime(value, fmt)
except ValueError:
pass
else:
continue
set_value(s, name, value_)
s.save()
def update_facet(self, key):
current_values = self.get(key, [])
if key == 'title':
if current_values:
current_values = [current_values]
else:
current_values = []
ot = self.get('originalTitle')
if ot:
current_values.append(ot)
at = self.get('alternativeTitles')
if at:
current_values += [a[0] for a in at]
elif key == 'character':
current_values = list(filter(lambda x: x.strip(),
[f['character'] for f in self.get('cast', [])]))
current_values = [item for sublist in [x.split(' / ') for x in current_values]
for item in sublist]
elif key == 'name':
current_values = []
for k in settings.CONFIG['itemKeys']:
if k.get('sortType') == 'person':
current_values += self.get(k['id'], [])
if not isinstance(current_values, list):
if not current_values:
current_values = []
else:
current_values = [unicode(current_values)]
filter_map = utils.get_by_id(settings.CONFIG['itemKeys'], key).get('filterMap')
if filter_map:
filter_map = re.compile(filter_map)
_current_values = []
for value in current_values:
value = filter_map.findall(value)
if value:
_current_values.append(value[0])
current_values = _current_values
current_values = list(set(current_values))
current_values = [ox.decode_html(ox.strip_tags(v)) for v in current_values]
current_values = [unicodedata.normalize('NFKD', v) for v in current_values]
self.update_facet_values(key, current_values)
def update_layer_facet(self, key):
from entity.models import Entity
current_values = [a['value']
for a in self.annotations.filter(layer=key).distinct().values('value')]
layer = utils.get_by_id(settings.CONFIG['layers'], key)
if layer.get('type') == 'entity':
current_values = [
a['name']
for a in Entity.objects.filter(id__in=[ox.fromAZ(i) for i in current_values]).values('name')
]
current_values = [ox.decode_html(ox.strip_tags(v.replace('<br>', ' '))) for v in current_values]
current_values = [unicodedata.normalize('NFKD', v) for v in current_values if v]
self.update_facet_values(key, current_values)
def update_facet_values(self, key, current_values):
current_sortvalues = set([value.lower() for value in current_values])
saved_values = [i.value.lower() for i in Facet.objects.filter(item=self, key=key)]
removed_values = list(filter(lambda i: i not in current_sortvalues, saved_values))
if removed_values:
q = Q()
for v in removed_values:
q |= Q(value__iexact=v)
Facet.objects.filter(item=self, key=key).filter(q).delete()
for value in current_values:
if value.lower() not in saved_values:
sortvalue = value
if key in self.person_keys + ['name']:
sortvalue = get_name_sort(value)
sortvalue = utils.sort_string(sortvalue).lower()[:900]
f, created = Facet.objects.get_or_create(item=self, key=key, value=value, sortvalue=sortvalue)
if created:
Facet.objects.filter(item=self, key=key, value__iexact=value).exclude(value=value).delete()
Facet.objects.filter(key=key, value__iexact=value).exclude(value=value).update(value=value)
saved_values.append(value.lower())
def update_facets(self):
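        '''
        re-sync Facet rows for all facet keys: layer-backed keys are
        collected from annotations, the rest from item metadata.
        '''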
for key in set(self.facet_keys + ['title']):
if key in self.layer_facet_keys:
self.update_layer_facet(key)
else:
self.update_facet(key)
def update_file_paths(self):
for f in self.files.all():
if f.normalize_path() != f.path:
f.save()
def path(self, name=''):
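        '''
        relative media path for this item, sharded by public_id padded to at
        least 7 characters, e.g. public_id 'ABC' -> items/00/00/AB/C/<name>
        '''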
h = self.public_id
h = (7-len(h))*'0' + h
return os.path.join('items', h[:2], h[2:4], h[4:6], h[6:], name)
'''
Video related functions
'''
def frame(self, position, height=None):
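        '''
        return the filesystem path of a frame at the given position (seconds
        across all streams), extracting and caching it on demand; None if no
        video is available.
        '''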
offset = 0
streams = self.streams()
for stream in streams:
if stream.duration + offset < position:
offset += stream.duration
else:
if not stream.file.is_video or not stream.file.info.get('video'):
return None
position = position - offset
if not height:
height = stream.resolution
else:
height = min(height, stream.resolution)
path = os.path.join(settings.MEDIA_ROOT, stream.path(),
'frames', "%dp" % height, "%s.jpg" % position)
if not os.path.exists(path) and stream.media:
extract.frame(stream.media.path, path, position, height, info=stream.info)
if not os.path.exists(path):
return None
return path
def extract_clip(self, in_, out, resolution, format, track=None, force=False):
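        '''
        render and cache a clip from in_ to out (seconds), merging streams
        first if the clip spans more than one part; returns the cached path,
        or False if the clip could not be extracted.
        '''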
streams = self.streams(track)
stream = streams[0].get(resolution, format)
if streams.count() > 1 and stream.info['duration'] < out:
video = NamedTemporaryFile(suffix='.%s' % format)
r = self.merge_streams(video.name, resolution, format)
if not r:
return False
path = video.name
            duration = sum(self.cache['durations'])
else:
path = stream.media.path
duration = stream.info['duration']
cache_name = '%s_%sp_%s.%s' % (self.public_id, resolution, '%s,%s' % (in_, out), format)
cache_path = os.path.join(settings.MEDIA_ROOT, self.path('cache/%s' % cache_name))
if os.path.exists(cache_path) and not force:
return cache_path
if duration >= out:
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
if subtitles:
srt = self.srt(subtitles['id'], encoder=ox.srt)
if len(srt) < 4:
srt = None
else:
srt = None
ox.makedirs(os.path.dirname(cache_path))
extract.chop(path, in_, out, subtitles=srt, dest=cache_path, encode=True)
return cache_path
return False
@property
def timeline_prefix(self):
videos = self.streams()
if len(videos) == 1:
return os.path.join(settings.MEDIA_ROOT, videos[0].path(''))
return os.path.join(settings.MEDIA_ROOT, self.path())
def all_paths(self):
return list(set([
item for sublist in
[f.all_paths() for f in self.files.all()]
for item in sublist
]))
def get_files(self, user):
files = self.files.all().select_related()
if user.profile.get_level() != 'admin':
files = files.filter(instances__volume__user=user)
return [f.json() for f in files]
def users_with_files(self):
return User.objects.filter(
volumes__files__file__item=self
).order_by('date_joined').distinct()
# FIXME: profile not showing up here
# ).order_by('-profile__level', 'date_joined').distinct()
def sets(self):
sets = []
for user in self.users_with_files():
files = self.files.filter(instances__volume__user=user, instances__ignore=False)
if files.count():
sets.append(files)
if not sets:
files = self.files.filter(instances=None, available=True)
if files.count():
sets.append(files)
return sets
def update_wanted(self):
wanted = []
for s in self.sets():
if s.filter(selected=False).count() != 0:
wanted += [f.id for f in s if not f.available]
else:
break
qs = self.files.all()
if wanted:
self.files.filter(id__in=wanted).update(wanted=True)
qs = qs.exclude(id__in=wanted)
qs.update(wanted=False)
def update_selected(self):
sets = self.sets()
for s in sets:
if s.filter(Q(is_video=True) | Q(is_audio=True)).filter(available=False).count() == 0:
update = False
self.files.exclude(id__in=s).exclude(part=None).update(part=None)
deselect = self.files.filter(selected=True).exclude(id__in=s)
if deselect.count() > 0:
deselect.update(selected=False)
update = True
if s.filter(selected=False).count() > 0:
s.update(selected=True, wanted=False)
update = True
if update:
self.rendered = False
self.save()
break
if not sets:
self.rendered = False
self.files.filter(selected=True).update(selected=False)
self.save()
def get_torrent(self, request):
if self.torrent:
self.torrent.seek(0)
data = ox.torrent.bdecode(self.torrent.read())
url = request.build_absolute_uri("%s/torrent/" % self.get_absolute_url())
if url.startswith('https://'):
url = 'http' + url[5:]
data['url-list'] = ['%s%s' % (url, u.split('torrent/')[1]) for u in data['url-list']]
return ox.torrent.bencode(data)
def make_torrent(self):
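        '''
        build a .torrent (with url-list webseed links and symlinked media)
        for the item's streams; does nothing unless torrents are enabled in
        the site config and streams exist.
        '''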
if not settings.CONFIG['video'].get('torrent'):
return
streams = self.streams()
if streams.count() == 0:
return
base = self.path('torrent')
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
if not isinstance(base, bytes):
base = base.encode('utf-8')
if os.path.exists(base):
shutil.rmtree(base)
ox.makedirs(base)
filename = utils.safe_filename(ox.decode_html(self.get('title')))
base = self.path('torrent/%s' % filename)
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
size = 0
duration = 0.0
if streams.count() == 1:
v = streams[0]
media_path = v.media.path
extension = media_path.split('.')[-1]
url = "%s/torrent/%s.%s" % (self.get_absolute_url(),
quote(filename.encode('utf-8')),
extension)
video = "%s.%s" % (base, extension)
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size = v.media.size
duration = v.duration
else:
url = "%s/torrent/" % self.get_absolute_url()
part = 1
ox.makedirs(base)
for v in streams:
media_path = v.media.path
extension = media_path.split('.')[-1]
video = "%s/%s.Part %d.%s" % (base, filename, part, extension)
part += 1
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size += v.media.size
duration += v.duration
video = base
torrent = '%s.torrent' % base
url = "http://%s%s" % (settings.CONFIG['site']['url'], url)
meta = {
'filesystem_encoding': 'utf-8',
'target': torrent,
'url-list': url,
}
if duration:
meta['playtime'] = ox.format_duration(duration*1000)[:-4]
# slightly bigger torrent file but better for streaming
piece_size_pow2 = 15 # 1 mbps -> 32KB pieces
if size / duration >= 1000000:
piece_size_pow2 = 16 # 2 mbps -> 64KB pieces
meta['piece_size_pow2'] = piece_size_pow2
ox.torrent.create_torrent(video, settings.TRACKER_URL, meta)
self.torrent.name = torrent[len(settings.MEDIA_ROOT)+1:]
self.save()
def audio_tracks(self):
tracks = [f['language']
for f in self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True)).values('language')
if f['language']]
return sorted(set(tracks))
def streams(self, track=None):
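        '''
        queryset of available source streams of the selected audio/video
        files, optionally limited to one audio track language, ordered by part.
        '''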
qs = archive.models.Stream.objects.filter(
source=None, available=True, file__item=self, file__selected=True
).filter(
Q(file__is_audio=True) | Q(file__is_video=True)
)
if not track:
tracks = self.audio_tracks()
if len(tracks) > 1:
if settings.CONFIG['language'] in tracks:
track = settings.CONFIG['language']
else:
track = tracks[0]
if track:
qs = qs.filter(file__language=track)
qs = qs.order_by('file__part', 'file__sort_path')
return qs
def update_timeline(self, async_=True):
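        '''
        regenerate the joined timeline plus cuts/volume/color data, then
        rebuild poster, icon and torrent; sequence detection runs inline if
        async_ is False, subtitle loading is always queued.
        '''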
streams = self.streams()
self.make_timeline()
if streams.count() == 1:
self.data['hue'], self.data['saturation'], self.data['lightness'] = streams[0].color
self.data['cuts'] = streams[0].cuts
self.data['volume'] = streams[0].volume
else:
self.data['cuts'] = extract.cuts(self.timeline_prefix)
self.data['volume'] = 0
offset = 0
color = [0, 0, 0]
n = streams.count()
for s in streams:
self.data['volume'] += s.volume * s.duration
color = [(a+b)/n for a, b in zip(color, ox.image.getRGB(s.color or [0.0] * 3))]
offset += s.duration
self.data['hue'], self.data['saturation'], self.data['lightness'] = ox.image.getHSL(color)
if offset:
self.data['volume'] /= offset
# extract.timeline_strip(self, self.data['cuts'], stream.info, self.timeline_prefix[:-8])
self.cache = self.json()
self.update_sort()
self.select_frame()
self.make_poster()
self.make_icon()
self.make_torrent()
self.rendered = streams.count() > 0
self.save()
if self.rendered:
if async_:
get_sequences.delay(self.public_id)
else:
get_sequences(self.public_id)
tasks.load_subtitles.delay(self.public_id)
def update_cache(self, **kwargs):
self.cache = self.json()
Item.objects.filter(id=self.id).update(cache=self.cache, **kwargs)
def save_poster(self, data):
self.poster.name = self.path('poster.jpg')
poster = self.poster.path
with open(poster, 'wb') as f:
f.write(data)
self.poster_height = self.poster.height
self.poster_width = self.poster.width
self.clear_poster_cache(self.poster.path)
if self.cache.get('posterRatio') != self.poster_width / self.poster_height:
self.update_cache(poster_width=self.poster_width,
poster_height=self.poster_height)
def prefered_poster_url(self):
if settings.DATA_SERVICE:
external_posters = self.external_data.get('posters', {})
service = self.poster_source
if service and service != settings.URL and service in external_posters:
return external_posters[service][0]['url']
if not service:
for service in settings.POSTER_PRECEDENCE:
if service in external_posters:
for p in external_posters[service]:
if service in settings.POSTER_ONLY_PORTRAIT and p['width'] > p['height']:
continue
return p['url']
return None
def make_timeline(self):
streams = self.streams()
if streams.count() > 1:
timelines = []
durations = []
for s in self.streams():
timelines.append(s.timeline_prefix)
durations.append(s.duration)
join_tiles(timelines, durations, self.timeline_prefix)
else:
# remove joined timeline if it was created at some point
for f in glob(os.path.join(settings.MEDIA_ROOT, self.path(), 'timeline*.jpg')):
os.unlink(f)
def remove_poster(self):
if self.poster:
self.clear_poster_cache(self.poster.path)
self.poster.delete()
self.clear_poster_cache(os.path.join(settings.MEDIA_ROOT, self.path('siteposter.jpg')))
def clear_poster_cache(self, poster=None):
if poster is None:
poster = self.poster.path
for f in glob(poster.replace('.jpg', '*.jpg')):
if f != poster:
try:
os.unlink(f)
except OSError:
pass
def make_poster(self):
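        # if no poster exists yet, render the siteposter, then prefer a downloaded
        # external poster and fall back to the generated siteposter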
ox.makedirs(os.path.join(settings.MEDIA_ROOT, self.path()))
poster = self.path('poster.jpg')
poster = os.path.abspath(os.path.join(settings.MEDIA_ROOT, poster))
if self.poster and self.poster.path != poster:
self.clear_poster_cache(self.poster.path)
self.poster.delete()
if not self.poster or not os.path.exists(self.poster.path):
poster = self.make_siteposter()
url = self.prefered_poster_url()
if url:
data = ox.net.read_url(url)
self.save_poster(data)
elif os.path.exists(poster):
with open(poster, 'rb') as f:
data = f.read()
if data:
self.save_poster(data)
self.clear_poster_cache(poster)
def make_siteposter(self):
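        # render <item path>/siteposter.jpg by piping the item json (plus poster
        # frame and 64p timeline paths) into the ITEM_POSTER script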
poster = self.path('siteposter.jpg')
poster = os.path.abspath(os.path.join(settings.MEDIA_ROOT, poster))
frame = self.get_poster_frame_path()
timeline = '%stimelineantialias64p.jpg' % self.timeline_prefix
audio_timeline = '%stimelineaudio64p.jpg' % self.timeline_prefix
if not os.path.exists(timeline) and os.path.exists(audio_timeline):
timeline = audio_timeline
cmd = [settings.ITEM_POSTER, '-d', '-', '-p', poster]
data = self.json()
if frame:
data['frame'] = frame
if os.path.exists(timeline):
data['timeline'] = timeline
data['oxdbId'] = self.oxdbId or self.oxdb_id() or self.public_id
data = utils.normalize_dict('NFC', data)
ox.makedirs(os.path.join(settings.MEDIA_ROOT, self.path()))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, close_fds=True)
p.communicate(json.dumps(data, default=to_json).encode('utf-8'))
self.clear_poster_cache(poster)
return poster
def poster_frames(self):
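        # candidate poster frames: imported frames from the selected files if
        # media.importFrames is enabled, otherwise frames sampled at 1/4, 1/2
        # and 3/4 of the duration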
frames = []
if settings.CONFIG['media']['importFrames']:
offset = 0
for f in self.files.filter(selected=True, is_video=True).order_by('sort_path'):
for ff in f.frames.all().order_by('position'):
if ff.frame:
frames.append({
'position': offset + ff.position,
'path': ff.frame.path,
'width': ff.width,
'height': ff.height
})
offset += f.duration
else:
if 'videoRatio' in self.cache and self.sort.duration and self.streams():
width, height = self.cache['resolution']
if width and height:
pos = self.sort.duration / 2
for p in [pos/2, pos, pos+pos/2]:
p = int(p)
path = self.frame(p, height)
if path:
frames.append({
'position': p,
'path': path,
'width': width,
'height': height,
})
return frames
def select_frame(self):
frames = self.poster_frames()
if frames:
heat = [ox.image.getImageHeat(f['path']) for f in frames]
self.poster_frame = heat.index(max(heat))
if not settings.CONFIG['media']['importFrames']:
self.poster_frame = frames[self.poster_frame]['position']
def get_poster_frame_path(self):
path = None
frames = self.poster_frames()
if frames and self.poster_frame < 0:
self.select_frame()
if self.poster_frame >= 0:
if settings.CONFIG['media']['importFrames']:
if frames and len(frames) > int(self.poster_frame):
path = frames[int(self.poster_frame)]['path']
elif frames:
path = frames[int(len(frames)/2)]['path']
else:
size = max(settings.CONFIG['video']['resolutions'])
path = self.frame(self.poster_frame, size)
return path
def make_icon(self):
frame = self.get_poster_frame_path()
icon = self.path('icon.jpg')
self.icon.name = icon
timeline = '%stimelineantialias64p.jpg' % self.timeline_prefix
audio_timeline = '%stimelineaudio64p.jpg' % self.timeline_prefix
if not os.path.exists(timeline) and os.path.exists(audio_timeline):
timeline = audio_timeline
cmd = [settings.ITEM_ICON, '-i', self.icon.path]
if os.path.exists(timeline):
cmd += ['-l', timeline]
if frame:
cmd += ['-f', frame]
p = subprocess.Popen(cmd, close_fds=True)
p.wait()
# remove cached versions
icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon))
for f in glob(icon.replace('.jpg', '*.jpg')):
if f != icon:
try:
os.unlink(f)
except OSError:
pass
return icon
def add_empty_clips(self):
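        # keep an empty 5 second subtitle placeholder every 60 seconds so the
        # subtitle track stays editable over the full duration; stale placeholders
        # are removed, existing ones are kept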
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
if not subtitles:
return
        # otherwise add an empty 5 second annotation every minute
duration = sum([s.duration for s in self.streams()])
layer = subtitles['id']
# FIXME: allow annotations from no user instead?
user = User.objects.all().order_by('id')[0]
clips = [(i, i+5) for i in range(0, int(duration) - 5, 60)]
exist = []
delete = []
for a in Annotation.objects.filter(layer=layer, item=self, value=''):
clip = (a.start, a.end)
if clip not in clips:
delete.append(a.id)
else:
exist.append(clip)
if delete:
Annotation.objects.filter(layer=layer, item=self, value='', id__in=delete).delete()
clips = list(set(clips) - set(exist))
for clip in clips:
annotation = Annotation(
item=self,
layer=layer,
start=clip[0],
end=clip[1],
value='',
user=user
)
annotation.save()
def load_subtitles(self, force=False):
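        # import srt files that share a path prefix with the selected video files;
        # annotations are only rewritten if the imported set differs from the
        # current subtitles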
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
if not subtitles:
return
layer = subtitles['id']
existing = self.annotations.filter(layer=layer).exclude(value='')
# only import on 0xdb for now or if forced manually
# since this will remove all existing subtitles
if force or not existing.count() or settings.USE_IMDB:
new = []
current = [(v.start, v.end, v.value) for v in Annotation.objects.filter(layer=layer, item=self)]
current.sort()
offset = 0
language = ''
subtitles = self.files.filter(selected=True, is_subtitle=True, available=True)
languages = [f.language for f in subtitles]
if languages:
if 'en' in languages:
language = 'en'
elif '' in languages:
language = ''
else:
language = languages[0]
# loop over all videos
for f in self.files.filter(Q(is_audio=True) | Q(is_video=True)) \
.filter(selected=True).order_by('sort_path'):
subtitles_added = False
prefix = os.path.splitext(f.path)[0]
if f.instances.all().count() > 0:
user = f.instances.all()[0].volume.user
else:
# FIXME: allow annotations from no user instead?
user = User.objects.all().order_by('id')[0]
# if there is a subtitle with the same prefix, import
q = subtitles.filter(path__startswith=prefix,
language=language)
if q.count() == 1:
s = q[0]
for data in s.srt(offset):
subtitles_added = True
value = data['value'].replace('\n', '<br>\n').replace('<br><br>\n', '<br>\n')
if data['in'] < self.cache['duration'] and data['out'] > self.cache['duration']:
data['out'] = self.cache['duration']
if data['in'] < self.cache['duration']:
new.append((float('%0.03f' % data['in']), float('%0.03f' % data['out']), value))
            # otherwise add an empty 5 second annotation every minute
if not subtitles_added:
start = offset and int(offset / 60) * 60 + 60 or 0
for i in range(start,
int(offset + f.duration) - 5,
60):
new.append((i, i+5, ''))
offset += f.duration
if current != new:
with transaction.atomic():
# FIXME: only reset if most subtitles are new
Annotation.objects.filter(layer=layer, item=self).delete()
AnnotationSequence.reset(self)
for start, end, value in new:
annotation = Annotation(
item=self,
layer=layer,
start=start,
end=end,
value=value,
user=user
)
annotation.save(delay_matches=True)
# remove left over clips without annotations
Clip.objects.filter(item=self, annotations__id=None).delete()
for a in self.annotations.filter(layer=layer):
a.update_matches()
return True
else:
self.add_empty_clips()
return False
def srt(self, layer, language=None, encoder=ox.srt):
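        # export one layer as SubRip (or another ox encoder), turning <br> markup
        # back into plain newlines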
def format_value(value):
value = value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n')
value = value.replace('\n\n', '<br>\n')
return value
annotations = self.annotations.filter(layer=layer).exclude(value='')
if language:
annotations = annotations.filter(languages__contains=language)
return encoder.encode([{
'in': a.start,
'out': a.end,
'value': format_value(a.value)
} for a in annotations.order_by('start', 'end', 'sortvalue')])
def next_annotationid(self):
return AnnotationSequence.nextid(self)
def delete_item(sender, **kwargs):
i = kwargs['instance']
i.delete_files()
pre_delete.connect(delete_item, sender=Item)
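# derive facet, poster and person key lists from the site configuration:
# keys with autocomplete or a filter become facets, as do string-type layers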
Item.facet_keys = []
Item.layer_facet_keys = []
Item.poster_keys = []
for key in settings.CONFIG['itemKeys']:
if 'autocomplete' in key and 'autocompleteSortKey' not in key or \
key.get('filter'):
Item.facet_keys.append(key['id'])
elif key.get('type') == 'layer' and \
utils.get_by_id(settings.CONFIG['layers'], key['id']).get('type') == 'string':
Item.facet_keys.append(key['id'])
if key['id'] in ('title', 'director', 'year') or key.get('poster'):
Item.poster_keys.append(key['id'])
if key.get('type') == 'layer' and (
key.get('filter') or
utils.get_by_id(settings.CONFIG['layers'], key['id']).get('type') == 'string'
):
Item.layer_facet_keys.append(key['id'])
Item.person_keys = []
for key in settings.CONFIG['itemKeys']:
if key.get('sortType') == 'person':
Item.person_keys.append(key['id'])
@python_2_unicode_compatible
class ItemFind(models.Model):
"""
2010-11-06 16:14:00 +00:00
used to find items,
item.update_find populates this table
2010-11-06 16:14:00 +00:00
its used in manager.ItemManager
2009-08-16 12:23:29 +00:00
"""
class Meta:
unique_together = ("item", "key")
item = models.ForeignKey('Item', related_name='find', db_index=True)
key = models.CharField(max_length=200, db_index=True)
value = models.TextField(blank=True, db_index=settings.DB_GIN_TRGM)
def __str__(self):
return u"%s=%s" % (self.key, self.value)
'''
ItemSort
table constructed based on info in settings.CONFIG['itemKeys']
'''
attrs = {
'__module__': 'item.models',
'item': models.OneToOneField('Item', related_name='sort', primary_key=True),
'duration': models.FloatField(null=True, blank=True, db_index=True),
'width': models.BigIntegerField(null=True, blank=True, db_index=True),
'height': models.BigIntegerField(null=True, blank=True, db_index=True),
'created': models.DateTimeField(null=True, blank=True, db_index=True),
}
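# add one sort column per sortable item key; the column type is derived from
# the key's sortType via get_sort_field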
for key in list(filter(lambda k: k.get('sort', False) or k['type'] in ('integer', 'time', 'float', 'date', 'enum'), settings.CONFIG['itemKeys'])):
name = key['id']
name = {'id': 'public_id'}.get(name, name)
sort_type = key.get('sortType', key['type'])
if isinstance(sort_type, list):
sort_type = sort_type[0]
field = get_sort_field(sort_type)
if name not in attrs:
attrs[name] = field[0](**field[1])
ItemSort = type('ItemSort', (models.Model,), attrs)
ItemSort.fields = [f.name for f in ItemSort._meta.fields]
@python_2_unicode_compatible
class Access(models.Model):
class Meta:
unique_together = ("item", "user")
access = models.DateTimeField(auto_now=True)
item = models.ForeignKey(Item, related_name='accessed')
user = models.ForeignKey(User, null=True, related_name='accessed_items')
accessed = models.IntegerField(default=0)
def save(self, *args, **kwargs):
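        # every save counts as one access; the per-item total and last access time
        # are denormalized into ItemSort so they can be used for sorting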
if not self.accessed:
self.accessed = 0
self.accessed += 1
super(Access, self).save(*args, **kwargs)
timesaccessed = Access.objects.filter(item=self.item).aggregate(Sum('accessed'))['accessed__sum']
ItemSort.objects.filter(item=self.item).update(timesaccessed=timesaccessed, accessed=self.access)
def __str__(self):
if self.user:
return u"%s/%s/%s" % (self.user, self.item, self.access)
return u"%s/%s" % (self.item, self.access)
@python_2_unicode_compatible
class Facet(models.Model):
'''
used for keys that can have multiple values like people, languages etc.
    does not perform too well if the total number of items goes above 10k;
this happens for keywords in 0xdb right now
'''
class Meta:
unique_together = ("item", "key", "value")
item = models.ForeignKey('Item', related_name='facets')
key = models.CharField(max_length=200, db_index=True)
value = models.CharField(max_length=1000, db_index=True)
sortvalue = models.CharField(max_length=1000, db_index=True)
def __str__(self):
return u"%s=%s" % (self.key, self.value)
def save(self, *args, **kwargs):
if not self.sortvalue:
self.sortvalue = utils.sort_string(self.value).lower()[:900]
            self.sortvalue = self.sortvalue.lower()
super(Facet, self).save(*args, **kwargs)
class Description(models.Model):
'''
shared itemkey descriptions
'''
class Meta:
unique_together = ("key", "value")
key = models.CharField(max_length=200, db_index=True)
value = models.CharField(max_length=1000, db_index=True)
description = models.TextField()
class AnnotationSequence(models.Model):
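    # per-item counter used to allocate public annotation ids of the form
    # <item public_id>/<ox.toAZ(value)>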
item = models.OneToOneField('Item', related_name='_annotation_sequence')
value = models.BigIntegerField(default=1)
@classmethod
def reset(cls, item):
s, created = cls.objects.get_or_create(item=item)
ids = [ox.fromAZ(a['public_id'].split('/')[1])
for a in item.annotations.exclude(public_id=None).values('public_id')]
s.value = max(ids) if ids else 0
s.save()
@classmethod
def nextid(cls, item):
with transaction.atomic():
s, created = cls.objects.get_or_create(item=item)
if created:
nextid = s.value
else:
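                # increment and read the counter in one UPDATE ... RETURNING
                # statement to avoid races between concurrent requests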
cursor = connection.cursor()
sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id)
cursor.execute(sql)
nextid = cursor.fetchone()[0]
return "%s/%s" % (item.public_id, ox.toAZ(nextid))