2010-11-26 15:07:24 +00:00
|
|
|
# -*- coding: utf-8 -*-
|
2016-08-23 10:27:06 +00:00
|
|
|
from __future__ import division, print_function, absolute_import
|
|
|
|
|
2012-01-17 08:58:33 +00:00
|
|
|
import re
|
2012-11-18 19:26:13 +00:00
|
|
|
import unicodedata
|
2010-11-26 15:07:24 +00:00
|
|
|
|
2017-03-03 07:56:35 +00:00
|
|
|
from django.utils.encoding import python_2_unicode_compatible
|
2014-11-18 14:55:21 +00:00
|
|
|
from django.db import models, transaction
|
2012-01-17 08:58:33 +00:00
|
|
|
from django.db.models import Q
|
2018-07-29 20:12:56 +00:00
|
|
|
from django.contrib.auth import get_user_model
|
2011-11-02 14:42:07 +00:00
|
|
|
from django.conf import settings
|
2012-02-01 15:25:18 +00:00
|
|
|
from django.db.models.signals import pre_delete
|
|
|
|
|
2011-01-28 08:43:46 +00:00
|
|
|
import ox
|
2010-11-26 15:07:24 +00:00
|
|
|
|
2011-10-02 18:16:28 +00:00
|
|
|
from clip.models import Clip
|
|
|
|
|
2013-03-02 08:57:39 +00:00
|
|
|
from item.utils import sort_string, get_by_key
|
2011-10-27 08:44:05 +00:00
|
|
|
|
2016-08-23 10:27:06 +00:00
|
|
|
from . import managers
|
|
|
|
from . import utils
|
2010-12-28 14:04:28 +00:00
|
|
|
|
2018-07-29 20:12:56 +00:00
|
|
|
User = get_user_model()
|
|
|
|
|
2012-05-27 14:21:08 +00:00
|
|
|
def get_super_matches(obj, model):
    """Collect names of other `model` rows whose name contains one of obj's names.

    E.g. for a Place "Paris" this returns names like "The Paris Region":
    longer, more specific names that this object's name is a substring of.
    `model.name_find` is assumed to hold names separated by ' ' / '|'.
    """
    own_names = [obj.name] + list(obj.alternativeNames)

    # One OR-chain over all of our names against the name_find index.
    query = None
    for candidate in own_names:
        lowered = candidate.lower()
        part = Q(name_find__contains=" " + lowered) | Q(name_find__contains="|%s" % lowered)
        query = part if query is None else query | part

    matches = []
    for other in model.objects.filter(query).exclude(id=obj.id):
        for othername in [other.name] + list(other.alternativeNames):
            for own in own_names:
                if own in othername:
                    matches.append(othername)
    return matches
|
2012-01-17 08:58:33 +00:00
|
|
|
|
2012-05-27 14:21:08 +00:00
|
|
|
def get_matches(obj, model, layer_type, qs=None):
    """Return a queryset of Annotations that appear to refer to `obj`.

    obj        -- a Place or Event (has name / alternativeNames / get_super_matches)
    model      -- obj's model class
    layer_type -- 'place' or 'event'
    qs         -- optional queryset of annotations to restrict the search to;
                  defaults to all annotations when None.

    Layers whose type equals layer_type are matched exactly against the
    annotation value; layers flagged has<Type>s (e.g. hasPlaces) are matched
    as substrings of findvalue.  Matches that are really part of a longer
    "super match" name (e.g. "Paris" inside "The Paris Region") are excluded.
    """
    super_matches = obj.get_super_matches()
    layers = settings.CONFIG['layers']

    # Exact-value layers for this type.
    exact = [l['id'] for l in layers if l['type'] == layer_type]
    if exact:
        q = Q(value__iexact=obj.name)
        for name in obj.alternativeNames:
            q = q | Q(value__iexact=name)
        f = q & Q(layer__in=exact)
    else:
        f = None

    # Substring layers (hasPlaces / hasEvents).
    has_type = 'has%ss' % layer_type.capitalize()
    contains = [l['id'] for l in layers if l.get(has_type)]
    if contains:
        name = unicodedata.normalize('NFKD', ox.decode_html(obj.name)).lower()
        q = Q(findvalue__contains=" " + name) | Q(findvalue__startswith=name)
        for name in obj.alternativeNames:
            name = unicodedata.normalize('NFKD', ox.decode_html(name)).lower()
            q = q | Q(findvalue__contains=" " + name) | Q(findvalue__startswith=name)
        contains_matches = q & Q(layer__in=contains)
        if f:
            f = contains_matches | f
        else:
            f = contains_matches

    if f is None:
        # No layer can match this type at all; previously this fell through
        # to qs.filter(None) and raised TypeError.
        return Annotation.objects.filter(id__in=[-1])

    # `qs is None` (not `not qs`): truth-testing a queryset executes it, and
    # an explicitly-passed empty queryset must not fall back to all().
    if qs is None:
        qs = Annotation.objects.all()

    # Normalize each candidate name once, and precompile its word-boundary
    # pattern (hoisted out of the per-annotation loop below).  The pattern is
    # only needed when there is no exact layer (see the `exact or` test).
    names = []
    for raw in [obj.name] + list(obj.alternativeNames):
        name = unicodedata.normalize('NFKD', ox.decode_html(raw.lower())).lower()
        pattern = None if exact else re.compile(
            r'((^|\s)%s([\.,;:!?\'"\)\]\-\/\s]|$))' % re.escape(name))
        names.append((name, pattern))

    matches = []
    for a in qs.filter(f):
        if a.findvalue:
            value = a.findvalue.lower()
            # Blank out super matches so "Paris" does not fire inside
            # "The Paris Region".
            for name in super_matches:
                value = value.replace(ox.decode_html(name).lower(), '')
            for name, pattern in names:
                if name in value and (exact or pattern.findall(value)):
                    matches.append(a.id)
                    break
    if not matches:
        # Sentinel id that matches nothing, so an empty queryset is returned.
        matches = [-1]
    return Annotation.objects.filter(id__in=matches)
|
2011-02-11 10:21:25 +00:00
|
|
|
|
2017-03-03 07:56:35 +00:00
|
|
|
@python_2_unicode_compatible
class Annotation(models.Model):
    """A timed annotation (subtitle, place, event, entity, ...) on an item.

    Each annotation covers the [start, end] span (in seconds) of one item and
    belongs to exactly one layer configured in settings.CONFIG['layers'].
    """
    objects = managers.AnnotationManager()

    #FIXME: here having a item,start index would be good
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User, related_name='annotations')
    item = models.ForeignKey('item.Item', related_name='annotations')
    # Clip grouping annotations sharing the same in/out points; kept in sync
    # by save() and garbage-collected by delete().
    clip = models.ForeignKey('clip.Clip', null=True, related_name='annotations')

    # Public identifier assigned from item.next_annotationid() on first save.
    public_id = models.CharField(max_length=128, unique=True)
    # in/out points in seconds (-1 = unset)
    start = models.FloatField(default=-1, db_index=True)
    end = models.FloatField(default=-1, db_index=True)

    # Layer id, matching an entry in settings.CONFIG['layers'].
    layer = models.CharField(max_length=255, db_index=True)
    # Raw (possibly HTML) value as entered by the user.
    value = models.TextField()
    # Tag-stripped, NFKD-normalized, lower-cased value used for searching.
    findvalue = models.TextField(null=True, db_index=settings.DB_GIN_TRGM)
    # Truncated sort key derived from findvalue (None if value is empty).
    sortvalue = models.CharField(max_length=1000, null=True, blank=True, db_index=True)

    # Comma-separated lang="..." codes found in value, or None.
    languages = models.CharField(max_length=255, null=True, blank=True)
|
|
|
def editable(self, user):
    """Return True if `user` is allowed to modify this annotation.

    Editing requires an authenticated user who has the
    'canEditAnnotations' capability, owns the annotation, or shares a
    group with the annotation's item.
    """
    if not user.is_authenticated():
        return False
    if user.profile.capability('canEditAnnotations'):
        return True
    if self.user == user:
        return True
    return user.groups.filter(id__in=self.item.groups.all()).count() > 0
|
|
|
|
|
2014-11-17 19:56:27 +00:00
|
|
|
@classmethod
def get(cls, id):
    """Return the annotation with the given public id.

    `id` is the public_id string, not the database primary key.
    Raises Annotation.DoesNotExist if no such annotation exists.
    """
    return cls.objects.get(public_id=id)
|
|
|
|
|
2011-09-30 22:28:35 +00:00
|
|
|
def set_public_id(self):
    # Ask the item for the next free annotation id; uniqueness is
    # additionally enforced by unique=True on the public_id field.
    self.public_id = self.item.next_annotationid()
2012-01-10 16:00:41 +00:00
|
|
|
@classmethod
def public_layers(cls):
    """Return the ids of all configured layers not marked as private.

    Fix: the first parameter of a classmethod is conventionally named
    `cls`, not `self` (it receives the class, not an instance); the
    manual append loop is replaced by a list comprehension.
    """
    return [
        layer['id']
        for layer in settings.CONFIG['layers']
        if not layer.get('private', False)
    ]
|
|
|
|
|
2011-12-26 14:30:30 +00:00
|
|
|
def get_layer(self):
    """Return this annotation's layer config dict, or {} if unknown."""
    matching = (l for l in settings.CONFIG['layers'] if l['id'] == self.layer)
    return next(matching, {})
|
|
|
|
|
2011-08-23 10:47:59 +00:00
|
|
|
def save(self, *args, **kwargs):
    """Normalize derived fields (findvalue/sortvalue/languages), keep the
    clip and public_id in sync, persist, then refresh related matches.

    Keyword-only extra:
        delay_matches -- when True (used by bulk imports such as
            load_subtitles), skip update_matches()/update_documents();
            the caller is expected to run them in bulk afterwards.
    """
    delay_matches = kwargs.pop('delay_matches', False)
    # New rows (or legacy rows without one) need a public id.
    set_public_id = not self.id or not self.public_id
    layer = self.get_layer()
    if self.value:
        self.value = utils.cleanup_value(self.value, layer['type'])
        findvalue = self.value
        try:
            # Entity annotations store the entity id in value; index the
            # entity's name instead so it is findable by name.
            if layer['type'] == 'entity':
                findvalue = self.get_entity().name
        except:
            # NOTE(review): bare except — deliberately best-effort when the
            # referenced entity cannot be resolved; falls back to raw value.
            pass
        # Strip markup and normalize for search: <br> -> space, drop tags,
        # decode entities, collapse newlines.
        self.findvalue = ox.decode_html(ox.strip_tags(re.sub('<br */?>\n?', ' ', findvalue))).replace('\n', ' ')
        self.findvalue = unicodedata.normalize('NFKD', self.findvalue).lower()
        sortvalue = sort_string(self.findvalue)
        # Drop leading characters that are neither letters nor numbers so
        # punctuation does not dominate sorting.
        while sortvalue and not unicodedata.category(sortvalue[0])[0] in ('L', 'N'):
            sortvalue = sortvalue[1:]
        if sortvalue:
            # Truncate below the 1000-char column limit.
            self.sortvalue = sortvalue[:900]
        else:
            self.sortvalue = None
        # Collect lang="..." attributes present in the raw value.
        self.languages = ','.join(re.compile('lang="(.*?)"').findall(self.value))
        if not self.languages:
            self.languages = None
    else:
        # Empty value: clear all derived fields.
        self.findvalue = None
        self.sortvalue = None
        self.languages = None

    with transaction.atomic():
        # (Re)attach to the clip matching our current in/out points.
        if not self.clip or self.start != self.clip.start or self.end != self.clip.end:
            self.clip, created = Clip.get_or_create(self.item, self.start, self.end)

        if set_public_id:
            self.set_public_id()

        super(Annotation, self).save(*args, **kwargs)

        if self.clip:
            # Mark the clip as having this layer (single UPDATE, only if
            # the flag is not already set).
            Clip.objects.filter(**{
                'id': self.clip.id,
                self.layer: False
            }).update(**{self.layer: True})
            # update clip.findvalue
            self.clip.save()

    # update matches in bulk if called from load_subtitles
    if not delay_matches:
        self.update_matches()
        self.update_documents()
|
2016-09-05 21:42:41 +00:00
|
|
|
|
|
|
|
def update_matches(self):
    """Synchronize the places/events this annotation is matched with.

    For each relevant type (decided by the layer's type / hasPlaces /
    hasEvents flags): garbage-collect undefined matches only we reference,
    auto-create a match for direct place/event layers, and re-run matching
    for every Place/Event whose membership may have changed.
    """
    from place.models import Place
    from event.models import Event
    types = []
    layer = self.get_layer()
    if layer.get('type') == 'place' or layer.get('hasPlaces'):
        types.append('place')
    if layer.get('type') == 'event' or layer.get('hasEvents'):
        types.append('event')
    for type in types:
        if type == 'place':
            Model = Place
        elif type == 'event':
            Model = Event

        # The m2m manager for this type (self.places or self.events).
        a_matches = getattr(self, type == 'place' and 'places' or 'events')

        # remove undefined matches that only have this annotation
        for p in a_matches.filter(defined=False).exclude(name=self.value):
            if p.annotations.exclude(id=self.id).count() == 0:
                p.delete()

        # A direct place/event layer always gets at least one match,
        # creating an (undefined) Place/Event from the value if needed.
        if layer.get('type') == type and a_matches.count() == 0:
            a_matches.add(Model.get_or_create(self.value))
        for p in a_matches.all():
            p.update_matches()

        if self.findvalue:
            # Map of id -> all (decoded) names of every Place/Event.
            names = {}
            for n in Model.objects.all().values('id', 'name', 'alternativeNames'):
                names[n['id']] = [ox.decode_html(x) for x in (n['name'],) + n['alternativeNames']]
            value = self.findvalue.lower()
            current = {p.id for p in a_matches.all()}
            matches = []
            name_matches = set()
            new = set()

            # Ids whose name (or an alternative) occurs in our findvalue.
            for i in names:
                for name in names[i]:
                    if name.lower() in value:
                        matches.append(i)
                        name_matches.add(name.lower())
                        break
            for p in Model.objects.filter(id__in=matches):
                # only add places/events that did not get added as a super match
                # i.e. only add The Paris Region and not Paris
                super_match = False
                for n in p.get_super_matches():
                    if n.lower() in name_matches:
                        super_match = True
                        break
                if not super_match:
                    new.add(p.id)

            # added or removed items are only in current or only in new
            update = list(current ^ new)
            if update:
                for e in Model.objects.filter(id__in=update):
                    e.update_matches(Annotation.objects.filter(id=self.id))
        else:
            # annotation has no value, remove all existing matches
            for e in a_matches.all():
                e.update_matches(Annotation.objects.filter(pk=self.id))
|
2011-01-18 09:54:14 +00:00
|
|
|
|
2017-01-26 15:56:28 +00:00
|
|
|
def update_documents(self):
    """Sync the documents m2m relation with the documents referenced in value."""
    from document.models import Document
    from document.utils import get_documents

    linked = {d.id for d in self.documents.all()}
    referenced = set(get_documents(self.value)) if self.value else set()

    # Drop links to documents no longer referenced.
    for document in Document.objects.filter(id__in=linked - referenced):
        self.documents.remove(document)
    # Link newly referenced documents.
    for document in Document.objects.filter(id__in=referenced - linked):
        self.documents.add(document)
|
|
|
|
|
2012-02-28 20:02:50 +00:00
|
|
|
def delete(self, *args, **kwargs):
    """Delete the annotation, garbage-collect its clip, refresh item indexes."""
    with transaction.atomic():
        super(Annotation, self).delete(*args, **kwargs)
        # Remove the clip if this was its last annotation.
        if self.clip and self.clip.annotations.count() == 0:
            self.clip.delete()
        # Keep the item's search, sort and facet tables consistent.
        self.item.update_find()
        self.item.update_sort()
        self.item.update_facets()
|
2012-02-28 20:02:50 +00:00
|
|
|
|
2012-02-01 15:25:18 +00:00
|
|
|
def cleanup_undefined_relations(self):
    """Delete undefined places/events that only this annotation references.

    Called from the pre_delete signal so auto-created (defined=False)
    matches do not outlive their last referencing annotation.
    """
    layer_type = self.get_layer().get('type')
    if layer_type == 'place':
        related = self.places
    elif layer_type == 'event':
        related = self.events
    else:
        return
    for match in related.filter(defined=False):
        if match.annotations.exclude(id=self.id).count() == 0:
            match.delete()
|
|
|
|
|
2015-02-13 15:22:14 +00:00
|
|
|
def get_entity(self):
    """Return the Entity this annotation refers to (value holds the entity id)."""
    from entity.models import Entity
    return Entity.get(self.value)
|
|
|
|
|
2015-09-14 12:08:02 +00:00
|
|
|
def _get_entity_json(self, user=None, entity_cache=None):
    """Return (entity_json, annotation_value) for this entity annotation.

    When serializing many annotations pointing to the same entity, it is
    expensive to repeatedly look up and serialize the same entity, so
    callers may pass a dict as `entity_cache`, shared across calls; results
    are memoized in it keyed by self.value (the entity id).

    Only the entity's 'id' and 'name' keys are serialized: fetching each
    entity's documents is expensive, and repeating the full entity JSON for
    every annotation of the same entity bloats get(..., keys=['layers'])
    responses.  The web interface only depends on 'entity.id' and refetches
    the rest on demand, so omitting the other keys makes items with many
    entity annotations substantially faster to serialize and send.
    """
    from entity.models import Entity

    if entity_cache is not None and self.value in entity_cache:
        return entity_cache[self.value]

    # value is the base-26 encoded entity id.
    id = ox.fromAZ(self.value)
    # only('name') avoids loading unused columns; see docstring for why
    # just id & name are serialized.
    entity = Entity.objects.filter(id=id).only('name').get()
    entity_json = entity.json(keys=['id', 'name'])
    value = entity.annotation_value()

    if entity_cache is not None:
        entity_cache[self.value] = (entity_json, value)

    return (entity_json, value)
|
|
|
|
|
2015-02-16 16:10:45 +00:00
|
|
|
# Keys json() can produce from the annotation itself; any other requested
# key is looked up on the clip (_clip_keys) or on the item.
annotation_keys = (
    'id', 'in', 'out', 'value', 'created', 'modified',
    'duration', 'layer', 'item', 'videoRatio', 'languages',
    'entity', 'event', 'place'
)
# Keys served from the associated clip.
_clip_keys = ('hue', 'lightness', 'saturation', 'volume')
|
2016-09-05 21:42:41 +00:00
|
|
|
|
2015-09-14 12:08:02 +00:00
|
|
|
def json(self, layer=False, keys=None, user=None, entity_cache=None):
    """Serialize this annotation to a dict.

    layer        -- include the 'layer' key even without `keys`.
    keys         -- optional whitelist; when given, the result is filtered
                    to these keys, and unknown keys are resolved from the
                    clip (_clip_keys) or from the item.
    user         -- when given, adds an 'editable' flag for that user.
    entity_cache -- passed through to _get_entity_json to share entity
                    lookups across many annotations.
    """
    j = {
        'user': self.user.username,
        'id': self.public_id,
        'in': self.start,
        'out': self.end,
        'value': self.value,
        'created': self.created,
        'modified': self.modified,
    }
    j['duration'] = abs(j['out'] - j['in'])
    if user:
        j['editable'] = self.editable(user)
    if self.languages:
        j['languages'] = self.languages.split(',')
    l = self.get_layer()
    if l:
        # Layer-type specific payload: entity/event/place details.
        if l['type'] == 'entity':
            try:
                (j['entity'], j['value']) = self._get_entity_json(
                    user=user, entity_cache=entity_cache)
            except:
                # NOTE(review): bare except — unresolvable entity degrades
                # to an empty dict rather than failing serialization.
                j['entity'] = {}
        elif l['type'] == 'event':
            qs = self.events.all()
            if qs.count() > 0:
                j['event'] = qs[0].json(user=user)
            else:
                j['event'] = {}
        elif l['type'] == 'place':
            qs = self.places.all()
            if qs.count() > 0:
                j['place'] = qs[0].json(user=user)
            else:
                j['place'] = {}

    if layer or (keys and 'layer' in keys):
        j['layer'] = self.layer
    if keys and 'item' in keys:
        j['item'] = self.item.public_id
    if keys:
        # Filter down to the requested keys ...
        _j = {}
        for key in keys:
            if key in j:
                _j[key] = j[key]
        j = _j
        if 'videoRatio' in keys:
            streams = self.item.streams()
            if streams:
                j['videoRatio'] = streams[0].aspect_ratio
        # ... then resolve any still-missing keys from the clip or the item.
        for key in keys:
            if key not in j:
                if key in self._clip_keys:
                    j[key] = getattr(self.clip, key)
                elif key not in self.annotation_keys:
                    value = self.item.get(key) or self.item.cache.get(key)
                    if not value and hasattr(self.item.sort, key):
                        value = getattr(self.item.sort, key)
                    # NOTE(review): `value != None` should idiomatically be
                    # `value is not None`.
                    if value != None:
                        j[key] = value

    # Items without any real subtitles are given a dummy 5-second subtitle
    # every minute to ensure that they have at least *some* clips. Treat
    # them specially. See Item.add_empty_clips
    if l.get('isSubtitles') and 'id' in j and not self.value:
        del j['id']

    return j
|
2010-12-28 14:04:28 +00:00
|
|
|
|
2017-03-03 07:56:35 +00:00
|
|
|
def __str__(self):
    # "<public_id> <start>-<end>", e.g. an id followed by its in/out points.
    return u"%s %s-%s" % (self.public_id, self.start, self.end)
|
2011-06-04 16:19:06 +00:00
|
|
|
|
2012-02-01 15:25:18 +00:00
|
|
|
def cleanup_related(sender, **kwargs):
    # pre_delete hook: drop orphaned undefined places/events before the
    # annotation row disappears.
    kwargs['instance'].cleanup_undefined_relations()

pre_delete.connect(cleanup_related, sender=Annotation)
|
2015-04-16 09:53:05 +00:00
|
|
|
|
|
|
|
def rename_layer(old, new):
    """Rename a layer id, updating annotations and the item find/facet tables."""
    # Imported here to avoid a circular import at module load time.
    import item.models
    Annotation.objects.filter(layer=old).update(layer=new)
    item.models.ItemFind.objects.filter(key=old).update(key=new)
    item.models.Facet.objects.filter(key=old).update(key=new)
|