Merge changes
commit 5d43ed0585
8 changed files with 50 additions and 40 deletions
@@ -134,6 +134,7 @@ class Annotation(models.Model):

     def save(self, *args, **kwargs):
         from .tasks import update_matches
+        async = kwargs.pop('async', False)
         set_public_id = not self.id or not self.public_id
         layer = self.get_layer()

@@ -177,14 +178,16 @@ class Annotation(models.Model):
                 'id': self.clip.id,
                 self.layer: False
             }).update(**{self.layer: True})
-            #update clip.findvalue
+            # update clip.findvalue
             self.clip.save()

-        #editAnnotations needs to be in snyc
+        # editAnnotations needs to be in snyc
+        # load_subtitles can not be in sync
+        fn = update_matches.delay if async else update_matches
         if layer.get('type') == 'place' or layer.get('hasPlaces'):
-            update_matches(self.id, 'place')
+            fn(self.id, 'place')
         if layer.get('type') == 'event' or layer.get('hasEvents'):
-            update_matches(self.id, 'event')
+            fn(self.id, 'event')

     def delete(self, *args, **kwargs):
         with transaction.atomic():
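The save() change above picks between an inline call and a queued task with a single callable, controlled by the new async keyword argument. A minimal sketch of that dispatch pattern, assuming a Celery-style task whose .delay() enqueues the call; FakeTask, save_annotation and run_async are illustrative names only (run_async because async is a reserved word from Python 3.7 on):

    class FakeTask(object):
        """Stand-in for a Celery task; .delay() would normally enqueue the call."""
        def __init__(self, func):
            self.func = func

        def __call__(self, *args, **kwargs):
            return self.func(*args, **kwargs)

        def delay(self, *args, **kwargs):
            # a real Celery task serializes the arguments and hands them to the broker
            return self.func(*args, **kwargs)

    @FakeTask
    def update_matches(annotation_id, kind):
        print('updating %s matches for annotation %s' % (kind, annotation_id))

    def save_annotation(annotation_id, run_async=False):
        # decide once how to dispatch, then call it for each layer type
        fn = update_matches.delay if run_async else update_matches
        fn(annotation_id, 'place')

    save_annotation(1)                  # runs inline
    save_annotation(2, run_async=True)  # would be queued with a real Celery task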
@@ -18,10 +18,13 @@ def update_matches(id, type):
     elif type == 'event':
         from event.models import Event as Model

-    a = Annotation.objects.get(pk=id)
+    try:
+        a = Annotation.objects.get(pk=id)
+    except Annotation.DoesNotExist:
+        return
     a_matches = getattr(a, type == 'place' and 'places' or 'events')

-    #remove undefined matches that only have this annotation
+    # remove undefined matches that only have this annotation
     for p in a_matches.filter(defined=False).exclude(name=a.value):
         if p.annotations.exclude(id=id).count() == 0:
             p.delete()

@@ -33,8 +36,7 @@ def update_matches(id, type):
     if a.findvalue:
         names = {}
         for n in Model.objects.all().values('id', 'name', 'alternativeNames'):
-            names[n['id']] = [ox.decode_html(x)
-                for x in (n['name'],) + n['alternativeNames']]
+            names[n['id']] = [ox.decode_html(x) for x in (n['name'],) + n['alternativeNames']]
         value = a.findvalue.lower()

         current = [p.id for p in a_matches.all()]

@@ -49,19 +51,19 @@ def update_matches(id, type):
         new = []
         for i in matches:
             p = Model.objects.get(pk=i)
-            #only add places/events that did not get added as a super match
-            #i.e. only add The Paris Region and not Paris
+            # only add places/events that did not get added as a super match
+            # i.e. only add The Paris Region and not Paris
             if not filter(lambda n: n in name_matches,
                           [n.lower() for n in p.get_super_matches()]):
                 new.append(i)
-        removed = list(filter(lambda p: p not in new, current))
-        added = list(filter(lambda p: p not in current, new))
-        update = removed + added
+        removed = set(filter(lambda p: p not in new, current))
+        added = set(filter(lambda p: p not in current, new))
+        update = list(removed | added)
         if update:
             for e in Model.objects.filter(id__in=update):
                 e.update_matches(Annotation.objects.filter(pk=a.id))
     else:
-        #annotation has no value, remove all exisint matches
+        # annotation has no value, remove all exisint matches
         for e in a_matches.all():
             e.update_matches(Annotation.objects.filter(pk=a.id))

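The last hunk above switches the bookkeeping from lists to sets: every id that dropped out of the current matches or newly appeared gets its matches recomputed exactly once, and duplicates between the two groups can no longer occur. A small standalone illustration of that calculation (the ids are made up):

    current = [1, 2, 3]  # match ids before re-evaluating the annotation
    new = [2, 3, 4]      # match ids derived from the new findvalue

    removed = set(p for p in current if p not in new)  # {1}
    added = set(p for p in new if p not in current)    # {4}
    update = list(removed | added)

    print(sorted(update))  # [1, 4] -- every id whose relation to the annotation changed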
@@ -1632,7 +1632,7 @@ class Item(models.Model):
                         value=value,
                         user=user
                     )
-                    annotation.save()
+                    annotation.save(async=True)
             # otherwise add empty 5 seconds annotation every minute
             if not subtitles_added:
                 start = offset and int(offset / 60) * 60 + 60 or 0

@@ -1647,7 +1647,7 @@ class Item(models.Model):
                         value='',
                         user=user
                     )
-                    annotation.save()
+                    annotation.save(async=True)
             offset += f.duration
         # remove left over clips without annotations
         Clip.objects.filter(item=self, annotations__id=None).delete()

@@ -9,6 +9,16 @@ import re
 from PIL import Image
 from ox.utils import json

+__all__ = ['join_tiles', 'split_tiles']
+
+def divide(num, by):
+    # divide(100, 3) -> [33, 33, 34]
+    arr = []
+    div = int(num / by)
+    mod = num % by
+    for i in range(int(by)):
+        arr.append(div + (i > by - 1 - mod))
+    return arr

 def join_tiles(source_paths, durations, target_path):
     '''

@@ -17,15 +27,6 @@ def join_tiles(source_paths, durations, target_path):
     be written to target_path.
     '''

-    def divide(num, by):
-        # divide(100, 3) -> [33, 33, 34]
-        arr = []
-        div = int(num / by)
-        mod = num % by
-        for i in range(int(by)):
-            arr.append(div + (i > by - 1 - mod))
-        return arr
-
     def get_file_info(file_name):
         for mode in modes:
             if re.match('^timeline' + mode + '64p\d+\.jpg', file_name):

@@ -86,11 +87,12 @@ def join_tiles(source_paths, durations, target_path):
                 #print(image_file)
                 if mode == full_tile_mode:
                     # render full tile
-                    resized = data['target_images']['large'].resize((
-                        data['full_tile_widths'][0], large_tile_h
-                    ), Image.ANTIALIAS)
-                    data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0))
-                    data['full_tile_offset'] += data['full_tile_widths'][0]
+                    if data['full_tile_widths'][0]:
+                        resized = data['target_images']['large'].resize((
+                            data['full_tile_widths'][0], large_tile_h
+                        ), Image.ANTIALIAS)
+                        data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0))
+                        data['full_tile_offset'] += data['full_tile_widths'][0]
                     data['full_tile_widths'] = data['full_tile_widths'][1:]
                 large_tile_i += 1
                 # open next large tile

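divide() splits a length into near-equal integer parts, with the larger parts at the end; the commit only promotes it from a nested helper inside join_tiles to module level. A quick check of its behaviour, copied from the hunk above with two sample calls:

    def divide(num, by):
        # divide(100, 3) -> [33, 33, 34]
        arr = []
        div = int(num / by)
        mod = num % by
        for i in range(int(by)):
            arr.append(div + (i > by - 1 - mod))
        return arr

    print(divide(100, 3))  # [33, 33, 34]
    print(divide(7, 2))    # [3, 4]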
@@ -107,13 +107,13 @@ class Place(models.Model):
         numberofmatches = -1
         for a in annotations.exclude(id__in=matches):
             self.annotations.remove(a)
-            #annotations of type place always need a place
+            # annotations of type place always need a place
             if a.get_layer().get('type') == 'place' and a.places.count() == 0:
                 a.places.add(Place.get_or_create(a.value))
             for p in a.places.exclude(id=self.id):
                 p.update_matches()
         for a in matches.exclude(id__in=self.annotations.all()):
-            #need to check again since editEvent might have been called again
+            # need to check again since editEvent might have been called again
             if self.annotations.filter(id=a.id).count() == 0:
                 self.annotations.add(a)
         ids = list(set([a['item_id'] for a in self.annotations.all().values('item_id')]))

@@ -66,7 +66,8 @@ class Task(models.Model):
         task, created = cls.objects.get_or_create(item=item)
         if task.update(save=False) or created:
             task.user = user
-            task.started = datetime.now()
+            if not task.started:
+                task.started = datetime.now()
             task.ended = None
             task.save()

@@ -83,10 +84,10 @@ class Task(models.Model):
             status = 'pending'
         elif self.item.files.filter(uploading=True).count():
             status = 'uploading'
-        elif self.item.files.filter(queued=True).count():
-            status = 'queued'
         elif self.item.files.filter(encoding=True).count():
             status = 'processing'
+        elif self.item.files.filter(queued=True).count():
+            status = 'queued'
         elif self.item.files.filter(failed=True).count():
             status = 'failed'
         elif self.item.rendered:

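The second hunk only reorders the cascade: an item whose files are both queued and already encoding now reports 'processing' instead of 'queued'. A condensed sketch of the resulting precedence, using plain dicts in place of the files queryset; the field names come from the filters above, while the surrounding 'pending'/'rendered' branches are omitted and the 'done' fallback is invented for the example:

    def status_for(files):
        # same order as the elif chain after the change
        if any(f.get('uploading') for f in files):
            return 'uploading'
        if any(f.get('encoding') for f in files):
            return 'processing'
        if any(f.get('queued') for f in files):
            return 'queued'
        if any(f.get('failed') for f in files):
            return 'failed'
        return 'done'

    print(status_for([{'queued': True, 'encoding': True}]))  # processing
    print(status_for([{'queued': True}]))                    # queued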
@@ -37,7 +37,6 @@ def parseCondition(condition, user):
     else:
         key = k + get_operator(op, 'istr')
     key = str(key)
-
     q = Q(**{key: v})
     if exclude:
         q = ~q

@@ -53,7 +52,7 @@ def parseConditions(conditions, operator, user):
     for condition in conditions:
         if 'conditions' in condition:
             q = parseConditions(condition['conditions'],
-                condition.get('operator', '&'), user)
+                                condition.get('operator', '&'), user)
             if q:
                 conn.append(q)
             pass

@@ -47,7 +47,7 @@ class SessionData(models.Model):

     objects = managers.SessionDataManager()

-    groupssort = models.CharField(default=None,blank=True,null=True, max_length=255)
+    groupssort = models.CharField(default=None, blank=True, null=True, max_length=255)

     def __unicode__(self):
         return u"%s" % self.session_key

@@ -141,6 +141,9 @@ class SessionData(models.Model):

     def json(self, keys=None, user=None):
         ua = ox.parse_useragent(self.useragent or '')
+        if ua['robot']['name'] and self.level != -1:
+            self.level = -1
+            self.save()
         j = {
             'browser': ua['browser']['string'],
             'disabled': False,

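The json() hunk above demotes sessions whose user agent parses as a crawler before building the response. A small standalone sketch of that check, assuming only the dictionary shape the hunk actually uses (ua['robot']['name']); the sample values and the reading of level -1 as the robot/disabled level are inferred, not taken from the source:

    ua = {'robot': {'name': 'Googlebot'}, 'browser': {'string': 'Googlebot'}}
    level = 2  # whatever level the session had before

    if ua['robot']['name'] and level != -1:
        level = -1  # flag the session as a robot; the model would also call self.save()

    print(level)  # -1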
@@ -234,6 +237,7 @@ def get_ui(user_ui, user=None):
     ui = {}
     config = copy.deepcopy(settings.CONFIG)
     ui.update(config['user']['ui'])
+
     def update_ui(ui, new):
         '''
         only update set keys in dicts

@@ -385,8 +389,7 @@ def has_capability(user, capability):
     else:
         level = user.profile.get_level()
     return level in settings.CONFIG['capabilities'][capability] \
         and settings.CONFIG['capabilities'][capability][level]

-
 def merge_users(old, new):
     old.annotations.all().update(user=new)