Compare commits


1 commit
main ... master

Author    SHA1        Message           Date
j         a333d73305  export subtitles  2024-02-01 14:53:01 +01:00
29 changed files with 26 additions and 4715 deletions


@@ -1,23 +0,0 @@
[*]
end_of_line = lf
insert_final_newline = true
[*.py]
indent_style = space
indent_size = 4
[*.sass]
indent_style = space
indent_size = 2
[*.scss]
indent_style = space
indent_size = 2
[*.html]
indent_style = space
indent_size = 4
[*.js]
indent_style = space
indent_size = 4

.gitignore

@@ -1,3 +1 @@
*.pyc
*.swp
*.swo


@@ -1,10 +1,26 @@
# pan.do/ra overlay for t-for-time
# pan.do/ra site config overlay
pandora settings and render pipeline.
fork this repo into pandora_sitename and add your pan.do/ra customizations
`apt install melt kdenlive ladspa-sdk xvfb`
place your config as config.jsonc, add custom files to static/js, poster scripts to scripts
`pandoractl manage generate_clips`
custom files should be in the form <file>.<sitename>.js
`pandoractl manage render`
e.g. js overlay:
static/js/home.<sitename>.js
png overlays work the same, e.g.:
static/png/icon.<sitename>.png
poster/icon script without <sitename>:
script/item_icon.py
script/list_icon.py
script/potser.py
if you need a custom django module, touch __init__.py and install.py will take care of that too.
to use js pages from other sites, add them to overwrite in install.py
to deploy, checkout your fork into /srv/pandora/pandora/<sitename> and run ./install.py
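for illustration, a minimal sketch of one way such <file>.<sitename>.js overlays could be linked at deploy time (hypothetical helper, not the actual install.py; SITENAME and the pan.do/ra path are assumptions):

# hypothetical sketch, not the actual install.py
import glob
import os

SITENAME = "t_for_time"            # assumed site name
PANDORA = "/srv/pandora/pandora"   # assumed pan.do/ra checkout

for src in glob.glob("static/js/*.%s.js" % SITENAME):
    # e.g. static/js/home.t_for_time.js -> /srv/pandora/pandora/static/js/home.js
    name = os.path.basename(src).replace(".%s" % SITENAME, "")
    target = os.path.join(PANDORA, "static", "js", name)
    if os.path.lexists(target):
        os.unlink(target)
    os.symlink(os.path.abspath(src), target)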


@@ -1,98 +0,0 @@
import requests
import json
import os
import subprocess

'''
apt-get install -y podman
podman run -P -p 8765:8765 lowerquality/gentle
'''

def load_subs():
    subtitles = {}
    for url in """
https://textb.org/r/t_for_time_subtitles_1_melodic/
https://textb.org/r/t_for_time_subtitles_2_whispered/
https://textb.org/r/t_for_time_subtitles_3_free/
https://textb.org/r/t_for_time_subtitles_4_read/
https://textb.org/r/t_for_time_subtitles_5_ashley/
""".strip().split('\n'):
        data = requests.get(url).text
        parts = data.strip().split('##')
        print(url)
        prefix = '/srv/t_for_time/vo/' + url.split('/')[-2].split('subtitles_')[-1]
        for part in parts:
            part = part.strip().split('\n')
            if part:
                title = part[0]
                text = "\n".join(part[1:]).strip()
                if text:
                    fname = '%s_%s.txt' % (prefix, title)
                    with open(fname, 'w') as fd:
                        fd.write(text)

def gentle2subtitles(align):
    new_block = '\r\n\r\n'
    if new_block not in align['transcript']:
        new_block = '\n\n'
    data = []
    end = 0
    for block in align['transcript'].split(new_block):
        if not block.strip():
            continue
        start = end
        end += len(block)
        in_ = -1
        out_ = -1
        for word in align['words']:
            if word['startOffset'] < start:
                continue
            if word.get('case') == 'not-found-in-audio':
                continue
            if in_ == -1:
                in_ = word['start']
                out_ = word['end']
            if word['endOffset'] > end:
                break
            if 'end' in word:
                out_ = word['end']
            if word['endOffset'] == end:
                break
        data.append({
            'in': in_, 'out': out_, 'value': block.replace('\r\n', '\n')
        })
        end += len(new_block)
    return data

def align_text(txt, wav):
    cmd = ['curl', '-s', '-F', 'audio=@' + wav, '-F', 'transcript=@%s' % txt,
           'http://localhost:8765/transcriptions?async=false']
    data = subprocess.check_output(cmd).decode()
    return json.loads(data)

def update_subtitles():
    import item.models
    from annotation.tasks import add_annotations
    load_subs()
    for i in item.models.Item.objects.filter(data__type=['Voice Over']):
        wav = i.files.filter(selected=True)[0].data.path
        id = i.get('title').split('_')[0]
        batch = i.get('batch')[0][5:].lower().replace('-', '_').replace(' ', '')
        txt = '/srv/t_for_time/vo/%s_%s.txt' % (batch, id)
        if os.path.exists(txt):
            print(i, wav, txt)
            subtitles = gentle2subtitles(align_text(txt, wav))
            add_annotations({
                'item': i.public_id,
                'layer': 'subtitles',
                'user': 'j',
                'annotations': subtitles
            })

File diff suppressed because it is too large


@@ -1,15 +0,0 @@
[Unit]
Description=render to infinity and beyond
After=pandora.service
[Service]
Type=simple
Restart=always
User=pandora
Group=pandora
Nice=19
WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/pandora/manage.py infinity
[Install]
WantedBy=multi-user.target


@@ -2,43 +2,24 @@ import json
import os
import subprocess

import ox

from django.core.management.base import BaseCommand
from django.conf import settings

from ...render import add_translations


class Command(BaseCommand):
    help = 'export all subtitles for translations'

    def add_arguments(self, parser):
        parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
        pass

    def handle(self, **options):
        import annotation.models
        import item.models
        lang = options["lang"]
        if lang:
            lang = lang.split(',')
            tlang = lang[1:]
            lang = lang[0]
        else:
            tlang = None
        if lang == "en":
            lang = None
        for i in item.models.Item.objects.filter(data__type__contains='Voice Over').order_by('sort__title'):
            print("## %s %s" % (i.get("title"), i.public_id))
            for sub in i.annotations.all().filter(layer='subtitles').exclude(value='').filter(languages=lang).order_by("start"):
                if tlang:
                    value = add_translations(sub, tlang)
                    value = ox.strip_tags(value)
                else:
                    value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
                if sub.languages:
                    value = ox.strip_tags(value)
                print(value.strip() + "\n")
            for sub in i.annotations.all().filter(layer='subtitles').exclude(value='').order_by("start"):
                if not sub.languages:
                    print(sub.value.strip() + "\n")
            print("\n\n\n")


@@ -1,124 +0,0 @@
import json
import os
import re
from collections import defaultdict
from django.core.management.base import BaseCommand
from django.conf import settings
import item.models
import itemlist.models
from ...render import get_srt
def resolve_roman(s):
extra = re.compile('^\d+(.*?)$').findall(s)
if extra:
extra = extra[0].lower()
new = {
'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
'vi': '6', 'vii': '7', 'viii': '8', 'ix': '9', 'x': '10'
}.get(extra, extra)
return s.replace(extra, new)
return s
class Command(BaseCommand):
help = 'generate symlinks to clips and clips.json'
def add_arguments(self, parser):
parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
def handle(self, **options):
prefix = options['prefix']
lang = options["lang"]
if lang:
lang = lang.split(',')
tlang = lang[1:]
lang = lang[0]
else:
tlang = None
if lang == "en":
lang = None
clips = []
for i in item.models.Item.objects.filter(sort__type='original'):
qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
if qs.count() >= 1:
clip = {}
durations = []
for e in item.models.Item.objects.filter(data__title=i.data['title']):
if 'type' not in e.data:
print("ignoring invalid video %s (no type)" % e)
continue
if not e.files.filter(selected=True).exists():
continue
source = e.files.filter(selected=True)[0].data.path
ext = os.path.splitext(source)[1]
type_ = e.data['type'][0].lower()
target = os.path.join(prefix, type_, i.data['title'] + ext)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(source, target)
clip[type_] = target
durations.append(e.files.filter(selected=True)[0].duration)
clip["duration"] = min(durations)
if not clip["duration"]:
print('!!', durations, clip)
continue
clip['tags'] = i.data.get('tags', [])
clip['editingtags'] = i.data.get('editingtags', [])
name = os.path.basename(clip['original'])
seqid = re.sub("Hotel Aporia_(\d+)", "S\\1_", name)
seqid = re.sub("Night March_(\d+)", "S\\1_", seqid)
seqid = re.sub("_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
seqid = seqid.split('_')[:2]
seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
seqid[1] = resolve_roman(seqid[1])
seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
if not seqid[1]:
seqid[1] = '0'
try:
clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
except:
print(name, seqid, 'failed')
raise
if "original" in clip and "foreground" in clip and "background" in clip:
clips.append(clip)
elif "original" in clip and "animation" in clip:
clips.append(clip)
else:
print("ignoring incomplete video", i)
with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
json.dump(clips, fd, indent=2, ensure_ascii=False)
print("using", len(clips), "clips")
voice_over = defaultdict(dict)
for vo in item.models.Item.objects.filter(
data__type__contains="Voice Over",
):
fragment_id = int(vo.get('title').split('_')[0])
source = vo.files.filter(selected=True)[0]
batch = vo.get('batch')[0].replace('Text-', '')
src = source.data.path
target = os.path.join(prefix, 'voice_over', batch, '%s.wav' % fragment_id)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(src, target)
subs = []
for sub in vo.annotations.filter(layer="subtitles", languages=lang).exclude(value="").order_by("start"):
sdata = get_srt(sub, lang=tlang)
subs.append(sdata)
voice_over[fragment_id][batch] = {
"src": target,
"duration": source.duration,
"subs": subs
}
with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
json.dump(voice_over, fd, indent=2, ensure_ascii=False)


@@ -1,109 +0,0 @@
import json
import os
import subprocess
import ox
from django.core.management.base import BaseCommand
from django.conf import settings
from item.models import Item
from annotation.models import Annotation
class Command(BaseCommand):
help = 'export all subtitles for translations'
def add_arguments(self, parser):
parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
parser.add_argument('--test', action='store_true', dest='test', default=False, help='test run')
parser.add_argument('args', metavar='args', type=str, nargs='*', help='file or url')
def handle(self, filename, **options):
if not options["lang"]:
print("--lang is required")
return
lang = options["lang"]
if filename.startswith("http"):
data = ox.net.read_url(filename).decode()
else:
with open(filename) as fd:
data = fd.read()
data = ('\n' + data.strip()).split('\n## ')[1:]
invalid = []
valid = []
for block in data:
title, block = block.split('\n', 1)
block = block.strip()
title = title.strip()
item_id = title.split(' ')[-1]
item = Item.objects.get(public_id=item_id)
subtitles_en = item.annotations.filter(layer="subtitles", languages=None).exclude(value='')
lines = block.split('\n\n')
if len(lines) != subtitles_en.count():
print('%s: number of subtitles does not match, en: %s vs %s: %s' % (title, subtitles_en.count(), lang, len(lines)))
if options["test"]:
print(json.dumps(lines, indent=2, ensure_ascii=False))
print(json.dumps([s.value for s in subtitles_en.order_by('start')], indent=2, ensure_ascii=False))
continue
if options["test"]:
print('%s: valid %s subtitles' % (title, len(lines)))
else:
n = 0
item.annotations.filter(layer="subtitles", languages=lang).delete()
for sub_en in subtitles_en.order_by('start'):
sub = Annotation()
sub.item = sub_en.item
sub.user = sub_en.user
sub.layer = sub_en.layer
sub.start = sub_en.start
sub.end = sub_en.end
sub.value = '<span lang="%s">%s</span>' % (lang, lines[n])
sub.save()
n += 1
'''
srt = 'vocals_txt/%s/%s' % (title[0], title.replace('.wav', '.srt'))
filename = 'vocals_txt/%s/%s' % (title[0], title.replace('.wav', '.' + lang + '.srt'))
folder = os.path.dirname(filename)
if not os.path.exists(folder):
os.makedirs(folder)
data = json.load(open(srt + '.json'))
subs = block.replace('\n\n', '\n').split('\n')
if len(data) != len(subs):
print('invalid', title, 'expected', len(data), 'got', len(subs))
invalid.append('## %s\n\n%s' % (title, block))
valid.append('## %s\n\n%s' % (title, '\n\n'.join([d['value'] for d in data])))
continue
for i, sub in enumerate(data):
sub['value'] = subs[i]
kodata = ox.srt.encode(data)
current = None
if os.path.exists(filename):
with open(filename, 'rb') as fd:
current = fd.read()
if current != kodata:
print('update', title, filename)
with open(filename, 'wb') as fd:
fd.write(kodata)
with open(filename + '.json', 'w') as fd:
ko = [{
'in': s['in'],
'out': s['out'],
'value': s['value'],
} for s in data]
json.dump(ko, fd, ensure_ascii=False, indent=4)
if invalid:
with open('invalid_%s_subtitles.txt' % lang, 'w') as fd:
fd.write('\n\n\n\n'.join(invalid))
with open('invalid_%s_subtitles_en.txt' % lang, 'w') as fd:
fd.write('\n\n\n\n'.join(valid))
'''


@@ -1,19 +0,0 @@
import json
import os
import subprocess

from django.core.management.base import BaseCommand
from django.conf import settings

from ...render import render_infinity


class Command(BaseCommand):
    help = 'render infinity'

    def add_arguments(self, parser):
        parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
        parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')

    def handle(self, **options):
        render_infinity(options)


@@ -1,21 +0,0 @@
import json
import os
import subprocess

from django.core.management.base import BaseCommand
from django.conf import settings

from ...render import render_all


class Command(BaseCommand):
    help = 'generate kdenlive project and render'

    def add_arguments(self, parser):
        parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
        parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')
        parser.add_argument('--offset', action='store', dest='offset', default="1024", help='initial offset in pi')
        parser.add_argument('--no-video', action='store_true', dest='no_video', default=False, help='don\'t render video')

    def handle(self, **options):
        render_all(options)


@@ -1,20 +0,0 @@
import json
import os
import subprocess

from django.core.management.base import BaseCommand
from django.conf import settings

from ...render import update_subtitles


class Command(BaseCommand):
    help = 'generate kdenlive project and render'

    def add_arguments(self, parser):
        parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
        parser.add_argument('--offset', action='store', dest='offset', default="1024", help='initial offset in pi')
        parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')

    def handle(self, **options):
        update_subtitles(options)

pi.py

@@ -1,18 +0,0 @@
from mpmath import mp


class random(object):

    def __init__(self, offset=0):
        self.position = offset
        mp.dps = 10000 + offset
        self.PI = str(mp.pi).replace('.', '')
        self.numbers = list(map(int, self.PI[offset:]))

    def __call__(self):
        if not self.numbers:
            offset = mp.dps
            mp.dps += 1000
            self.PI = str(mp.pi).replace('.', '')
            self.numbers = list(map(int, self.PI[offset:]))
        self.position += 1
        return self.numbers.pop(0)
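A minimal usage sketch of the digit source above (the offset value is only an example; render.py imports this module as `from .pi import random`): each call returns the next decimal digit of pi, so a given offset always produces the same sequence.

# usage sketch, assuming pi.py is importable
from pi import random

seq = random(offset=1024)
digits = [seq() for _ in range(5)]  # five successive pi digits after the offset
print(digits)                       # deterministic for a given offset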


@@ -1 +0,0 @@
dbus-send --session --dest=org.gnome.Shell --print-reply /org/gnome/Shell org.freedesktop.DBus.Properties.Get string:org.gnome.Shell string:OverviewActive


@@ -1,3 +0,0 @@
#!/bin/sh
dbus-send --session --dest=org.gnome.Shell --type=method_call /org/gnome/Shell org.freedesktop.DBus.Properties.Set string:org.gnome.Shell string:OverviewActive variant:boolean:false


@@ -1,8 +0,0 @@
[Desktop Entry]
Type=Application
Exec=/srv/pandora/t_for_time/player/player.py --mode peer --playlist /srv/t_for_time/render/back.m3u
Hidden=false
NoDisplay=false
X-GNOME-Autostart-enabled=true
Name=t-for-time
Comment=


@@ -1,8 +0,0 @@
[Desktop Entry]
Type=Application
Exec=/srv/pandora/t_for_time/player/player.py --mode main --playlist /srv/t_for_time/render/front.m3u
Hidden=false
NoDisplay=false
X-GNOME-Autostart-enabled=true
Name=t-for-time
Comment=


@@ -1,13 +0,0 @@
[Unit]
Description=player
After=gnome-session.target
Wants=network-online.target
[Service]
Type=simple
Restart=on-failure
KillSignal=SIGINT
ExecStart=/srv/pandora/t_for_time/player/player.py --mode peer --playlist /srv/t_for_time/render/back.m3u
[Install]
WantedBy=graphical-session.target


@@ -1,12 +0,0 @@
[Unit]
Description=player
After=gnome-session.target network-online.target
[Service]
Type=simple
Restart=on-failure
KillSignal=SIGINT
ExecStart=/srv/pandora/t_for_time/player/player.py --mode main --playlist /srv/t_for_time/render/front.m3u
[Install]
WantedBy=graphical-session.target


@@ -1,449 +0,0 @@
#!/usr/bin/python3
import argparse
import collections
import json
import os
import socket
import time
from threading import Thread
from datetime import datetime
import ox
import mpv
import logging
logger = logging.getLogger('t_for_time')
SYNC_TOLERANCE = 0.05
SYNC_GRACE_TIME = 5
SYNC_JUMP_AHEAD = 1
PORT = 9067
DEBUG = False
CONFIG = {
"font": "Menlo",
"font_size": 30,
"font_border": 4,
"sub_border_color": "0.0/0.0/0.0/0.75",
"sub_margin": 2 * 36 + 6,
"sub_spacing": 0,
"vf": None,
"sync_group": None,
}
def hide_gnome_overview():
import dbus
bus = dbus.SessionBus()
shell = bus.get_object('org.gnome.Shell', '/org/gnome/Shell')
props = dbus.Interface(shell, 'org.freedesktop.DBus.Properties')
props.Set('org.gnome.Shell', 'OverviewActive', False)
def mpv_log(loglevel, component, message):
logger.info('[{}] {}: {}'.format(loglevel, component, message))
class Main:
playlist_current_pos = -1
time_pos = -1
class Sync(Thread):
active = True
is_main = True
is_paused = False
ready = False
destination = "255.255.255.255"
reload_check = None
_pos = None
_tick = 0
need_to_sync = False
def __init__(self, *args, **kwargs):
self.is_main = kwargs.get('mode', 'main') == 'main'
self.start_at_hour = kwargs.get("hour", False)
self.sock = self.init_socket()
self.main = Main()
if self.is_main:
self.socket_enable_broadcast()
if kwargs.get("sax"):
self.sax = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True,
)
self.sax.loop_file = True
self.sax.play("/srv/t_for_time/render/Saxophone-5.1.mp4")
else:
self.sax = None
if mpv.MPV_VERSION >= (2, 2):
self.mpv = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True,
sub_font_size=CONFIG["font_size"], sub_font=CONFIG["font"],
sub_border_size=CONFIG["font_border"],
sub_border_color=CONFIG["sub_border_color"],
sub_margin_y=CONFIG["sub_margin"],
sub_ass_line_spacing=CONFIG["sub_spacing"],
)
else:
self.mpv = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True,
sub_text_font_size=CONFIG["font_size"], sub_text_font=CONFIG["font"],
sub_border_size=CONFIG["font_border"],
sub_border_color=CONFIG["sub_border_color"],
sub_margin_y=CONFIG["sub_margin"],
sub_ass_line_spacing=CONFIG["sub_spacing"],
)
if CONFIG.get("vf"):
self.mpv.vf = CONFIG["vf"]
self.mpv.observe_property('time-pos', self.time_pos_cb)
self.mpv.fullscreen = kwargs.get('fullscreen', False)
self.mpv.loop_file = False
self.mpv.loop_playlist = True
self.mpv.register_key_binding('q', self.q_binding)
self.mpv.register_key_binding('s', self.s_binding)
self.mpv.register_key_binding('p', self.p_binding)
self.mpv.register_key_binding('SPACE', self.space_binding)
self.playlist = kwargs['playlist']
self.playlist_mtime = os.stat(self.playlist).st_mtime
self.mpv.loadlist(self.playlist)
logger.error("loaded paylist: %s", self.playlist)
logger.debug("current playlist: %s", json.dumps(self.mpv.playlist, indent=2))
self.deviations = collections.deque(maxlen=10)
if not self.is_main:
self.mpv.pause = False
time.sleep(0.1)
self.mpv.pause = True
self.sync_to_main()
elif self.start_at_hour:
self.mpv.pause = True
fmt = '%Y-%m-%d %H'
now = datetime.now()
offset = (now - datetime.strptime(now.strftime(fmt), fmt)).total_seconds()
if self.sax:
self.sax.wait_until_playing()
self.sax.seek(offset, 'absolute', 'exact')
self.sax.pause = True
position = 0
for idx, item in enumerate(self.mpv.playlist):
duration = ox.avinfo(item['filename'])['duration']
if position + duration > offset:
pos = offset - position
self.mpv.playlist_play_index(idx)
self.mpv.pause = False
self.mpv.wait_until_playing()
self.mpv.seek(pos, 'absolute', 'exact')
time.sleep(0.1)
break
else:
position += duration
if self.sax:
self.sax.pause = False
self.ready = True
Thread.__init__(self)
self.start()
def run(self):
while self.active:
if self.is_main:
time.sleep(0.5)
else:
if self.need_to_sync:
self.sync_to_main()
self.deviations = collections.deque(maxlen=10)
self.need_to_sync = False
else:
self.read_position_main()
self.reload_playlist()
if not self.is_paused and self._tick and abs(time.time() - self._tick) > 60:
logger.error("player is stuck")
self._tick = 0
self.stop()
self.mpv.stop()
def q_binding(self, *args):
if args[0] != 'd-':
return
self.stop()
self.mpv.stop()
def space_binding(self, *args):
if args[0] != 'd-':
return
if self.mpv.pause:
self.p_binding(*args)
else:
self.s_binding(*args)
def s_binding(self, *args):
if args[0] != 'd-':
return
self.is_paused = True
self.mpv.pause = True
if self.sax:
self.sax.pause = True
self.send_playback_state()
def p_binding(self, *args):
if args[0] != 'd-':
return
self.is_paused = False
self._tick = 0
self.mpv.pause = False
if self.sax:
self.sax.pause = False
self.send_playback_state()
def stop(self, *args):
self.active = False
if self.sock:
self.sock.close()
self.sock = None
def time_pos_cb(self, pos, *args, **kwargs):
self._tick = time.time()
if self.is_main:
self.send_position_local()
elif self.ready:
self.adjust_position()
if self._pos != self.mpv.playlist_current_pos:
self._pos = self.mpv.playlist_current_pos
self.deviations = collections.deque(maxlen=10)
self.need_to_sync = False
try:
track = self.mpv.playlist[self._pos]
logger.error("%s %s", datetime.now(), track["filename"])
except:
pass
def reload_playlist(self):
if not self.reload_check:
self.reload_check = time.time()
if time.time() - self.reload_check > 5:
self.reload_check = time.time()
playlist_mtime = os.stat(self.playlist).st_mtime
if self.playlist_mtime != playlist_mtime:
self.playlist_mtime = playlist_mtime
#self.mpv.loadlist(self.playlist)
with open(self.playlist) as fd:
items = fd.read().strip().split('\n')
base = os.path.dirname(self.playlist)
items = [os.path.join(base, item) for item in items]
current_items = self.mpv.playlist_filenames
for filename in items:
if filename not in current_items:
self.mpv.playlist_append(filename)
logger.error("add: %s", filename)
remove = []
for filename in current_items:
if filename not in items:
remove.append(filename)
for filename in remove:
for idx, item in enumerate(self.mpv.playlist):
if item["filename"] == filename:
logger.error("remove: %s %s", idx, filename)
self.mpv.playlist_remove(idx)
break
for idx, filename in enumerate(items):
current_idx = self.mpv.playlist_filenames.index(filename)
if idx != current_idx:
logger.error("move item %s %s -> %s", filename, current_idx, idx)
self.mpv.playlist_move(current_idx, idx)
logger.error("reloaded paylist: %s", self.playlist)
logger.debug("current playlist: %s", json.dumps(self.mpv.playlist, indent=2))
def init_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("0.0.0.0", PORT))
return sock
#
# main specific
#
def socket_enable_broadcast(self):
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sock.connect((self.destination, PORT))
def send_position_local(self):
if not self.active:
return
try:
msg = (
"%0.4f %s"
% (self.mpv.time_pos, self.mpv.playlist_current_pos)
).encode()
if CONFIG.get("sync_group"):
msg = ("%s " % CONFIG["sync_group"]).encode() + msg
except:
return
try:
self.sock.send(msg)
except socket.error as e:
logger.error("send failed: %s", e)
def send_playback_state(self):
state = 'pause' if self.mpv.pause else 'play'
msg = ("%s -1" % state).encode()
try:
self.sock.send(msg)
except socket.error as e:
logger.error("send failed: %s", e)
#
# follower specific
#
_last_ping = None
def read_position_main(self):
self.sock.settimeout(5)
while True:
try:
data = self.sock.recvfrom(1024)[0].decode().split(" ", 1)
except socket.timeout:
if self._last_ping != "pause":
logger.error("failed to receive data from main")
return
except OSError:
logger.error("socket closed")
return
if CONFIG.get("sync_group"):
if data[0] == str(CONFIG["sync_group"]):
data = data[1].split(" ", 1)
break
else:
break
self._last_ping = data[0]
if data[0] == "pause":
self.is_paused = True
self.mpv.pause = True
elif data[0] == "play":
self.is_paused = False
self._tick = 0
self.mpv.pause = False
else:
self.main.time_pos = float(data[0])
self.main.playlist_current_pos = int(data[1])
def adjust_position(self):
if self.mpv.time_pos is not None:
try:
deviation = self.main.time_pos - self.mpv.time_pos
except:
return
self.deviations.append(deviation)
median_deviation = self.median(list(self.deviations))
frames = deviation / 0.04
median_frames = median_deviation / 0.04
if abs(deviation) <= 0.04 and self.mpv.speed != 1.0:
self.mpv.speed = 1.0
logger.error(
'%0.05f back to normal speed %0.05f (%d) median %0.05f (%d) -> %s' % (self.mpv.time_pos, deviation, frames, median_deviation, median_frames, self.mpv.speed)
)
if time.time() - self.last_sync > SYNC_GRACE_TIME and abs(median_deviation) > SYNC_TOLERANCE:
if abs(median_deviation) < 1:
step = 0.02
if median_deviation > 0:
self.mpv.speed += step
else:
self.mpv.speed -= step
logger.error(
'%0.05f need to adjust speed %0.05f (%d) median %0.05f (%d) -> %s' % (self.mpv.time_pos, deviation, frames, median_deviation, median_frames, self.mpv.speed)
)
self.need_to_sync = False
self.deviations = collections.deque(maxlen=10)
self.last_sync = time.time()
elif self.mpv.time_pos > 2 and not self.need_to_sync:
logger.error(
'%0.05f need to sync %0.05f (%d) median %0.05f (%d)' % (self.mpv.time_pos, deviation, frames, median_deviation, median_frames)
)
self.need_to_sync = True
def median(self, lst):
quotient, remainder = divmod(len(lst), 2)
if remainder:
return sorted(lst)[quotient]
return float(sum(sorted(lst)[quotient - 1:quotient + 1]) / 2.0)
def sync_to_main(self):
logger.error('sync to main')
self.read_position_main()
#print(self.main.playlist_current_pos)
if self.main.playlist_current_pos != self.mpv.playlist_current_pos:
self.mpv.playlist_play_index(self.main.playlist_current_pos)
self.mpv.pause = False
self.mpv.wait_until_playing()
try:
track = self.mpv.playlist[self.mpv.playlist_current_pos]
logger.error("%s %s", datetime.now(), track["filename"])
except:
pass
self.mpv.pause = True
self.mpv.speed = 1
pos = self.main.time_pos + SYNC_JUMP_AHEAD
#print(pos, self.mpv.playlist_current_pos, self.mpv.time_pos)
self.mpv.seek(pos, 'absolute', 'exact')
time.sleep(0.1)
self.read_position_main()
sync_timer = time.time() # - 10 * 0.04
deviation = self.main.time_pos - self.mpv.time_pos
while self.active:
#print(deviation, abs(deviation) - (time.time() - sync_timer))
if abs(deviation) - (time.time() - sync_timer) < 0:
self.mpv.pause = False
try:
track = self.mpv.playlist[self.mpv.playlist_current_pos]
logger.error("%s %s %s", datetime.now(), track["filename"], pos)
except:
pass
break
self.last_sync = time.time()
def main():
prefix = os.path.expanduser('~/Videos/t_for_time')
parser = argparse.ArgumentParser(description='t_for_time sync player')
parser.add_argument('--mode', help='peer or main', default="peer")
parser.add_argument('--playlist', default='/srv/t_for_time/render/128/front.m3u', help="m3u")
parser.add_argument('--prefix', help='video location', default=prefix)
parser.add_argument('--window', action='store_true', help='run in window', default=False)
parser.add_argument('--debug', action='store_true', help='debug', default=False)
parser.add_argument('--hour', action='store_true', help='hour', default=False)
parser.add_argument('--sax', action='store_true', help='hour', default=False)
parser.add_argument('--config', help='config', default=None)
args = parser.parse_args()
DEBUG = args.debug
if DEBUG:
log_format = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format)
if args.config:
with open(args.config) as fd:
CONFIG.update(json.load(fd))
base = os.path.dirname(os.path.abspath(__file__))
#os.chdir(base)
player = Sync(mode=args.mode, playlist=args.playlist, fullscreen=not args.window, hour=args.hour, sax=args.sax)
while player.active:
try:
player.mpv.wait_for_playback()
except:
break
player.stop()
if __name__ == "__main__":
try:
hide_gnome_overview()
except:
pass
main()


@@ -1,8 +0,0 @@
[Desktop Entry]
Type=Application
Exec=/usr/bin/mpv --quiet --loop /srv/t_for_time/render/Saxophone-5.1.mp4
Hidden=false
NoDisplay=false
X-GNOME-Autostart-enabled=true
Name=loop
Comment=


@@ -1,11 +0,0 @@
[Unit]
Description=saxophone loop
[Service]
Type=simple
Restart=always
ExecStart=/usr/bin/mpv --quiet --loop /srv/t_for_time/render/Saxophone-5.1.mp4
KillSignal=SIGINT
[Install]
WantedBy=graphical-session.target

render.py

@@ -1,717 +0,0 @@
#!/usr/bin/python3
from collections import defaultdict
from glob import glob
import json
import os
import re
import shutil
import subprocess
import sys
import time
from pathlib import Path
import ox
from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE
def random_int(seq, length):
n = n_ = length - 1
#print('len', n)
if n == 0:
return n
r = seq() / 9 * 10
base = 10
while n > 10:
n /= 10
r += seq() / 9 * 10
base += 10
r = int(round(n_ * r / base))
return r
def random_choice(seq, items, pop=False):
n = random_int(seq, len(items))
if pop:
return items.pop(n)
return items[n]
def chance(seq, chance):
return (seq() / 10) < chance
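# Illustration (sketch, assuming pi.random as seeded below in compose()): the helpers
# above turn pi digits 0-9 into decisions, e.g.
#     seq = random(10000 + 1024 * 1000)   # same seeding as compose() with base=1024
#     flip = chance(seq, 0.5)             # True for digits 0-4, a 50% coin flip
#     idx = random_int(seq, len(clips))   # rescale digits into an index 0..len(clips)-1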
def get_clip_by_seqid(clips, seqid):
selected = None
for i, clip in enumerate(clips):
if clip['seqid'] == seqid:
selected = i
break
if selected is not None:
return clips.pop(i)
return None
def write_if_new(path, data, mode=''):
read_mode = 'r' + mode
write_mode = 'w' + mode
if os.path.exists(path):
with open(path, read_mode) as fd:
old = fd.read()
else:
old = ""
is_new = data != old
if path.endswith(".kdenlive"):
is_new = re.sub('\{.{36}\}', '', data) != re.sub('\{.{36}\}', '', old)
if is_new:
with open(path, write_mode) as fd:
fd.write(data)
def compose(clips, target=150, base=1024, voice_over=None):
length = 0
scene = {
'front': {
'V1': [],
'V2': [],
},
'back': {
'V1': [],
'V2': [],
},
'audio-back': {
'A1': [],
},
'audio-center': {
'A1': [],
},
'audio-front': {
'A1': [],
'A2': [],
'A3': [],
'A4': [],
},
'audio-rear': {
'A1': [],
'A2': [],
'A3': [],
'A4': [],
},
}
all_clips = clips.copy()
seq = random(10000 + base * 1000)
used = []
voice_overs = []
if voice_over:
vo_keys = list(sorted(voice_over))
if chance(seq, 0.5):
vo_key = vo_keys[random_int(seq, len(vo_keys))]
voice_overs.append(voice_over[vo_key])
elif len(vo_keys) >= 2:
vo1 = vo_keys.pop(random_int(seq, len(vo_keys)))
vo2 = vo_keys.pop(random_int(seq, len(vo_keys)))
voice_overs.append(voice_over[vo1])
if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
print("adding second vo")
voice_overs.append(voice_over[vo2])
print("vo:", [x['src'] for x in voice_overs], list(sorted(voice_over)))
vo_min = sum([vo['duration'] for vo in voice_overs])
sub_offset = 0
if vo_min > target:
target = vo_min
elif vo_min < target:
offset = (target - vo_min) / 2
scene['audio-center']['A1'].append({
'blank': True,
'duration': offset
})
scene['audio-rear']['A1'].append({
'blank': True,
'duration': offset
})
vo_min += offset
sub_offset = offset
subs = []
for vo in voice_overs:
voc = vo.copy()
a, b = '-11', '-3'
if 'Whispered' in voc['src']:
a, b = '-8', '0'
elif 'Read' in voc['src']:
a, b = '-7.75', '0.25'
elif 'Free' in voc['src']:
a, b = '-8.8', '-0.8'
elif 'Ashley' in voc['src']:
a, b = '-9.5', '-1.50'
elif 'Melody' in voc['src']:
a, b = '-5.25', '-0.25'
voc['filter'] = {'volume': a}
scene['audio-center']['A1'].append(voc)
vo_low = vo.copy()
vo_low['filter'] = {'volume': b}
scene['audio-rear']['A1'].append(vo_low)
for sub in voc.get("subs", []):
sub = sub.copy()
sub["in"] += sub_offset
sub["out"] += sub_offset
subs.append(sub)
sub_offset += voc["duration"]
if subs:
scene["subtitles"] = subs
clip = None
while target - length > 0 and clips:
# coin flip which site is visible (50% chance)
if length:
remaining = target - length
remaining = remaining * 1.05 # allow for max of 10% over time
clips_ = [c for c in clips if c['duration'] <= remaining]
if clips_:
clips = clips_
if clip:
if chance(seq, 0.5):
next_seqid = clip['seqid'] + 1
clip = get_clip_by_seqid(clips, next_seqid)
else:
clip = None
if not clip:
clip = random_choice(seq, clips, True)
if not clips:
print("not enough clips, need to reset")
clips = [c for c in all_clips if c != clip and c not in used]
if not clips:
print("not enough clips, also consider used")
clips = [c for c in all_clips if c != clip]
if not clips:
print("not enough clips, also consider last clip")
clips = all_clips.copy()
if length + clip['duration'] > target and length >= vo_min:
break
print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
length += clip['duration']
if "foreground" not in clip and "animation" in clip:
fg = clip['animation']
transparancy = 1
else:
fg = clip['foreground']
if 'animation' in clip and chance(seq, 0.15):
fg = clip['animation']
transparancy = 1
else:
if 'foreground2' in clip:
if 'foreground3' in clip:
n = seq()
if n <= 3: # 0,1,2,3
fg = clip['foreground']
elif n <= 6: # 4,5,6
fg = clip['foreground2']
else: # 7,8,9
fg = clip['foreground3']
elif chance(seq, 0.5):
fg = clip['foreground2']
transparancy = seq() / 9
transparancy = 1
if 'foley' in clip:
foley = clip['foley']
else:
foley = fg
scene['front']['V2'].append({
'duration': clip['duration'],
'src': fg,
"filter": {
'transparency': transparancy,
}
})
transparency = seq() / 9
# 50% of time no transparancy of foreground layer
# 50% some transparancy, 25%, 50%, 75% levels of transparancy
transparancy = 1
# coin flip which site is visible (50% chance)
#if chance(seq, 0.5):
if chance(seq, 0.8):
transparency_front = transparency
transparency_back = 0
else:
transparency_back = random_choice(seq, [0.25, 0.5, 0.75, 1])
transparency_front = 0
transparency_original = seq() / 9
transparency_original = 1
if "background" in clip:
scene['front']['V1'].append({
'duration': clip['duration'],
'src': clip['background'],
"filter": {
'transparency': transparency_front
}
})
scene['back']['V2'].append({
'duration': clip['duration'],
'src': clip['background'],
"filter": {
'transparency': transparency_back
}
})
else:
scene['front']['V1'].append({
'duration': clip['duration'],
'src': clip['animation'],
"filter": {
'transparency': 0,
}
})
scene['back']['V2'].append({
'duration': clip['duration'],
'src': clip['original'],
"filter": {
'transparency': 0,
}
})
scene['back']['V1'].append({
'duration': clip['duration'],
'src': clip['original'],
"filter": {
'transparency': transparency_original,
}
})
# 50 % chance to blur original from 0 to 30
if chance(seq, 0.5):
blur = seq() * 3
if blur:
scene['back']['V1'][-1]['filter']['blur'] = blur
scene['audio-back']['A1'].append({
'duration': clip['duration'],
'src': clip['original'],
'filter': {'volume': '-8.2'},
})
# TBD: Foley
cf_volume = '-2.5'
scene['audio-front']['A2'].append({
'duration': clip['duration'],
'src': foley,
'filter': {'volume': cf_volume},
})
scene['audio-rear']['A2'].append({
'duration': clip['duration'],
'src': foley,
'filter': {'volume': cf_volume},
})
used.append(clip)
print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
return scene, used
def get_scene_duration(scene):
duration = 0
for key, value in scene.items():
for name, clips in value.items():
for clip in clips:
duration += clip['duration']
return duration
def get_offset_duration(prefix):
duration = 0
for root, folders, files in os.walk(prefix):
for f in files:
if f == 'scene.json':
path = os.path.join(root, f)
scene = json.load(open(path))
duration += get_scene_duration(scene)
return duration
def render(root, scene, prefix=''):
fps = 24
files = []
scene_duration = int(get_scene_duration(scene) * 24)
for timeline, data in scene.items():
if timeline == "subtitles":
path = os.path.join(root, prefix + "front.srt")
data = fix_overlaps(data)
srt = ox.srt.encode(data)
write_if_new(path, srt, 'b')
continue
#print(timeline)
project = KDEnliveProject(root)
tracks = []
track_durations = {}
for track, clips in data.items():
#print(track)
for clip in clips:
project.append_clip(track, clip)
track_durations[track] = int(sum([c['duration'] for c in clips]) * 24)
if timeline.startswith('audio-'):
track_duration = project.get_duration()
delta = scene_duration - track_duration
if delta > 0:
for track in track_durations:
if track_durations[track] == track_duration:
project.append_clip(track, {'blank': True, "duration": delta/24})
break
path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
project_xml = project.to_xml()
write_if_new(path, project_xml)
files.append(path)
return files
def get_fragments(clips, voice_over, prefix):
import itemlist.models
import item.models
fragments = []
for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
if l.name.split(' ')[0].isdigit():
fragment = {
'name': l.name,
'tags': [],
'anti-tags': [],
'description': l.description
}
for con in l.query['conditions']:
if "conditions" in con:
for sub in con["conditions"]:
if sub['key'] == "tags" and sub['operator'] == '==':
fragment['tags'].append(sub['value'])
elif sub['key'] == "tags" and sub['operator'] == '!=':
fragment['tags'].append(sub['value'])
else:
print(l.name, 'unknown sub condition', sub)
elif con.get('key') == "tags" and con['operator'] == '==':
fragment['tags'].append(con['value'])
elif con.get('key') == "tags" and con['operator'] == '!=':
fragment['anti-tags'].append(con['value'])
fragment["id"] = int(fragment['name'].split(' ')[0])
originals = []
for i in l.get_items(l.user):
orig = i.files.filter(selected=True).first()
if orig:
ext = os.path.splitext(orig.data.path)[1]
type_ = i.data['type'][0].lower()
target = os.path.join(prefix, type_, i.data['title'] + ext)
originals.append(target)
fragment['clips'] = []
for clip in clips:
#if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']):
if clip['original'] in originals:
fragment['clips'].append(clip)
fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
fragments.append(fragment)
fragments.sort(key=lambda f: ox.sort_string(f['name']))
return fragments
def render_all(options):
prefix = options['prefix']
duration = int(options['duration'])
base = int(options['offset'])
_cache = os.path.join(prefix, "cache.json")
if os.path.exists(_cache):
with open(_cache) as fd:
_CACHE.update(json.load(fd))
with open(os.path.join(prefix, "clips.json")) as fd:
clips = json.load(fd)
with open(os.path.join(prefix, "voice_over.json")) as fd:
voice_over = json.load(fd)
fragments = get_fragments(clips, voice_over, prefix)
with open(os.path.join(prefix, "fragments.json"), "w") as fd:
json.dump(fragments, fd, indent=2, ensure_ascii=False)
position = target_position = 0
target = fragment_target = duration / len(fragments)
base_prefix = os.path.join(prefix, 'render', str(base))
clips_used = []
stats = defaultdict(lambda: 0)
fragment_base = base
for fragment in fragments:
fragment_base += 1
fragment_id = int(fragment['name'].split(' ')[0])
name = fragment['name'].replace(' ', '_')
if fragment_id < 10:
name = '0' + name
if not fragment['clips']:
print("skipping empty fragment", name)
continue
fragment_prefix = os.path.join(base_prefix, name)
os.makedirs(fragment_prefix, exist_ok=True)
fragment_clips = fragment['clips']
unused_fragment_clips = [c for c in fragment_clips if c not in clips_used]
print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
scene, used = compose(unused_fragment_clips, target=target, base=fragment_base, voice_over=fragment['voice_over'])
clips_used += used
scene_duration = get_scene_duration(scene)
print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
src = [a for a in scene['audio-rear']['A1'] if 'src' in a][0]['src']
stats[src.split('/')[-2]] += 1
position += scene_duration
target_position += fragment_target
if position > target_position:
target = fragment_target - (position-target_position)
print("adjusting target duration for next fragment: %6.3f -> %6.3f" % (fragment_target, target))
elif position < target_position:
target = target + 0.1 * fragment_target
timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)
if not options['no_video']:
for timeline in timelines:
print(timeline)
ext = '.mp4'
if '/audio' in timeline:
ext = '.wav'
cmd = [
'xvfb-run', '-a',
'melt', timeline,
'-quiet',
'-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
]
if ext == '.wav':
cmd += ['vn=1']
else:
#if not timeline.endswith("back.kdenlive"):
cmd += ['an=1']
cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
subprocess.call(cmd)
if ext == '.wav' and timeline.endswith('audio.kdenlive'):
cmd = [
'ffmpeg', '-y',
'-nostats', '-loglevel', 'error',
'-i',
timeline.replace('.kdenlive', ext),
timeline.replace('.kdenlive', '.mp4')
]
subprocess.call(cmd)
os.unlink(timeline.replace('.kdenlive', ext))
fragment_prefix = Path(fragment_prefix)
cmds = []
for src, out1, out2 in (
("audio-front.wav", "fl.wav", "fr.wav"),
("audio-center.wav", "fc.wav", "lfe.wav"),
("audio-rear.wav", "bl.wav", "br.wav"),
):
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / src,
"-filter_complex",
"[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
"-map", "[left]", fragment_prefix / out1,
"-map", "[right]", fragment_prefix / out2,
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "fl.wav",
"-i", fragment_prefix / "fr.wav",
"-i", fragment_prefix / "fc.wav",
"-i", fragment_prefix / "lfe.wav",
"-i", fragment_prefix / "bl.wav",
"-i", fragment_prefix / "br.wav",
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
"-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "front.mp4",
"-i", fragment_prefix / "audio-5.1.mp4",
"-c", "copy",
fragment_prefix / "front-5.1.mp4",
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "back.mp4",
"-i", fragment_prefix / "audio-back.wav",
"-c:v", "copy",
fragment_prefix / "back-audio.mp4",
])
for cmd in cmds:
#print(" ".join([str(x) for x in cmd]))
subprocess.call(cmd)
for a, b in (
("back-audio.mp4", "back.mp4"),
("front-5.1.mp4", "back.mp4"),
):
duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
if duration_a != duration_b:
print('!!', duration_a, fragment_prefix / a)
print('!!', duration_b, fragment_prefix / b)
sys.exit(-1)
shutil.move(fragment_prefix / "back-audio.mp4", fragment_prefix / "back.mp4")
shutil.move(fragment_prefix / "front-5.1.mp4", fragment_prefix / "front.mp4")
for fn in (
"audio-5.1.mp4",
"audio-center.wav", "audio-rear.wav",
"audio-front.wav", "audio-back.wav", "back-audio.mp4",
"fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
):
fn = fragment_prefix / fn
if os.path.exists(fn):
os.unlink(fn)
print("Duration - Target: %s Actual: %s" % (target_position, position))
print(json.dumps(dict(stats), sort_keys=True, indent=2))
with open(_cache, "w") as fd:
json.dump(_CACHE, fd)
def add_translations(sub, lang):
value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if sub.languages:
value = ox.strip_tags(value)
if lang:
for slang in lang:
if slang == "en":
slang = None
for tsub in sub.item.annotations.filter(layer="subtitles", start=sub.start, end=sub.end, languages=slang):
tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if tsub.languages:
tvalue = ox.strip_tags(tvalue)
value += '\n' + tvalue
return value
def get_srt(sub, offset=0, lang=None):
sdata = sub.json(keys=['in', 'out', 'value'])
sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if lang:
sdata['value'] = add_translations(sub, lang)
if offset:
sdata["in"] += offset
sdata["out"] += offset
return sdata
def fix_overlaps(data):
previous = None
for sub in data:
if previous is None:
previous = sub
else:
if sub['in'] < previous['out']:
previous['out'] = sub['in'] - 0.001
previous = sub
return data
def update_subtitles(options):
import item.models
prefix = Path(options['prefix'])
base = int(options['offset'])
lang = options["lang"]
if lang and "," in lang:
lang = lang.split(',')
if isinstance(lang, list):
tlang = lang[1:]
lang = lang[0]
else:
tlang = None
if lang == "en":
lang = None
_cache = os.path.join(prefix, "cache.json")
if os.path.exists(_cache):
with open(_cache) as fd:
_CACHE.update(json.load(fd))
base_prefix = prefix / 'render' / str(base)
for folder in os.listdir(base_prefix):
folder = base_prefix / folder
scene_json = folder / "scene.json"
if not os.path.exists(scene_json):
continue
with open(scene_json) as fd:
scene = json.load(fd)
offset = 0
subs = []
for clip in scene['audio-center']['A1']:
if not clip.get("blank"):
batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
vo = item.models.Item.objects.filter(data__batch__icontains=batch, data__title__startswith=fragment_id + '_').first()
if vo:
#print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
for sub in vo.annotations.filter(layer="subtitles").filter(languages=lang).exclude(value="").order_by("start"):
sdata = get_srt(sub, offset, tlang)
subs.append(sdata)
else:
print("could not find vo for %s" % clip['src'])
offset += clip['duration']
path = folder / "front.srt"
data = fix_overlaps(subs)
srt = ox.srt.encode(subs)
write_if_new(str(path), srt, 'b')
def update_m3u(render_prefix, exclude=[]):
files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4"))
for ex in exclude:
files = [f for f in files if not f.startswith(ex + "/")]
back_m3u = "\n".join(files)
back_m3u = back_m3u.replace(render_prefix, "")
front_m3u = back_m3u.replace("back.mp4", "front.mp4")
back_m3u_f = render_prefix + "back.m3u"
front_m3u_f = render_prefix + "front.m3u"
with open(back_m3u_f + "_", "w") as fd:
fd.write(back_m3u)
with open(front_m3u_f + "_", "w") as fd:
fd.write(front_m3u)
shutil.move(front_m3u_f + "_", front_m3u_f)
cmd = ["scp", front_m3u_f, "front:" + front_m3u_f]
subprocess.check_call(cmd)
shutil.move(back_m3u_f + "_", back_m3u_f)
def render_infinity(options):
prefix = options['prefix']
duration = int(options['duration'])
state_f = os.path.join(prefix, "infinity.json")
if os.path.exists(state_f):
with open(state_f) as fd:
state = json.load(fd)
else:
state = {
"offset": 100,
"max-items": 30,
"no_video": False,
}
for key in ("prefix", "duration"):
state[key] = options[key]
while True:
render_prefix = state["prefix"] + "/render/"
current = [
f for f in os.listdir(render_prefix)
if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
]
if len(current) > state["max-items"]:
current = ox.sorted_strings(current)
remove = current[:-state["max-items"]]
update_m3u(render_prefix, exclude=remove)
for folder in remove:
folder = render_prefix + folder
print("remove", folder)
shutil.rmtree(folder)
cmd = ["ssh", "front", "rm", "-rf", folder]
#print(cmd)
subprocess.check_call(cmd)
render_all(state)
path = "%s%s/" % (render_prefix, state["offset"])
cmd = ['rsync', '-a', path, "front:" + path]
subprocess.check_call(cmd)
update_m3u(render_prefix)
state["offset"] += 1
with open(state_f + "~", "w") as fd:
json.dump(state, fd, indent=2)
shutil.move(state_f + "~", state_f)


@@ -1,648 +0,0 @@
#!/usr/bin/python3
from collections import defaultdict
import subprocess
import lxml.etree
import uuid
import os
_CACHE = {}
_IDS = defaultdict(int)
def get_propery(element, name):
return element.xpath('property[@name="%s"]' % name)[0].text
def melt_xml(file):
out = None
real_path = os.path.realpath(file)
if file in _CACHE and isinstance(_CACHE[file], list):
ts, out = _CACHE[file]
if os.stat(real_path).st_mtime != ts:
out = None
if not out:
out = subprocess.check_output(['melt', file, '-consumer', 'xml']).decode()
_CACHE[file] = [os.stat(real_path).st_mtime, out]
return out
class KDEnliveProject:
def to_xml(self):
track = self._main_tractor.xpath(".//track")[0]
duration = self.get_duration()
values = {
"in": "0",
"out": str(duration - 1)
}
for key, value in values.items():
track.attrib[key] = value
self._sequence.attrib[key] = value
self._main_tractor.attrib[key] = value
self._audio_tractor.attrib[key] = value
self._tree.remove(self._sequence)
self._tree.append(self._sequence)
self._tree.remove(self._main_bin)
self._tree.append(self._main_bin)
self._tree.remove(self._main_tractor)
self._tree.append(self._main_tractor)
xml = lxml.etree.tostring(self._tree, pretty_print=True).decode()
xml = xml.replace('><', '>\n<')
return "<?xml version='1.0' encoding='utf-8'?>\n" + xml
def __init__(
self, root,
width="1920", height="1080",
display_aspect_num="16", display_aspect_den="9",
frame_rate_num="24", frame_rate_den="1"
):
self._duration = defaultdict(int)
self._counters = defaultdict(int)
self._uuid = '{%s}' % str(uuid.uuid1())
self._width = int(width)
self._height = int(height)
self._fps = int(frame_rate_num) / int(frame_rate_den)
self._tree = self.get_element("mlt", attrib={
"LC_NUMERIC": "C",
"producer": "main_bin",
"version": "7.18.0",
"root": root
}, children=[
self.get_element("profile", attrib={
"frame_rate_num": str(frame_rate_num),
"frame_rate_den": str(frame_rate_den),
"display_aspect_den": str(display_aspect_den),
"display_aspect_num": str(display_aspect_num),
"colorspace": "601",
"progressive": "1",
"description": "%sx%s %0.2ffps" % (self._width, self._height, self._fps),
"width": str(width),
"height": str(height),
"sample_aspect_num": "1",
"sample_aspect_den": "1"
}),
p0 := self.get_element("producer", attrib={
"in": "0",
"out": "2147483647"
}, children=[
["length", "2147483647"],
["eof", "continue"],
["resource", "black"],
["aspect_ratio", "1"],
["mlt_service", "color"],
["kdenlive:playlistid", "black_track"],
["mlt_image_format", "rgba"],
["set.test_audio", "0"],
]),
a4 := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
a4e := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
t_a4 := self.get_element("tractor", children=[
["kdenlive:audio_track", "1"],
["kdenlive:trackheight", "69"],
["kdenlive:timeline_active", "1"],
["kdenlive:collapsed", "0"],
["kdenlive:thumbs_format", None],
["kdenlive:audio_rec", None],
self.get_element("track", attrib={"hide": "video", "producer": a4.attrib["id"]}),
self.get_element("track", attrib={"hide": "video", "producer": a4e.attrib["id"]}),
self.get_element("filter", [
["window", "75"],
["max_gain", "20dB"],
["mlt_service", "volume"],
["internal_added", "237"],
["disable", "1"],
]),
self.get_element("filter", [
["channel", "-1"],
["mlt_service", "panner"],
["internal_added", "237"],
["start", "0.5"],
["disable", "1"],
]),
self.get_element("filter", [
["iec_scale", "0"],
["mlt_service", "audiolevel"],
["dbpeak", "1"],
["disable", "1"],
]),
]),
a3 := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
a3e := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
t_a3 := self.get_element("tractor", children=[
["kdenlive:audio_track", "1"],
["kdenlive:trackheight", "69"],
["kdenlive:timeline_active", "1"],
["kdenlive:collapsed", "0"],
["kdenlive:thumbs_format", None],
["kdenlive:audio_rec", None],
self.get_element("track", attrib={"hide": "video", "producer": a3.attrib["id"]}),
self.get_element("track", attrib={"hide": "video", "producer": a3e.attrib["id"]}),
self.get_element("filter", [
["window", "75"],
["max_gain", "20dB"],
["mlt_service", "volume"],
["internal_added", "237"],
["disable", "1"],
]),
self.get_element("filter", [
["channel", "-1"],
["mlt_service", "panner"],
["internal_added", "237"],
["start", "0.5"],
["disable", "1"],
]),
self.get_element("filter", [
["iec_scale", "0"],
["mlt_service", "audiolevel"],
["dbpeak", "1"],
["disable", "1"],
]),
]),
a2 := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
a2e := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
t_a2 := self.get_element("tractor", children=[
["kdenlive:audio_track", "1"],
["kdenlive:trackheight", "69"],
["kdenlive:timeline_active", "1"],
["kdenlive:collapsed", "0"],
["kdenlive:thumbs_format", None],
["kdenlive:audio_rec", None],
self.get_element("track", attrib={"hide": "video", "producer": a2.attrib["id"]}),
self.get_element("track", attrib={"hide": "video", "producer": a2e.attrib["id"]}),
self.get_element("filter", [
["window", "75"],
["max_gain", "20dB"],
["mlt_service", "volume"],
["internal_added", "237"],
["disable", "1"],
]),
self.get_element("filter", [
["channel", "-1"],
["mlt_service", "panner"],
["internal_added", "237"],
["start", "0.5"],
["disable", "1"],
]),
self.get_element("filter", [
["iec_scale", "0"],
["mlt_service", "audiolevel"],
["dbpeak", "1"],
["disable", "1"],
]),
]),
a1 := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
a1e := self.get_element("playlist", children=[
["kdenlive:audio_track", "1"],
]),
t_a1 := self.get_element("tractor", children=[
["kdenlive:audio_track", "1"],
["kdenlive:trackheight", "69"],
["kdenlive:timeline_active", "1"],
["kdenlive:collapsed", "0"],
["kdenlive:thumbs_format", None],
["kdenlive:audio_rec", None],
self.get_element("track", attrib={"hide": "video", "producer": a1.attrib["id"]}),
self.get_element("track", attrib={"hide": "video", "producer": a1e.attrib["id"]}),
self.get_element("filter", [
["window", "75"],
["max_gain", "20dB"],
["mlt_service", "volume"],
["internal_added", "237"],
["disable", "1"],
]),
self.get_element("filter", [
["channel", "-1"],
["mlt_service", "panner"],
["internal_added", "237"],
["start", "0.5"],
["disable", "1"],
]),
self.get_element("filter", [
["iec_scale", "0"],
["mlt_service", "audiolevel"],
["dbpeak", "1"],
["disable", "1"],
]),
]),
v1 := self.get_element("playlist", children=[
]),
v1e := self.get_element("playlist", children=[
]),
t2 := self.get_element("tractor", attrib={
"in": "00:00:00.000",
"out": "00:00:25.333"
}, children=[
["kdenlive:trackheight", "69"],
["kdenlive:timeline_active", "1"],
["kdenlive:collapsed", "0"],
["kdenlive:thumbs_format", None],
["kdenlive:audio_rec", None],
["kdenlive:locked_track", None],
self.get_element("track", attrib={"hide": "audio", "producer": v1.attrib["id"]}),
self.get_element("track", attrib={"hide": "audio", "producer": v1e.attrib["id"]}),
]),
v2 := self.get_element("playlist", children=[
]),
v2e := self.get_element("playlist", children=[
]),
t3 := self.get_element("tractor", attrib={
"in": "00:00:00.000"
}, children=[
["kdenlive:trackheight", "69"],
["kdenlive:timeline_active", "1"],
["kdenlive:collapsed", "0"],
["kdenlive:thumbs_format", None],
["kdenlive:audio_rec", None],
["kdenlive:locked_track", None],
self.get_element("track", attrib={"hide": "audio", "producer": v2.attrib["id"]}),
self.get_element("track", attrib={"hide": "audio", "producer": v2e.attrib["id"]}),
]),
sequence := self.get_element("tractor", [
["kdenlive:uuid", self._uuid],
["kdenlive:clipname", "Sequence 1"],
["kdenlive:sequenceproperties.hasAudio", "1"],
["kdenlive:sequenceproperties.hasVideo", "1"],
["kdenlive:sequenceproperties.activeTrack", "2"],
["kdenlive:sequenceproperties.tracksCount", "4"],
["kdenlive:sequenceproperties.documentuuid", self._uuid],
["kdenlive:duration", "00:00:25:09"],
["kdenlive:maxduration", "872"],
["kdenlive:producer_type", "17"],
["kdenlive:id", self.get_counter("kdenlive:id")],
["kdenlive:clip_type", "0"],
["kdenlive:folderid", "2"],
["kdenlive:sequenceproperties.audioChannels", "2"],
["kdenlive:sequenceproperties.audioTarget", "1"],
["kdenlive:sequenceproperties.tracks", "4"],
["kdenlive:sequenceproperties.verticalzoom", "1"],
["kdenlive:sequenceproperties.videoTarget", "2"],
["kdenlive:sequenceproperties.zonein", "0"],
["kdenlive:sequenceproperties.zoneout", "75"],
["kdenlive:sequenceproperties.zoom", "8"],
["kdenlive:sequenceproperties.groups", "[]"],
["kdenlive:sequenceproperties.guides", "[]"],
["kdenlive:sequenceproperties.position", "0"],
["kdenlive:sequenceproperties.scrollPos", "0"],
["kdenlive:sequenceproperties.disablepreview", "0"],
self.get_element("track", attrib={"producer": p0.attrib["id"]}),
self.get_element("track", attrib={"producer": t_a4.attrib["id"]}),
self.get_element("track", attrib={"producer": t_a3.attrib["id"]}),
self.get_element("track", attrib={"producer": t_a2.attrib["id"]}),
self.get_element("track", attrib={"producer": t_a1.attrib["id"]}),
self.get_element("track", attrib={"producer": t2.attrib["id"]}),
self.get_element("track", attrib={"producer": t3.attrib["id"]}),
self.get_element("transition", [
["a_track", "0"],
["b_track", "1"],
["mlt_service", "mix"],
["kdenlive_id", "mix"],
["internal_added", "237"],
["always_active", "1"],
["accepts_blanks", "1"],
["sum", "1"],
]),
self.get_element("transition", [
["a_track", "0"],
["b_track", "2"],
["mlt_service", "mix"],
["kdenlive_id", "mix"],
["internal_added", "237"],
["always_active", "1"],
["accepts_blanks", "1"],
["sum", "1"],
]),
self.get_element("transition", [
["a_track", "0"],
["b_track", "3"],
["mlt_service", "mix"],
["kdenlive_id", "mix"],
["internal_added", "237"],
["always_active", "1"],
["accepts_blanks", "1"],
["sum", "1"],
]),
self.get_element("transition", [
["a_track", "0"],
["b_track", "4"],
["mlt_service", "mix"],
["kdenlive_id", "mix"],
["internal_added", "237"],
["always_active", "1"],
["accepts_blanks", "1"],
["sum", "1"],
]),
self.get_element("transition", [
["a_track", "0"],
["b_track", "5"],
["compositing", "0"],
["distort", "0"],
["rotate_center", "0"],
["mlt_service", "qtblend"],
["kdenlive_id", "qtblend"],
["internal_added", "237"],
["always_active", "1"],
["accepts_blanks", "1"],
["sum", "1"],
]),
self.get_element("transition", [
["a_track", "0"],
["b_track", "6"],
["compositing", "0"],
["distort", "0"],
["rotate_center", "0"],
["mlt_service", "qtblend"],
["kdenlive_id", "qtblend"],
["internal_added", "237"],
["always_active", "1"],
["accepts_blanks", "1"],
["sum", "1"],
]),
self.get_element("filter", [
["window", "75"],
["max_gain", "20dB"],
["mlt_service", "volume"],
["internal_added", "237"],
["disable", "1"],
]),
self.get_element("filter", [
["channel", "-1"],
["mlt_service", "panner"],
["internal_added", "237"],
["start", "0.5"],
["disable", "1"],
]),
], {
"id": self._uuid
}),
main_bin := self.get_element("playlist", [
["kdenlive:folder.-1.2", "Sequences"],
["kdenlive:sequenceFolder", "2"],
["kdenlive:docproperties.kdenliveversion", "23.08.0"],
self.get_element("property", attrib={"name": "kdenlive:docproperties.previewextension"}),
self.get_element("property", attrib={"name": "kdenlive:docproperties.previewparameters"}),
["kdenlive:docproperties.seekOffset", "30000"],
["kdenlive:docproperties.uuid", self._uuid],
["kdenlive:docproperties.version", "1.1"],
["kdenlive:expandedFolders", None],
["kdenlive:binZoom", "4"],
self.get_element("property", attrib={"name": "kdenlive:documentnotes"}),
["kdenlive:docproperties.opensequences", self._uuid],
["kdenlive:docproperties.activetimeline", self._uuid],
["xml_retain", "1"],
self.get_element("entry", attrib={"producer": self._uuid, "in": "0", "out": "0"}),
], {
"id": "main_bin"
}),
t4 := self.get_element("tractor", [
["kdenlive:projectTractor", "1"],
self.get_element("track", attrib={"producer": self._uuid}),
])
])
self._sequence = sequence
self._main_bin = main_bin
self._main_tractor = t4
self._audio_tractor = t_a1
self._v1 = v1
self._v2 = v2
self._a1 = a1
self._a2 = a2
self._a3 = a3
self._a4 = a4
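# per-prefix counters hand out sequential element ids (chain0, chain1, ...) and kdenlive:id values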
def get_counter(self, prefix):
self._counters[prefix] += 1
return str(self._counters[prefix] - 1)
def get_id(self, prefix):
return prefix + self.get_counter(prefix)
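# probe the file with melt (melt_xml) and convert the resulting <producer>
# into a kdenlive <chain> carrying the bin properties kdenlive expects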
def get_chain(self, file, kdenlive_id=None):
out = melt_xml(file)
chain = lxml.etree.fromstring(out).xpath('producer')[0]
chain.tag = 'chain'
chain.attrib['id'] = self.get_id('chain')
# TBD
if kdenlive_id is None:
kdenlive_id = self.get_counter("kdenlive:id")
for name, value in [
("kdenlive:file_size", os.path.getsize(file)),
("kdenlive:clipname", None),
("kdenlive:clip_type", "0"),
("kdenlive:folderid", "-1"),
("kdenlive:id", kdenlive_id),
("set.test_audio", "0"),
("set.test_image", "0"),
("xml", "was here"),
]:
chain.append(
self.get_element(
"property",
attrib={"name": name},
text=str(value) if value is not None else None
)
)
mlt_service = chain.xpath('property[@name="mlt_service"]')[0]
mlt_service.text = "avformat-novalidate"
return chain
def get_duration(self):
if not self._duration:
return 0
return max(self._duration.values())
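# children may be Element objects, [name, value] pairs (turned into <property> tags),
# or dicts / argument lists for nested get_element() calls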
def get_element(self, tag, children=[], attrib={}, text=None):
element = lxml.etree.Element(tag)
if tag not in (
"blank",
"entry",
"mlt",
"profile",
"property",
"track",
) and "id" not in attrib:
element.attrib['id'] = self.get_id(tag)
if attrib:
for key, value in attrib.items():
element.attrib[key] = value
for child in children:
if isinstance(child, list) and len(child) == 2:
v = child[1]
if v is not None:
v = str(v)
child = self.get_element("property", attrib={"name": child[0]}, text=v)
if isinstance(child, dict):
child = self.get_element(**child)
elif isinstance(child, list):
child = self.get_element(*child)
element.append(child)
if text is not None:
element.text = text
return element
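# map the short filter names used in clip definitions ("transparency", "blur", "mask", "volume")
# to the frei0r / avfilter / MLT filter elements written into the project file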
def get_filter(self, name, value):
if name == "transparency":
return [self.get_element("filter", [
["version", "0.9"],
["mlt_service", "frei0r.transparency"],
["kdenlive_id", "frei0r.transparency"],
["0", "00:00:00.000=%s" % value],
["kdenlive:collapsed", "0"],
])]
elif name == "blur":
return [self.get_element("filter", [
["mlt_service", "avfilter.avgblur"],
["kdenlive_id", "avfilter.avgblur"],
["av.sizeX", value],
["av.sizeY", value],
["planes", "7"],
["kdenlive:collapsed", "0"],
])]
elif name == "mask":
mask = [
self.get_element("filter", [
["mlt_service", "frei0r.saturat0r"],
["kdenlive_id", "frei0r.saturat0r"],
["Saturation", "00:00:00.000=0.001"],
]),
self.get_element("filter", [
["mlt_service", "frei0r.select0r"],
["kdenlive_id", "frei0r.select0r"],
["Color to select", "00:00:00.000=0x000000ff"],
["Invert selection", "1"],
["Selection subspace", "0"],
["Subspace shape", "0.5"],
["Edge mode", "0.9"],
["Delta R / A / Hue", "00:00:00.000=0.381"],
["Delta G / B / Chroma", "00:00:00.000=0.772"],
["Delta B / I / I", "00:00:00.000=0.522"],
["Slope", "00:00:00.000=0.515"],
["Operation", "0.5"],
])
]
return mask
elif name == "volume":
return [self.get_element("filter", [
["window", "75"],
["max_gain", "20db"],
["mlt_service", "volume"],
["kdenlive_id", "volume"],
["level", "00:00:00.000=%s" % value],
["kdenlive:collapsed", "0"],
])]
else:
return [
self.get_element("filter", [
["mlt_service", name],
["kdenlive_id", name],
] + value)
]
def properties(self, *props):
return [
self.get_element("property", attrib={"name": name}, text=str(value) if value is not None else value)
for name, value in props
]
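# clip is a dict with "src", "duration" (seconds) and optional "filter" / "blank" keys;
# it is appended to the named timeline track and registered in the main bin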
def append_clip(self, track_id, clip):
if track_id == "V1":
track = self._v1
elif track_id == "V2":
track = self._v2
elif track_id == "A1":
track = self._a1
elif track_id == "A2":
track = self._a2
elif track_id == "A3":
track = self._a3
elif track_id == "A4":
track = self._a4
else:
print('!!', track_id)
return
frames = int(self._fps * clip['duration'])
self._duration[track_id] += frames
if clip.get("blank"):
track.append(
self.get_element("blank", attrib={
"length": str(frames),
})
)
return
path = clip['src']
filters = clip.get("filter", {})
#print(path, filters)
chain = self.get_chain(path)
id = get_propery(chain, "kdenlive:id")
if track_id[0] == 'A':
has_audio = False
for prop in chain.xpath('property'):
if prop.attrib['name'].endswith('stream.type') and prop.text == "audio":
has_audio = True
idx = self._tree.index(track) - 1
self._tree.insert(idx, chain)
filters_ = []
if track_id[0] == 'V':
filters_.append(
self.get_element("filter", [
["mlt_service", "qtblend"],
["kdenlive_id", "qtblend"],
["rotate_center", "1"],
["rect", "00:00:00.000=0 0 %s %s 1.000000" % (self._width, self._height)],
["rotation", "00:00:00.000=0"],
["compositing", "0"],
["distort", "0"],
["kdenlive:collapsed", "0"],
["disable", "0"],
])
)
for ft in filters.items():
filters_ += self.get_filter(*ft)
if track_id[0] == 'A' and not has_audio:
track.append(
self.get_element("blank", attrib={
"length": str(frames),
})
)
else:
track.append(
self.get_element("entry", attrib={
"producer": chain.attrib["id"],
"in": chain.attrib["in"],
"out": str(frames - 1)
}, children=[
["kdenlive:id", id],
] + filters_),
)
chain = self.get_chain(path, id)
self._tree.append(chain)
self._main_bin.append(
self.get_element("entry", attrib={
"producer": chain.attrib["id"],
"in": chain.attrib["in"],
"out": chain.attrib["out"],
}),
)

109
sax.py
View file

@ -1,109 +0,0 @@
#!/usr/bin/python3
import os
from render_kdenlive import KDEnliveProject, _CACHE
import subprocess
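# render two kdenlive audio mixes with melt, split each stereo result into mono
# channels, then merge front/rear pairs plus silent center/LFE into a 5.1 mp4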
def generate_sax_mix(root):
os.chdir(root)
root = os.path.abspath(".")
long_wav = "Soon_Kim_Long_Reverb_Only2.wav"
noise_wav = "Soon_Kim_Noise.wav"
reverb_wav = "Soon_Kim_Short_Reverb_Mix2.wav"
'''
i = item.models.Item.objects.get(data__title='Soon_Kim_Long_Reverb_Only2')
i.files.all()[0].data.path
'/srv/pandora/data/media/6b/44/16/3f2905e886/data.wav'
i = item.models.Item.objects.get(data__title='Soon_Kim_Short_Reverb_Mix2')
i.files.all()[0].data.path
'/srv/pandora/data/media/ee/e0/04/d4ab42c3de/data.wav'
i = item.models.Item.objects.get(data__title='Soon_Kim_Noise')
i.files.all()[0].data.path
'/srv/pandora/data/media/84/88/87/d2fb2e2dc2/data.wav'
'''
reverb = {
"src": reverb_wav,
"duration": 3600.0,
"filter": {
"volume": "3.5"
},
}
long = {
"src": long_wav,
"duration": 3600.0,
"filter": {
"volume": "-1"
},
}
noise = {
"src": nois_wav,
"duration": 3600.0,
"filter": {
"volume": "7.75"
},
}
project = KDEnliveProject(root)
project.append_clip('A1', long)
project.append_clip('A2', noise)
path = os.path.join(root, "sax-mix.kdenlive")
with open(path, 'w') as fd:
fd.write(project.to_xml())
project = KDEnliveProject(root)
project.append_clip('A1', reverb)
path = os.path.join(root, "sax-reverb-mix.kdenlive")
with open(path, 'w') as fd:
fd.write(project.to_xml())
cmds = []
cmds.append([
"melt", "sax-mix.kdenlive", '-quiet', '-consumer', 'avformat:sax-mix.wav'
])
cmds.append([
"melt", "sax-reverb-mix.kdenlive", '-quiet', '-consumer', 'avformat:sax-reverb-mix.wav'
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-f", "lavfi", "-i", "anullsrc=r=48000:cl=mono", "-t", "3600", "silence.wav"
])
for src, out1, out2 in (
('sax-reverb-mix.wav', "fl.wav", "fr.wav"),
("sax-mix.wav", "bl.wav", "br.wav"),
):
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", src,
"-filter_complex",
"[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
"-map", "[left]", out1,
"-map", "[right]", out2,
])
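# amerge with six mono inputs yields a 5.1 layout (FL FR FC LFE BL BR),
# so the two silence tracks end up on the center and LFE channels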
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", "fl.wav",
"-i", "fr.wav",
"-i", "silence.wav",
"-i", "silence.wav",
"-i", "bl.wav",
"-i", "br.wav",
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
"-map", "[a]",
"-ar", "48000",
"-c:a", "aac", "Saxophone-5.1.mp4"
])
for cmd in cmds:
print(" ".join([str(x) for x in cmd]))
subprocess.call(cmd)

View file

@ -1,189 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from PIL import Image
from PIL import ImageDraw
import json
from optparse import OptionParser
import ox
from ox.image import drawText, wrapText
import sys
root_dir = os.path.normpath(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
static_root = os.path.join(os.path.dirname(__file__), 'data')
def render_poster(data, poster):
title = ox.decode_html(data.get('title', ''))
director = ox.decode_html(', '.join(data.get('director', [])))
if not director and "type" in data:
director = ", ".join(data["type"])
if director == "Voice Over" and data.get("batch"):
director = "%s (vo)" % (", ".join(data["batch"])).replace('Text-', '')
vo = True
else:
vo = False
year = str(data.get('year', ''))
series = data.get('isSeries', False)
oxdb_id = data['oxdbId']
imdb_id = data['id']
frame = data.get('frame')
timeline = data.get('timeline')
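# derive the poster background colour from a hash of the oxdb id, then override
# the hue per clip type (original / background / foreground / ...) and voice-over batch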
def get_oxdb_color(oxdb_id, series=False):
i = int(round((int(oxdb_id[2:10], 16) * 762 / pow(2, 32))))
type = data.get('type')
if type:
type = data.get('type')[0].lower()
if type == "original":
i = 0
elif type == "background":
i = 200
elif type == "foreground":
i = 400
elif type == "foreground2":
i = 500
elif type == "annimation":
i = 600
elif type == "voice over":
i = 300
if '1-' in director:
i = 300
if '2-' in director:
i = 320
if '3-' in director:
i = 340
if '4-' in director:
i = 360
if '5-' in director:
i = 380
if i < 127:
color = (127, i, 0)
elif i < 254:
color = (254 - i, 127, 0)
elif i < 381:
color = (0, 127, i - 254)
elif i < 508:
color = (0, 508 - i, 127)
elif i < 635:
color = (i - 508, 0, 127)
else:
color = (127, 0, 762 - i)
if series:
color = tuple(map(lambda x: x + 128, color))
return color
poster_width = 640
poster_height = 1024
poster_ratio = poster_width / poster_height
poster_image = Image.new('RGB', (poster_width, poster_height))
draw = ImageDraw.Draw(poster_image)
font_file = os.path.join(static_root, 'DejaVuSansCondensedBold.ttf')
font_size = {
'small': 28,
'large': 42,
}
# frame
frame_width = poster_width
frame_ratio = 4 / 3
frame_height = int(round(frame_width / frame_ratio))
if frame:
frame_image = Image.open(frame)
frame_image_ratio = frame_image.size[0] / frame_image.size[1]
if frame_ratio < frame_image_ratio:
frame_image = frame_image.resize((int(frame_height * frame_image_ratio), frame_height), Image.LANCZOS)
left = int((frame_image.size[0] - frame_width) / 2)
frame_image = frame_image.crop((left, 0, left + frame_width, frame_height))
else:
frame_image = frame_image.resize((frame_width, int(frame_width / frame_image_ratio)), Image.LANCZOS)
top = int((frame_image.size[1] - frame_height) / 2)
frame_image = frame_image.crop((0, top, frame_width, top + frame_height))
poster_image.paste(frame_image, (0, 0))
# timeline
timeline_width = poster_width
timeline_height = 64
if timeline:
timeline_image = Image.open(timeline)
timeline_image = timeline_image.resize((timeline_width, timeline_height), Image.LANCZOS)
poster_image.paste(timeline_image, (0, frame_height))
# text
text_width = poster_width
text_height = poster_height - frame_height - timeline_height
text_top = frame_height + timeline_height
text_bottom = text_top + text_height
text_margin = 16
text_color = get_oxdb_color(oxdb_id, series)
font_color = tuple(map(lambda x: x - 128 if series else x + 128, text_color))
draw.rectangle([(0, text_top), (text_width, text_bottom)], fill=text_color)
offset_top = text_top + text_margin
if vo:
vo_text = (" ".join(data["batch"])).replace("Text-", "")
drawText(poster_image, (0, 0), vo_text, font_file, 100, font_color)
if not director:
title_max_lines = 7
else:
title_max_lines = min(len(wrapText(title, text_width - 2 * text_margin, 0, font_file, font_size['large'])), 6)
director_max_lines = 9 - int((title_max_lines * 3 - 1) / 2)
if director:
lines = wrapText(director, text_width - 2 * text_margin, director_max_lines, font_file, font_size['small'])
for i, line in enumerate(lines):
size = drawText(poster_image, (text_margin, offset_top), line, font_file, font_size['small'], font_color)
offset_top += font_size['small'] + 2
offset_top += size[1] - font_size['small'] + text_margin / 2
lines = wrapText(title, text_width - 2 * text_margin, title_max_lines, font_file, font_size['large'])
for i, line in enumerate(lines):
size = drawText(poster_image, (text_margin, offset_top + 5), line, font_file, font_size['large'], font_color)
offset_top += font_size['large'] + 3
offset_top += size[1] - font_size['small'] + text_margin / 2
if year:
drawText(poster_image, (text_margin, offset_top), year, font_file, font_size['small'], font_color)
item_id = imdb_id or oxdb_id
drawText(poster_image, (text_margin, text_bottom - text_margin - font_size['large'] + 2), item_id, font_file, font_size['large'], font_color)
'''
# logo
logo_height = 32
logo_image = Image.open(os.path.join(static_root, '..', '..', 'static', 'png', 'logo.png'))
logo_width = int(round(logo_height * logo_image.size[0] / logo_image.size[1]))
logo_image = logo_image.resize((logo_width, logo_height), Image.LANCZOS)
logo_left = text_width - text_margin - logo_width
logo_top = text_bottom - text_margin - logo_height
for y in range(logo_height):
for x in range(logo_width):
poster_color = poster_image.getpixel((logo_left + x, logo_top + y))
logo_color = logo_image.getpixel((x, y))[0]
alpha = logo_image.getpixel((x, y))[3]
if series:
poster_color = tuple(map(lambda x: int(x - (logo_color - 16) * alpha / 255), poster_color))
else:
poster_color = tuple(map(lambda x: int(x + (logo_color - 16) * alpha / 255), poster_color))
poster_image.putpixel((logo_left + x, logo_top + y), poster_color)
'''
poster_image.save(poster)
def main():
parser = OptionParser()
parser.add_option('-d', '--data', dest='data', help='json file with metadata', default=None)
parser.add_option('-p', '--poster', dest='poster', help='Poster (image file to be written)')
(options, args) = parser.parse_args()
if None in (options.data, options.poster):
parser.print_help()
sys.exit()
if options.data == '-':
data = json.load(sys.stdin)
else:
with open(options.data) as f:
data = json.load(f)
render_poster(data, options.poster)
if __name__ == "__main__":
main()

View file

@ -1,729 +0,0 @@
'use strict';
pandora.ui.infoView = function(data, isMixed) {
isMixed = isMixed || {};
var ui = pandora.user.ui,
descriptions = [],
isMultiple = arguments.length == 2,
canEdit = pandora.hasCapability('canEditMetadata') || isMultiple || data.editable,
canRemove = pandora.hasCapability('canRemoveItems'),
css = {
marginTop: '4px',
textAlign: 'justify'
},
html,
iconRatio = ui.icons == 'posters' ? data.posterRatio : 1,
iconSize = isMultiple ? 0 : ui.infoIconSize,
iconWidth = isMultiple ? 0 : iconRatio > 1 ? iconSize : Math.round(iconSize * iconRatio),
iconHeight = iconRatio < 1 ? iconSize : Math.round(iconSize / iconRatio),
iconLeft = isMultiple ? 0 : iconSize == 256 ? Math.floor((iconSize - iconWidth) / 2) : 0,
borderRadius = ui.icons == 'posters' ? 0 : iconSize / 8,
margin = 16,
nameKeys = pandora.site.itemKeys.filter(function(key) {
return key.sortType == 'person';
}).map(function(key) {
return key.id;
}),
listKeys = pandora.site.itemKeys.filter(function(key) {
return Ox.isArray(key.type);
}).map(function(key){
return key.id;
}),
specialListKeys = [].concat(
pandora.site.itemKeys.filter(function(key) {
return key.type[0] == 'date'
}).map(function(key) {
return key.id;
})
),
posterKeys = nameKeys.concat(['title', 'year']),
displayedKeys = [ // FIXME: can this be a flag in the config?
'title', 'notes', 'name', 'summary', 'id',
'hue', 'saturation', 'lightness', 'cutsperminute', 'volume',
'user', 'rightslevel', 'bitrate', 'timesaccessed',
'numberoffiles', 'numberofannotations', 'numberofcuts', 'words', 'wordsperminute',
'duration', 'aspectratio', 'pixels', 'size', 'resolution',
'created', 'modified', 'accessed',
'random'
],
statisticsWidth = 128,
$bar = Ox.Bar({size: 16})
.bindEvent({
doubleclick: function(e) {
if ($(e.target).is('.OxBar')) {
$info.animate({scrollTop: 0}, 250);
}
}
}),
$options = Ox.MenuButton({
items: [
{
id: 'delete',
title: Ox._('Delete {0}...', [pandora.site.itemName.singular]),
disabled: !canRemove
}
],
style: 'square',
title: 'set',
tooltip: Ox._('Options'),
type: 'image',
})
.css({
float: 'left',
borderColor: 'rgba(0, 0, 0, 0)',
background: 'rgba(0, 0, 0, 0)'
})
.bindEvent({
click: function(data_) {
if (data_.id == 'delete') {
pandora.$ui.deleteItemsDialog = pandora.ui.deleteItemsDialog({
items: [data]
}).open();
}
}
})
.appendTo($bar),
$edit = Ox.MenuButton({
items: [
{
id: 'insert',
title: Ox._('Insert HTML...'),
disabled: true
}
],
style: 'square',
title: 'edit',
tooltip: Ox._('Edit'),
type: 'image',
})
.css({
float: 'right',
borderColor: 'rgba(0, 0, 0, 0)',
background: 'rgba(0, 0, 0, 0)'
})
.bindEvent({
click: function(data) {
// ...
}
})
.appendTo($bar),
$info = Ox.Element().css({overflowY: 'auto'}),
that = Ox.SplitPanel({
elements: [
{element: $bar, size: isMultiple ? 0 : 16},
{element: $info}
],
orientation: 'vertical'
});
if (!isMultiple) {
var $icon = Ox.Element({
element: '<img>',
})
.attr({
src: '/' + data.id + '/' + (
ui.icons == 'posters' ? 'poster' : 'icon'
) + '512.jpg?' + data.modified
})
.css({
position: 'absolute',
left: margin + iconLeft + 'px',
top: margin + 'px',
width: iconWidth + 'px',
height: iconHeight + 'px',
borderRadius: borderRadius + 'px',
cursor: 'pointer'
})
.bindEvent({
singleclick: toggleIconSize
})
.appendTo($info),
$reflection = $('<div>')
.addClass('OxReflection')
.css({
position: 'absolute',
left: margin + 'px',
top: margin + iconHeight + 'px',
width: iconSize + 'px',
height: iconSize / 2 + 'px',
overflow: 'hidden'
})
.appendTo($info),
$reflectionIcon = $('<img>')
.attr({
src: '/' + data.id + '/' + (
ui.icons == 'posters' ? 'poster' : 'icon'
) + '512.jpg?' + data.modified
})
.css({
position: 'absolute',
left: iconLeft + 'px',
width: iconWidth + 'px',
height: iconHeight + 'px',
borderRadius: borderRadius + 'px'
})
.appendTo($reflection),
$reflectionGradient = $('<div>')
.css({
position: 'absolute',
width: iconSize + 'px',
height: iconSize / 2 + 'px'
})
.appendTo($reflection);
}
var $text = Ox.Element()
.addClass('OxTextPage')
.css({
position: 'absolute',
left: margin + (iconSize == 256 ? 256 : iconWidth) + margin + 'px',
top: margin + 'px',
right: margin + statisticsWidth + margin + 'px',
})
.appendTo($info),
$statistics = $('<div>')
.css({
position: 'absolute',
width: statisticsWidth + 'px',
top: margin + 'px',
right: margin + 'px'
})
.appendTo($info);
[$options, $edit].forEach(function($element) {
$element.find('input').css({
borderWidth: 0,
borderRadius: 0,
padding: '3px'
});
});
listKeys.forEach(function(key) {
if (Ox.isString(data[key])) {
data[key] = [data[key]];
}
});
if (!canEdit) {
pandora.createLinks($info);
}
// Title -------------------------------------------------------------------
$('<div>')
.css({
marginTop: '-2px',
})
.append(
Ox.EditableContent({
editable: canEdit,
tooltip: canEdit ? pandora.getEditTooltip() : '',
placeholder: formatLight(Ox._( isMixed.title ? 'Mixed title' : 'Untitled')),
value: data.title || ''
})
.css({
marginBottom: '-3px',
fontWeight: 'bold',
fontSize: '13px'
})
.bindEvent({
submit: function(event) {
editMetadata('title', event.value);
}
})
)
.appendTo($text);
// Director, Year -----------------------------------------------------
renderGroup(['director', 'year']);
// Render any remaining keys defined in the config
renderRemainingKeys();
renderCluster();
// Summary -----------------------------------------------------------------
if (canEdit || data.summary) {
$('<div>')
.append(
Ox.EditableContent({
clickLink: pandora.clickLink,
editable: canEdit,
format: function(value) {
return value.replace(
/<img src=/g,
'<img style="float: left; max-width: 256px; max-height: 256px; margin: 0 16px 16px 0" src='
);
},
maxHeight: Infinity,
placeholder: formatLight(Ox._( isMixed.summary ? 'Mixed Summary' : 'No Summary')),
tooltip: canEdit ? pandora.getEditTooltip() : '',
type: 'textarea',
value: data.summary || ''
})
.css(css)
.css({
marginTop: '12px',
overflow: 'hidden'
})
.bindEvent({
submit: function(event) {
editMetadata('summary', event.value);
}
})
)
.appendTo($text);
}
// Duration, Aspect Ratio --------------------------------------------------
if (!isMultiple) {
['duration', 'aspectratio'].forEach(function(key) {
var itemKey = Ox.getObjectById(pandora.site.itemKeys, key),
value = data[key] || 0;
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey(itemKey.title, 'statistics'))
.append(
Ox.Theme.formatColor(null, 'gradient')
.css({textAlign: 'right'})
.html(
Ox['format' + Ox.toTitleCase(itemKey.format.type)]
.apply(null, [value].concat(itemKey.format.args))
)
)
.appendTo($statistics);
});
// Hue, Saturation, Lightness, Volume --------------------------------------
['hue', 'saturation', 'lightness', 'volume'].forEach(function(key) {
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey(key, 'statistics'))
.append(
Ox.Theme.formatColor(
data[key] || 0, key == 'volume' ? 'lightness' : key
).css({textAlign: 'right'})
)
.appendTo($statistics);
});
// Cuts per Minute ---------------------------------------------------------
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey('cuts per minute', 'statistics'))
.append(
Ox.Theme.formatColor(null, 'gradient')
.css({textAlign: 'right'})
.html(Ox.formatNumber(data['cutsperminute'] || 0, 3))
)
.appendTo($statistics);
}
// Rights Level ------------------------------------------------------------
var $rightsLevel = $('<div>');
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey('Rights Level', 'statistics'))
.append($rightsLevel)
.appendTo($statistics);
pandora.renderRightsLevel(that, $rightsLevel, data, isMixed, isMultiple, canEdit);
// User and Groups ---------------------------------------------------------
if (!isMultiple || pandora.hasCapability('canEditUsers')) {
['user', 'groups'].forEach(function(key) {
if (key == 'groups' && isMultiple) {
return
};
var $input;
(canEdit || data[key] && data[key].length) && $('<div>')
.css({marginBottom: '4px'})
.append(formatKey(key, 'statistics'))
.append(
$('<div>')
.css({margin: '2px 0 0 -1px'}) // fixme: weird
.append(
$input = Ox.Editable({
placeholder: key == 'groups'
? formatLight(Ox._(isMixed[key] ? 'Mixed Groups' : 'No Groups'))
: isMixed[key] ? formatLight(Ox._('Mixed Users')) : '',
editable: key == 'user' && canEdit,
tooltip: canEdit ? pandora.getEditTooltip() : '',
value: isMixed[key]
? ''
: key == 'user' ? data[key] : data[key].join(', ')
})
.bindEvent(Ox.extend({
submit: function(event) {
editMetadata(key, event.value);
}
}, key == 'groups' ? {
doubleclick: canEdit ? function() {
setTimeout(function() {
if (window.getSelection) {
window.getSelection().removeAllRanges();
} else if (document.selection) {
document.selection.empty();
}
});
pandora.$ui.groupsDialog = pandora.ui.groupsDialog({
id: data.id,
name: data.title,
type: 'item'
})
.bindEvent({
groups: function(data) {
$input.options({
value: data.groups.join(', ')
});
}
})
.open();
} : function() {}
} : {}))
)
)
.appendTo($statistics);
});
}
// Created and Modified ----------------------------------------------------
if (!isMultiple && canEdit) {
['created', 'modified'].forEach(function(key) {
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey(key, 'statistics'))
.append(
$('<div>').html(Ox.formatDate(data[key], '%B %e, %Y'))
)
.appendTo($statistics);
});
}
// Notes --------------------------------------------------------------------
if (canEdit) {
$('<div>')
.css({marginBottom: '4px'})
.append(
formatKey('Notes', 'statistics').options({
tooltip: Ox._('Only {0} can see and edit these comments', [
Object.keys(pandora.site.capabilities.canEditMetadata).map(function(level, i) {
return (
i == 0 ? ''
: i < Ox.len(pandora.site.capabilities.canEditMetadata) - 1 ? ', '
: ' ' + Ox._('and') + ' '
) + Ox.toTitleCase(level)
}).join('')])
})
)
.append(
Ox.EditableContent({
height: 128,
placeholder: formatLight(Ox._(isMixed.notes ? 'Mixed notes' : 'No notes')),
tooltip: pandora.getEditTooltip(),
type: 'textarea',
value: data.notes || '',
width: 128
})
.bindEvent({
submit: function(event) {
editMetadata('notes', event.value);
}
})
)
.appendTo($statistics);
}
$('<div>').css({height: '16px'}).appendTo($statistics);
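// write a changed key back via pandora.api.edit, then refresh the browser row,
// the item title and (for poster keys) the icon images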
function editMetadata(key, value) {
if (value != data[key]) {
var itemKey = Ox.getObjectById(pandora.site.itemKeys, key);
var edit = {id: isMultiple ? ui.listSelection : data.id};
if (key == 'title') {
edit[key] = value;
} else if (listKeys.indexOf(key) >= 0) {
edit[key] = value ? value.split(', ') : [];
} else if (specialListKeys.indexOf(key) > -1) {
edit[key] = value
? Ox.decodeHTMLEntities(value).split('; ').map(Ox.encodeHTMLEntities)
: [];
} else {
edit[key] = value ? value : null;
}
if (itemKey && itemKey.type && itemKey.type[0] == 'date') {
edit[key] = edit[key].map(pandora.cleanupDate);
}
pandora.api.edit(edit, function(result) {
if (!isMultiple) {
var src;
data[key] = result.data[key];
descriptions[key] && descriptions[key].options({
value: result.data[key + 'description']
});
Ox.Request.clearCache(); // fixme: too much? can change filter/list etc
if (result.data.id != data.id) {
pandora.UI.set({item: result.data.id});
pandora.$ui.browser.value(data.id, 'id', result.data.id);
}
pandora.updateItemContext();
pandora.$ui.browser.value(result.data.id, key, result.data[key]);
if (Ox.contains(posterKeys, key) && ui.icons == 'posters') {
src = pandora.getMediaURL('/' + data.id + '/poster512.jpg?' + Ox.uid());
$icon.attr({src: src});
$reflectionIcon.attr({src: src});
}
pandora.$ui.itemTitle
.options({
title: '<b>' + result.data.title
+ (Ox.len(result.data.director)
? ' (' + result.data.director.join(', ') + ')'
: '')
+ (result.data.year ? ' ' + result.data.year : '') + '</b>'
});
}
that.triggerEvent('change', Ox.extend({}, key, value));
});
}
}
function formatKey(key, mode) {
var item = Ox.getObjectById(pandora.site.itemKeys, key);
key = Ox._(item ? item.title : key);
mode = mode || 'text';
return mode == 'text'
? '<span style="font-weight: bold">' + Ox.toTitleCase(key) + ':</span> '
: mode == 'description'
? Ox.toTitleCase(key)
: Ox.Element()
.css({marginBottom: '4px', fontWeight: 'bold'})
.html(Ox.toTitleCase(key)
.replace(' Per ', ' per '));
}
function formatLight(str) {
return '<span class="OxLight">' + str + '</span>';
}
function formatLink(value, key, linkValue) {
linkValue = linkValue || value
linkValue = Ox.isArray(linkValue) ? linkValue: [linkValue]
return (Ox.isArray(value) ? value : [value]).map(function(value, idx) {
return key
? '<a href="/' + (
key == 'alternativeTitles' ? 'title' : key
) + '=' + pandora.escapeQueryValue(linkValue[idx]) + '">' + value + '</a>'
: value;
}).join(Ox.contains(specialListKeys, key) ? '; ' : ', ');
}
function formatValue(key, value) {
var ret;
if (nameKeys.indexOf(key) > -1) {
ret = formatLink(value.split(', '), 'name');
} else if (
listKeys.indexOf(key) > -1 && Ox.getObjectById(pandora.site.itemKeys, key).type[0] == 'date'
) {
ret = value.split('; ').map(function(date) {
date = pandora.cleanupDate(date)
return date ? formatLink(Ox.formatDate(date,
['', '%Y', '%B %Y', '%B %e, %Y'][date.split('-').length],
true
), key, date) : '';
}).join('; ');
} else if (listKeys.indexOf(key) > -1) {
ret = formatLink(value.split(', '), key);
} else if (specialListKeys.indexOf(key) > -1) {
ret = formatLink(
Ox.decodeHTMLEntities(value).split('; ').map(Ox.encodeHTMLEntities),
key
);
} else if (['year', 'country'].indexOf(key) > -1) {
ret = formatLink(value, key);
} else {
ret = value;
}
return ret;
}
function getValue(key, value) {
return !value ? ''
: Ox.contains(specialListKeys, key) ? value.join('; ')
: Ox.contains(listKeys, key) ? value.join(', ')
: value;
}
function renderGroup(keys) {
var $element;
keys.forEach(function(key) { displayedKeys.push(key) });
if (canEdit || keys.filter(function(key) {
return data[key];
}).length) {
$element = $('<div>').addClass('OxSelectable').css(css);
keys.forEach(function(key, i) {
if (canEdit || data[key]) {
if ($element.children().length) {
$('<span>').html('; ').appendTo($element);
}
$('<span>').html(formatKey(key)).appendTo($element);
Ox.EditableContent({
clickLink: pandora.clickLink,
editable: canEdit,
format: function(value) {
return formatValue(key, value);
},
placeholder: formatLight(Ox._(isMixed[key] ? 'mixed' : 'unknown')),
tooltip: canEdit ? pandora.getEditTooltip() : '',
value: getValue(key, data[key])
})
.bindEvent({
submit: function(data) {
editMetadata(key, data.value);
}
})
.appendTo($element);
if (isMixed[key] && Ox.contains(listKeys, key)) {
pandora.ui.addRemoveKeyDialog({
ids: ui.listSelection,
key: key,
section: ui.section
}).appendTo($element)
}
}
});
$element.appendTo($text);
}
return $element;
}
function renderRemainingKeys() {
var keys = pandora.site.itemKeys.filter(function(item) {
return item.id != '*' && item.type != 'layer' && !Ox.contains(displayedKeys, item.id);
}).map(function(item) {
return item.id;
});
if (keys.length) {
renderGroup(keys)
}
}
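// list other items sharing the same base title (with the _bg/_fg suffix stripped),
// linking each related video by its type or voice-over batch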
function renderCluster() {
if (isMultiple) {
return
}
var $element = $('<div>').addClass('OxSelectable').html("<b>Related Videos:</b>");
var title = data.title.replace('_bg', '').replace('_fg', '')
var request = {
query: {
conditions: [
{key: 'title', value: title}
],
operator: '&'
},
keys: ['title', 'type', 'id', 'batch'],
range: [0, 10]
};
$element.appendTo($text);
pandora.api.find(request, function(response) {
response.data.items.forEach(item => {
if (item.id != data.id) {
var type = item.type ? item.type[0] : 'Unknown'
if (type == 'Voice Over' && item.batch) {
type = item.batch
}
$element.append(
` <a href="/${item.id}/info">${type}</a>`
)
}
})
$element.append(`[<a href="/grid/title/title=${pandora.escapeQueryValue(title)}">all</a>]`)
pandora.createLinks($element)
})
}
function toggleIconSize() {
iconSize = iconSize == 256 ? 512 : 256;
iconWidth = iconRatio > 1 ? iconSize : Math.round(iconSize * iconRatio);
iconHeight = iconRatio < 1 ? iconSize : Math.round(iconSize / iconRatio);
iconLeft = iconSize == 256 ? Math.floor((iconSize - iconWidth) / 2) : 0;
borderRadius = ui.icons == 'posters' ? 0 : iconSize / 8;
$icon.animate({
left: margin + iconLeft + 'px',
width: iconWidth + 'px',
height: iconHeight + 'px',
borderRadius: borderRadius + 'px'
}, 250);
$reflection.animate({
top: margin + iconHeight + 'px',
width: iconSize + 'px',
height: iconSize / 2 + 'px'
}, 250);
$reflectionIcon.animate({
left: iconLeft + 'px',
width: iconWidth + 'px',
height: iconHeight + 'px',
borderRadius: borderRadius + 'px'
}, 250);
$reflectionGradient.animate({
width: iconSize + 'px',
height: iconSize / 2 + 'px'
}, 250);
$text.animate({
left: margin + (iconSize == 256 ? 256 : iconWidth) + margin + 'px',
}, 250);
pandora.UI.set({infoIconSize: iconSize});
}
that.resizeElement = function() {
// overwrite splitpanel resize
};
that.reload = function() {
var src = '/' + data.id + '/' + (
ui.icons == 'posters' ? 'poster' : 'icon'
) + '512.jpg?' + Ox.uid();
$icon.attr({src: src});
$reflectionIcon.attr({src: src});
iconSize = iconSize == 256 ? 512 : 256;
iconRatio = ui.icons == 'posters'
? (ui.showSitePosters ? pandora.site.posters.ratio : data.posterRatio) : 1;
toggleIconSize();
};
that.bindEvent({
mousedown: function() {
setTimeout(function() {
!Ox.Focus.focusedElementIsInput() && that.gainFocus();
});
},
pandora_icons: that.reload,
pandora_showsiteposters: function() {
ui.icons == 'posters' && that.reload();
}
});
return that;
};

BIN
title.png

Binary file not shown.


View file

@ -1,32 +0,0 @@
def upgrade_originals():
import item.models
import itemlist.models
nt = itemlist.models.List.objects.get(name='No Type')
no = itemlist.models.List.objects.get(name='New Originals')
for i in nt.get_items(nt.user):
orig = item.models.Item.objects.get(data__title=i.get('title'), data__type=["Original"])
print(i, orig)
orig.files.all().update(selected=False)
i.files.all().update(item=orig)
orig.save()
orig.remove_poster()
orig.make_poster()
i.data['type'] = ['Empty']
i.save()
no.items.add(orig)
def remove_deselected_files():
import itemlist.models
il = itemlist.models.List.objects.get(name='New Originals')
for i in il.items.all():
changed = False
for f in i.files.filter(selected=False):
f.data.delete()
f.delete()
changed = True
if changed:
i.save()