Compare commits

..

No commits in common. "main" and "infinity" have entirely different histories.

18 changed files with 265 additions and 1083 deletions

View file

@ -63,7 +63,6 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"canExportAnnotations": {"member": true, "staff": true, "admin": true}, "canExportAnnotations": {"member": true, "staff": true, "admin": true},
"canImportAnnotations": {"member": true, "staff": true, "admin": true}, "canImportAnnotations": {"member": true, "staff": true, "admin": true},
"canImportItems": {"member": true, "staff": true, "admin": true}, "canImportItems": {"member": true, "staff": true, "admin": true},
"canTranscribeAudio": {},
"canManageDocuments": {"member": true, "staff": true, "admin": true}, "canManageDocuments": {"member": true, "staff": true, "admin": true},
"canManageEntities": {"member": true, "staff": true, "admin": true}, "canManageEntities": {"member": true, "staff": true, "admin": true},
"canManageHome": {"staff": true, "admin": true}, "canManageHome": {"staff": true, "admin": true},
@ -1015,7 +1014,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
the system (from). the system (from).
*/ */
"site": { "site": {
"description": "T for Time - pan.do/ra", "description": "This is a demo of pan.do/ra - a free, open source media archive. It allows you to manage large, decentralized collections of video, to collaboratively create metadata and time-based annotations, and to serve your archive as a cutting-edge web application.",
"email": { "email": {
// E-mail address in contact form (to) // E-mail address in contact form (to)
"contact": "system@time.0x2620.org", "contact": "system@time.0x2620.org",

View file

@ -9,7 +9,7 @@ User=pandora
Group=pandora Group=pandora
Nice=19 Nice=19
WorkingDirectory=/srv/pandora/pandora WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/pandora/manage.py infinity --config /etc/infinity.json ExecStart=/srv/pandora/pandora/manage.py infinity
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target

View file

@ -100,7 +100,7 @@ if os.path.exists('__init__.py'):
local_settings += '\nLOCAL_APPS = ["%s"]\n' % name local_settings += '\nLOCAL_APPS = ["%s"]\n' % name
local_settings_changed = True local_settings_changed = True
else: else:
apps = re.compile(r'(LOCAL_APPS.*?)\]', re.DOTALL).findall(local_settings)[0] apps = re.compile('(LOCAL_APPS.*?)\]', re.DOTALL).findall(local_settings)[0]
if name not in apps: if name not in apps:
new_apps = apps.strip() + ',\n"%s"\n' % name new_apps = apps.strip() + ',\n"%s"\n' % name
local_settings = local_settings.replace(apps, new_apps) local_settings = local_settings.replace(apps, new_apps)

View file

@ -2,43 +2,24 @@ import json
import os import os
import subprocess import subprocess
import ox
from django.core.management.base import BaseCommand from django.core.management.base import BaseCommand
from django.conf import settings from django.conf import settings
from ...render import add_translations
class Command(BaseCommand): class Command(BaseCommand):
help = 'export all subtitles for translations' help = 'export all subtitles for translations'
def add_arguments(self, parser): def add_arguments(self, parser):
parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language') pass
def handle(self, **options): def handle(self, **options):
import annotation.models import annotation.models
import item.models import item.models
lang = options["lang"]
if lang:
lang = lang.split(',')
tlang = lang[1:]
lang = lang[0]
else:
tlang = None
if lang == "en":
lang = None
for i in item.models.Item.objects.filter(data__type__contains='Voice Over').order_by('sort__title'): for i in item.models.Item.objects.filter(data__type__contains='Voice Over').order_by('sort__title'):
print("## %s %s" % (i.get("title"), i.public_id)) print("## %s %s" % (i.get("title"), i.public_id))
for sub in i.annotations.all().filter(layer='subtitles').exclude(value='').filter(languages=lang).order_by("start"): for sub in i.annotations.all().filter(layer='subtitles').exclude(value='').order_by("start"):
if tlang: if not sub.languages:
value = add_translations(sub, tlang) print(sub.value.strip() + "\n")
value = ox.strip_tags(value)
else:
value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if sub.languages:
value = ox.strip_tags(value)
print(value.strip() + "\n")
print("\n\n\n") print("\n\n\n")

View file

@ -1,6 +1,27 @@
from django.core.management.base import BaseCommand import json
import os
import re
from collections import defaultdict
from ...render import generate_clips from django.core.management.base import BaseCommand
from django.conf import settings
import item.models
import itemlist.models
from ...render import get_srt
def resolve_roman(s):
extra = re.compile('^\d+(.*?)$').findall(s)
if extra:
extra = extra[0].lower()
new = {
'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
'vi': '6', 'vii': 7, 'viii': '8', 'ix': '9', 'x': '10'
}.get(extra, extra)
return s.replace(extra, new)
return s
class Command(BaseCommand): class Command(BaseCommand):
@ -10,4 +31,84 @@ class Command(BaseCommand):
parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in') parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
def handle(self, **options): def handle(self, **options):
return generate_clips(options) prefix = options['prefix']
clips = []
for i in item.models.Item.objects.filter(sort__type='original'):
qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
if qs.count() >= 1:
clip = {}
durations = []
for e in item.models.Item.objects.filter(data__title=i.data['title']):
if 'type' not in e.data:
print("ignoring invalid video %s (no type)" % e)
continue
if not e.files.filter(selected=True).exists():
continue
source = e.files.filter(selected=True)[0].data.path
ext = os.path.splitext(source)[1]
type_ = e.data['type'][0].lower()
target = os.path.join(prefix, type_, i.data['title'] + ext)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(source, target)
clip[type_] = target
durations.append(e.files.filter(selected=True)[0].duration)
clip["duration"] = min(durations)
if not clip["duration"]:
print('!!', durations, clip)
continue
clip['tags'] = i.data.get('tags', [])
clip['editingtags'] = i.data.get('editingtags', [])
name = os.path.basename(clip['original'])
seqid = re.sub("Hotel Aporia_(\d+)", "S\\1_", name)
seqid = re.sub("Night March_(\d+)", "S\\1_", seqid)
seqid = re.sub("_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
seqid = seqid.split('_')[:2]
seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
seqid[1] = resolve_roman(seqid[1])
seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
if not seqid[1]:
seqid[1] = '0'
try:
clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
except:
print(name, seqid, 'failed')
raise
if "original" in clip and "foreground" in clip and "background" in clip:
clips.append(clip)
elif "original" in clip and "animation" in clip:
clips.append(clip)
else:
print("ignoring incomplete video", i)
with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
json.dump(clips, fd, indent=2, ensure_ascii=False)
print("using", len(clips), "clips")
voice_over = defaultdict(dict)
for vo in item.models.Item.objects.filter(
data__type__contains="Voice Over",
):
fragment_id = int(vo.get('title').split('_')[0])
source = vo.files.filter(selected=True)[0]
batch = vo.get('batch')[0].replace('Text-', '')
src = source.data.path
target = os.path.join(prefix, 'voice_over', batch, '%s.wav' % fragment_id)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(src, target)
subs = []
for sub in vo.annotations.filter(layer="subtitles").exclude(value="").order_by("start"):
sdata = get_srt(sub)
subs.append(sdata)
voice_over[fragment_id][batch] = {
"src": target,
"duration": source.duration,
"subs": subs
}
with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
json.dump(voice_over, fd, indent=2, ensure_ascii=False)

View file

@ -1,109 +0,0 @@
import json
import os
import subprocess
import ox
from django.core.management.base import BaseCommand
from django.conf import settings
from item.models import Item
from annotation.models import Annotation
class Command(BaseCommand):
help = 'export all subtitles for translations'
def add_arguments(self, parser):
parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
parser.add_argument('--test', action='store_true', dest='test', default=False, help='test run')
parser.add_argument('args', metavar='args', type=str, nargs='*', help='file or url')
def handle(self, filename, **options):
if not options["lang"]:
print("--lang is required")
return
lang = options["lang"]
if filename.startswith("http"):
data = ox.net.read_url(filename).decode()
else:
with open(filename) as fd:
data = fd.read()
data = ('\n' + data.strip()).split('\n## ')[1:]
invalid = []
valid = []
for block in data:
title, block = block.split('\n', 1)
block = block.strip()
title = title.strip()
item_id = title.split(' ')[-1]
item = Item.objects.get(public_id=item_id)
subtitles_en = item.annotations.filter(layer="subtitles", languages=None).exclude(value='')
lines = block.split('\n\n')
if len(lines) != subtitles_en.count():
print('%s: number of subtitles does not match, en: %s vs %s: %s' % (title, subtitles_en.count(), lang, len(lines)))
if options["test"]:
print(json.dumps(lines, indent=2, ensure_ascii=False))
print(json.dumps([s.value for s in subtitles_en.order_by('start')], indent=2, ensure_ascii=False))
continue
if options["test"]:
print('%s: valid %s subtitles' % (title, len(lines)))
else:
n = 0
item.annotations.filter(layer="subtitles", languages=lang).delete()
for sub_en in subtitles_en.order_by('start'):
sub = Annotation()
sub.item = sub_en.item
sub.user = sub_en.user
sub.layer = sub_en.layer
sub.start = sub_en.start
sub.end = sub_en.end
sub.value = '<span lang="%s">%s</span>' % (lang, lines[n])
sub.save()
n += 1
'''
srt = 'vocals_txt/%s/%s' % (title[0], title.replace('.wav', '.srt'))
filename = 'vocals_txt/%s/%s' % (title[0], title.replace('.wav', '.' + lang + '.srt'))
folder = os.path.dirname(filename)
if not os.path.exists(folder):
os.makedirs(folder)
data = json.load(open(srt + '.json'))
subs = block.replace('\n\n', '\n').split('\n')
if len(data) != len(subs):
print('invalid', title, 'expected', len(data), 'got', len(subs))
invalid.append('## %s\n\n%s' % (title, block))
valid.append('## %s\n\n%s' % (title, '\n\n'.join([d['value'] for d in data])))
continue
for i, sub in enumerate(data):
sub['value'] = subs[i]
kodata = ox.srt.encode(data)
current = None
if os.path.exists(filename):
with open(filename, 'rb') as fd:
current = fd.read()
if current != kodata:
print('update', title, filename)
with open(filename, 'wb') as fd:
fd.write(kodata)
with open(filename + '.json', 'w') as fd:
ko = [{
'in': s['in'],
'out': s['out'],
'value': s['value'],
} for s in data]
json.dump(ko, fd, ensure_ascii=False, indent=4)
if invalid:
with open('invalid_%s_subtitles.txt' % lang, 'w') as fd:
fd.write('\n\n\n\n'.join(invalid))
with open('invalid_%s_subtitles_en.txt' % lang, 'w') as fd:
fd.write('\n\n\n\n'.join(valid))
'''

View file

@ -13,19 +13,7 @@ class Command(BaseCommand):
def add_arguments(self, parser): def add_arguments(self, parser):
parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in') parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
parser.add_argument('--config', action='store', dest='config', default=None, help='config')
parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds') parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')
parser.add_argument('--single-file', action='store_true', dest='single_file', default=False, help='render to single video')
parser.add_argument('--keep-audio', action='store_true', dest='keep_audio', default=False, help='keep independent audio tracks')
parser.add_argument('--stereo-downmix', action='store_true', dest='stereo_downmix', default=False, help='stereo downmix')
parser.add_argument('--debug', action='store_true', dest='debug', default=False, help='output more info')
def handle(self, **options): def handle(self, **options):
if options.get("config"):
if os.path.exists(options["config"]):
with open(options["config"]) as fd:
config = json.load(fd)
options.update(config)
else:
print("unable to load config %s" % options["config"])
render_infinity(options) render_infinity(options)

View file

@ -16,10 +16,6 @@ class Command(BaseCommand):
parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds') parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')
parser.add_argument('--offset', action='store', dest='offset', default="1024", help='inital offset in pi') parser.add_argument('--offset', action='store', dest='offset', default="1024", help='inital offset in pi')
parser.add_argument('--no-video', action='store_true', dest='no_video', default=False, help='don\'t render video') parser.add_argument('--no-video', action='store_true', dest='no_video', default=False, help='don\'t render video')
parser.add_argument('--single-file', action='store_true', dest='single_file', default=False, help='render to single video')
parser.add_argument('--keep-audio', action='store_true', dest='keep_audio', default=False, help='keep independent audio tracks')
parser.add_argument('--stereo-downmix', action='store_true', dest='stereo_downmix', default=False, help='stereo downmix')
parser.add_argument('--debug', action='store_true', dest='debug', default=False, help='output more info')
def handle(self, **options): def handle(self, **options):
render_all(options) render_all(options)

View file

@ -13,7 +13,8 @@ class Command(BaseCommand):
def add_arguments(self, parser): def add_arguments(self, parser):
parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in') parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
parser.add_argument('--offset', action='store', dest='offset', default=None, help='inital offset in pi') parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')
parser.add_argument('--offset', action='store', dest='offset', default="1024", help='inital offset in pi')
def handle(self, **options): def handle(self, **options):
update_subtitles(options) update_subtitles(options)

View file

@ -7,7 +7,7 @@ Wants=network-online.target
Type=simple Type=simple
Restart=on-failure Restart=on-failure
KillSignal=SIGINT KillSignal=SIGINT
ExecStart=/srv/pandora/t_for_time/player/player.py --mode peer --playlist /srv/t_for_time/render/back.m3u --config /srv/t_for_time/render/back.json ExecStart=/srv/pandora/t_for_time/player/player.py --mode peer --playlist /srv/t_for_time/render/back.m3u
[Install] [Install]
WantedBy=graphical-session.target WantedBy=graphical-session.target

View file

@ -6,7 +6,7 @@ After=gnome-session.target network-online.target
Type=simple Type=simple
Restart=on-failure Restart=on-failure
KillSignal=SIGINT KillSignal=SIGINT
ExecStart=/srv/pandora/t_for_time/player/player.py --mode main --playlist /srv/t_for_time/render/front.m3u --config /srv/t_for_time/render/front.json ExecStart=/srv/pandora/t_for_time/player/player.py --mode main --playlist /srv/t_for_time/render/front.m3u
[Install] [Install]
WantedBy=graphical-session.target WantedBy=graphical-session.target

View file

@ -8,7 +8,6 @@ import time
from threading import Thread from threading import Thread
from datetime import datetime from datetime import datetime
import ox
import mpv import mpv
@ -20,17 +19,10 @@ SYNC_GRACE_TIME = 5
SYNC_JUMP_AHEAD = 1 SYNC_JUMP_AHEAD = 1
PORT = 9067 PORT = 9067
DEBUG = False DEBUG = False
FONT = 'Menlo'
CONFIG = { FONT_SIZE = 30
"font": "Menlo", FONT_BORDER = 4
"font_size": 30, SUB_MARGIN = 2 * 36 + 6
"font_border": 4,
"sub_border_color": "0.0/0.0/0.0/0.75",
"sub_margin": 2 * 36 + 6,
"sub_spacing": 0,
"vf": None,
"sync_group": None,
}
def hide_gnome_overview(): def hide_gnome_overview():
@ -52,7 +44,6 @@ class Main:
class Sync(Thread): class Sync(Thread):
active = True active = True
is_main = True is_main = True
is_paused = False
ready = False ready = False
destination = "255.255.255.255" destination = "255.255.255.255"
reload_check = None reload_check = None
@ -62,52 +53,32 @@ class Sync(Thread):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
self.is_main = kwargs.get('mode', 'main') == 'main' self.is_main = kwargs.get('mode', 'main') == 'main'
self.start_at_hour = kwargs.get("hour", False)
self.sock = self.init_socket() self.sock = self.init_socket()
self.main = Main() self.main = Main()
if self.is_main: if self.is_main:
self.socket_enable_broadcast() self.socket_enable_broadcast()
if kwargs.get("sax"):
self.sax = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True,
)
self.sax.loop_file = True
self.sax.play("/srv/t_for_time/render/Saxophone-5.1.mp4")
else:
self.sax = None
if mpv.MPV_VERSION >= (2, 2): if mpv.MPV_VERSION >= (2, 2):
self.mpv = mpv.MPV( self.mpv = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True, log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True, input_vo_keyboard=True,
sub_font_size=CONFIG["font_size"], sub_font=CONFIG["font"], sub_font_size=FONT_SIZE, sub_font=FONT,
sub_border_size=CONFIG["font_border"], sub_border_size=FONT_BORDER,
sub_border_color=CONFIG["sub_border_color"], sub_margin_y=SUB_MARGIN,
sub_margin_y=CONFIG["sub_margin"],
sub_ass_line_spacing=CONFIG["sub_spacing"],
) )
else: else:
self.mpv = mpv.MPV( self.mpv = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True, log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True, input_vo_keyboard=True,
sub_text_font_size=CONFIG["font_size"], sub_text_font=CONFIG["font"], sub_text_font_size=FONT_SIZE, sub_text_font=FONT,
sub_border_size=CONFIG["font_border"], sub_border_size=FONT_BORDER,
sub_border_color=CONFIG["sub_border_color"], sub_margin_y=SUB_MARGIN,
sub_margin_y=CONFIG["sub_margin"],
sub_ass_line_spacing=CONFIG["sub_spacing"],
) )
if CONFIG.get("vf"):
self.mpv.vf = CONFIG["vf"]
self.mpv.observe_property('time-pos', self.time_pos_cb) self.mpv.observe_property('time-pos', self.time_pos_cb)
self.mpv.fullscreen = kwargs.get('fullscreen', False) self.mpv.fullscreen = kwargs.get('fullscreen', False)
self.mpv.loop_file = False self.mpv.loop_file = False
self.mpv.loop_playlist = True self.mpv.loop_playlist = True
self.mpv.register_key_binding('q', self.q_binding) self.mpv.register_key_binding('q', self.q_binding)
self.mpv.register_key_binding('s', self.s_binding)
self.mpv.register_key_binding('p', self.p_binding)
self.mpv.register_key_binding('SPACE', self.space_binding)
self.playlist = kwargs['playlist'] self.playlist = kwargs['playlist']
self.playlist_mtime = os.stat(self.playlist).st_mtime self.playlist_mtime = os.stat(self.playlist).st_mtime
self.mpv.loadlist(self.playlist) self.mpv.loadlist(self.playlist)
@ -119,31 +90,6 @@ class Sync(Thread):
time.sleep(0.1) time.sleep(0.1)
self.mpv.pause = True self.mpv.pause = True
self.sync_to_main() self.sync_to_main()
elif self.start_at_hour:
self.mpv.pause = True
fmt = '%Y-%m-%d %H'
now = datetime.now()
offset = (now - datetime.strptime(now.strftime(fmt), fmt)).total_seconds()
if self.sax:
self.sax.wait_until_playing()
self.sax.seek(offset, 'absolute', 'exact')
self.sax.pause = True
position = 0
for idx, item in enumerate(self.mpv.playlist):
duration = ox.avinfo(item['filename'])['duration']
if position + duration > offset:
pos = offset - position
self.mpv.playlist_play_index(idx)
self.mpv.pause = False
self.mpv.wait_until_playing()
self.mpv.seek(pos, 'absolute', 'exact')
time.sleep(0.1)
break
else:
position += duration
if self.sax:
self.sax.pause = False
self.ready = True self.ready = True
Thread.__init__(self) Thread.__init__(self)
self.start() self.start()
@ -160,45 +106,15 @@ class Sync(Thread):
else: else:
self.read_position_main() self.read_position_main()
self.reload_playlist() self.reload_playlist()
if not self.is_paused and self._tick and abs(time.time() - self._tick) > 60: if self._tick and abs(time.time() - self._tick) > 60:
logger.error("player is stuck") logger.error("player is stuck")
self._tick = 0 self._tick = 0
self.stop() self.stop()
self.mpv.stop() self.mpv.stop()
def is_keydown(self, args):
if args and args[0] and args[0][0] == 'd':
return True
return False
def q_binding(self, *args): def q_binding(self, *args):
if self.is_keydown(args): self.stop()
self.stop() self.mpv.stop()
self.mpv.stop()
def space_binding(self, *args):
if self.is_keydown(args):
if self.mpv.pause:
self.p_binding(*args)
else:
self.s_binding(*args)
def s_binding(self, *args):
if self.is_keydown(args):
self.is_paused = True
self.mpv.pause = True
if self.sax:
self.sax.pause = True
self.send_playback_state()
def p_binding(self, *args):
if self.is_keydown(args):
self.is_paused = False
self._tick = 0
self.mpv.pause = False
if self.sax:
self.sax.pause = False
self.send_playback_state()
def stop(self, *args): def stop(self, *args):
self.active = False self.active = False
@ -279,8 +195,6 @@ class Sync(Thread):
"%0.4f %s" "%0.4f %s"
% (self.mpv.time_pos, self.mpv.playlist_current_pos) % (self.mpv.time_pos, self.mpv.playlist_current_pos)
).encode() ).encode()
if CONFIG.get("sync_group"):
msg = ("%s " % CONFIG["sync_group"]).encode() + msg
except: except:
return return
try: try:
@ -288,47 +202,18 @@ class Sync(Thread):
except socket.error as e: except socket.error as e:
logger.error("send failed: %s", e) logger.error("send failed: %s", e)
def send_playback_state(self):
state = 'pause' if self.mpv.pause else 'play'
msg = ("%s -1" % state).encode()
try:
self.sock.send(msg)
except socket.error as e:
logger.error("send failed: %s", e)
# #
# follower specific # follower specific
# #
_last_ping = None
def read_position_main(self): def read_position_main(self):
self.sock.settimeout(5) self.sock.settimeout(5)
while True: try:
try: data = self.sock.recvfrom(1024)[0].decode().split(" ", 1)
data = self.sock.recvfrom(1024)[0].decode().split(" ", 1) except socket.timeout:
except socket.timeout: logger.error("failed to receive data from main")
if self._last_ping != "pause": except OSError:
logger.error("failed to receive data from main") logger.error("socket closed")
return
except OSError:
logger.error("socket closed")
return
if CONFIG.get("sync_group"):
if data[0] == str(CONFIG["sync_group"]):
data = data[1].split(" ", 1)
break
else:
break
self._last_ping = data[0]
if data[0] == "pause":
self.is_paused = True
self.mpv.pause = True
elif data[0] == "play":
self.is_paused = False
self._tick = 0
self.mpv.pause = False
else: else:
self.main.time_pos = float(data[0]) self.main.time_pos = float(data[0])
self.main.playlist_current_pos = int(data[1]) self.main.playlist_current_pos = int(data[1])
@ -417,26 +302,16 @@ def main():
parser.add_argument('--prefix', help='video location', default=prefix) parser.add_argument('--prefix', help='video location', default=prefix)
parser.add_argument('--window', action='store_true', help='run in window', default=False) parser.add_argument('--window', action='store_true', help='run in window', default=False)
parser.add_argument('--debug', action='store_true', help='debug', default=False) parser.add_argument('--debug', action='store_true', help='debug', default=False)
parser.add_argument('--hour', action='store_true', help='hour', default=False)
parser.add_argument('--sax', action='store_true', help='hour', default=False)
parser.add_argument('--config', help='config', default=None)
args = parser.parse_args() args = parser.parse_args()
DEBUG = args.debug DEBUG = args.debug
if DEBUG: if DEBUG:
log_format = '%(asctime)s:%(levelname)s:%(name)s:%(message)s' log_format = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format) logging.basicConfig(level=logging.DEBUG, format=log_format)
if args.config:
if os.path.exists(args.config):
with open(args.config) as fd:
CONFIG.update(json.load(fd))
else:
logger.error("config file %s does not exist, skipping", args.config)
base = os.path.dirname(os.path.abspath(__file__)) base = os.path.dirname(os.path.abspath(__file__))
#os.chdir(base) #os.chdir(base)
player = Sync(mode=args.mode, playlist=args.playlist, fullscreen=not args.window, hour=args.hour, sax=args.sax) player = Sync(mode=args.mode, playlist=args.playlist, fullscreen=not args.window)
while player.active: while player.active:
try: try:
player.mpv.wait_for_playback() player.mpv.wait_for_playback()

661
render.py
View file

@ -11,11 +11,42 @@ import time
from pathlib import Path from pathlib import Path
import ox import ox
import lxml.etree
from .pi import random from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE, melt_xml, get_melt from .render_kdenlive import KDEnliveProject, _CACHE
from .render_utils import *
def random_int(seq, length):
n = n_ = length - 1
#print('len', n)
if n == 0:
return n
r = seq() / 9 * 10
base = 10
while n > 10:
n /= 10
r += seq() / 9 * 10
base += 10
r = int(round(n_ * r / base))
return r
def random_choice(seq, items, pop=False):
n = random_int(seq, len(items))
if pop:
return items.pop(n)
return items[n]
def chance(seq, chance):
return (seq() / 10) < chance
def get_clip_by_seqid(clips, seqid):
selected = None
for i, clip in enumerate(clips):
if clip['seqid'] == seqid:
selected = i
break
if selected is not None:
return clips.pop(i)
return None
def write_if_new(path, data, mode=''): def write_if_new(path, data, mode=''):
@ -28,18 +59,13 @@ def write_if_new(path, data, mode=''):
old = "" old = ""
is_new = data != old is_new = data != old
if path.endswith(".kdenlive"): if path.endswith(".kdenlive"):
is_new = re.sub(r'\{.{36}\}', '', data) != re.sub(r'\{.{36}\}', '', old) is_new = re.sub('\{.{36}\}', '', data) != re.sub('\{.{36}\}', '', old)
if is_new: if is_new:
with open(path, write_mode) as fd: with open(path, write_mode) as fd:
fd.write(data) fd.write(data)
def format_duration(duration, fps):
return float('%0.5f' % (round(duration * fps) / fps))
def compose(clips, target=150, base=1024, voice_over=None, options=None): def compose(clips, target=150, base=1024, voice_over=None):
if options is None:
options = {}
fps = 24
length = 0 length = 0
scene = { scene = {
'front': { 'front': {
@ -74,7 +100,6 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
used = [] used = []
voice_overs = [] voice_overs = []
sub_offset = 0
if voice_over: if voice_over:
vo_keys = list(sorted(voice_over)) vo_keys = list(sorted(voice_over))
if chance(seq, 0.5): if chance(seq, 0.5):
@ -93,7 +118,7 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
if vo_min > target: if vo_min > target:
target = vo_min target = vo_min
elif vo_min < target: elif vo_min < target:
offset = format_duration((target - vo_min) / 2, fps) offset = (target - vo_min) / 2
scene['audio-center']['A1'].append({ scene['audio-center']['A1'].append({
'blank': True, 'blank': True,
'duration': offset 'duration': offset
@ -107,29 +132,17 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
subs = [] subs = []
for vo in voice_overs: for vo in voice_overs:
voc = vo.copy() voc = vo.copy()
a, b = '-11', '-3' a, b = '3', '-6'
if 'Whispered' in voc['src']: if 'Whispered' in voc['src']:
a, b = '-8', '0' a, b = '6', '-3'
elif 'Read' in voc['src']: elif 'Read' in voc['src']:
a, b = '-7.75', '0.25' a, b = '6.25', '-2.75'
elif 'Free' in voc['src']: elif 'Free' in voc['src']:
a, b = '-8.8', '-0.8' a, b = '5.2', '-3.8'
elif 'Ashley' in voc['src']: elif 'Ashley' in voc['src']:
a, b = '-9.5', '-1.50' a, b = '3.75', '-5.25'
elif 'Melody' in voc['src']: elif 'Melody' in voc['src']:
a, b = '-5.25', '-0.25' a, b = '4.25', '-4.75'
if options.get('stereo_downmix'):
a, b = '-9', '-1'
if 'Whispered' in voc['src']:
a, b = '-6', '2'
elif 'Read' in voc['src']:
a, b = '-5.75', '2.25'
elif 'Free' in voc['src']:
a, b = '-6.8', '3.2'
elif 'Ashley' in voc['src']:
a, b = '-7.5', '0.50'
elif 'Melody' in voc['src']:
a, b = '-3.25', '1.75'
voc['filter'] = {'volume': a} voc['filter'] = {'volume': a}
scene['audio-center']['A1'].append(voc) scene['audio-center']['A1'].append(voc)
vo_low = vo.copy() vo_low = vo.copy()
@ -173,7 +186,7 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
if length + clip['duration'] > target and length >= vo_min: if length + clip['duration'] > target and length >= vo_min:
break break
print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original'])) print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
length += int(clip['duration'] * fps) / fps length += clip['duration']
if "foreground" not in clip and "animation" in clip: if "foreground" not in clip and "animation" in clip:
fg = clip['animation'] fg = clip['animation']
@ -266,16 +279,13 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
blur = seq() * 3 blur = seq() * 3
if blur: if blur:
scene['back']['V1'][-1]['filter']['blur'] = blur scene['back']['V1'][-1]['filter']['blur'] = blur
volume_back = '-8.2'
if options.get('stereo_downmix'):
volume_back = '-7.2'
scene['audio-back']['A1'].append({ scene['audio-back']['A1'].append({
'duration': clip['duration'], 'duration': clip['duration'],
'src': clip['original'], 'src': clip['original'],
'filter': {'volume': volume_back}, 'filter': {'volume': '+0.2'},
}) })
# TBD: Foley # TBD: Foley
cf_volume = '-2.5' cf_volume = '-5.5'
scene['audio-front']['A2'].append({ scene['audio-front']['A2'].append({
'duration': clip['duration'], 'duration': clip['duration'],
'src': foley, 'src': foley,
@ -288,54 +298,26 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
}) })
used.append(clip) used.append(clip)
print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min)) print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
scene_duration = int(get_scene_duration(scene) * fps)
sub_offset = int(sub_offset * fps)
if sub_offset < scene_duration:
delta = format_duration((scene_duration - sub_offset) / fps, fps)
print(">> add %0.3f of silence.. %0.3f (scene_duration)" % (delta, scene_duration / fps))
scene['audio-center']['A1'].append({
'blank': True,
'duration': delta
})
scene['audio-rear']['A1'].append({
'blank': True,
'duration': delta
})
elif sub_offset > scene_duration:
delta = format_duration((scene_duration - sub_offset) / fps, fps)
scene['audio-center']['A1'][-1]["duration"] += delta
scene['audio-rear']['A1'][-1]["duration"] += delta
print("WTF, needed to cut %s new duration: %s" % (delta, scene['audio-center']['A1'][-1]["duration"]))
print(scene['audio-center']['A1'][-1])
return scene, used return scene, used
def write_subtitles(data, folder, options): def get_scene_duration(scene):
data = fix_overlaps(data) duration = 0
path = folder / "front.srt" for key, value in scene.items():
if options.get("subtitle_format") == "srt": for name, clips in value.items():
srt = ox.srt.encode(data) for clip in clips:
write_if_new(str(path), srt, 'b') duration += clip['duration']
path = folder / "front.ass" return duration
if os.path.exists(path):
os.unlink(path)
else:
if os.path.exists(path):
os.unlink(path)
path = folder / "front.ass"
ass = ass_encode(data, options)
write_if_new(str(path), ass, '')
def render(root, scene, prefix=''):
def render(root, scene, prefix='', options=None):
if options is None:
options = {}
fps = 24 fps = 24
files = [] files = []
scene_duration = int(get_scene_duration(scene) * fps) scene_duration = int(get_scene_duration(scene) * 24)
for timeline, data in scene.items(): for timeline, data in scene.items():
if timeline == "subtitles": if timeline == "subtitles":
folder = Path(root) / prefix path = os.path.join(root, prefix + "front.srt")
write_subtitles(data, folder, options) data = fix_overlaps(data)
srt = ox.srt.encode(data)
write_if_new(path, srt, 'b')
continue continue
#print(timeline) #print(timeline)
project = KDEnliveProject(root) project = KDEnliveProject(root)
@ -346,34 +328,18 @@ def render(root, scene, prefix='', options=None):
#print(track) #print(track)
for clip in clips: for clip in clips:
project.append_clip(track, clip) project.append_clip(track, clip)
track_durations[track] = sum([int(c['duration'] * fps) for c in clips]) track_durations[track] = int(sum([c['duration'] for c in clips]) * 24)
if timeline.startswith('audio-'): if timeline.startswith('audio-'):
track_duration = project.get_duration() track_duration = project.get_duration()
delta = scene_duration - track_duration delta = scene_duration - track_duration
if delta > 0: if delta > 0:
for track in track_durations: for track in track_durations:
if track_durations[track] == track_duration: if track_durations[track] == track_duration:
project.append_clip(track, {'blank': True, "duration": delta/fps}) project.append_clip(track, {'blank': True, "duration": delta/24})
break
path = os.path.join(root, prefix + "%s.kdenlive" % timeline) path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
project_xml = project.to_xml() project_xml = project.to_xml()
write_if_new(path, project_xml) write_if_new(path, project_xml)
if options["debug"]:
# check duration
out_duration = get_project_duration(path)
p_duration = project.get_duration()
print(path, 'out: %s, project: %s, scene: %s' %(out_duration, p_duration, scene_duration))
if p_duration != scene_duration:
print(path, 'FAIL project: %s, scene: %s' %(p_duration, scene_duration))
_cache = os.path.join(root, "cache.json")
with open(_cache, "w") as fd:
json.dump(_CACHE, fd)
sys.exit(1)
if out_duration != p_duration:
print(path, 'fail got: %s expected: %s' %(out_duration, p_duration))
sys.exit(1)
files.append(path) files.append(path)
return files return files
@ -417,19 +383,15 @@ def get_fragments(clips, voice_over, prefix):
fragment['clips'] = [] fragment['clips'] = []
for clip in clips: for clip in clips:
#if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']): #if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']):
key = 'original' if clip['original'] in originals:
original = clip['original']
if 'original_censored' in clip:
original = clip['original_censored']
if original in originals:
fragment['clips'].append(clip) fragment['clips'].append(clip)
fragment["voice_over"] = voice_over.get(str(fragment["id"]), {}) fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
fragments.append(fragment) fragments.append(fragment)
fragments.sort(key=lambda f: ox.sort_string(f['name'])) fragments.sort(key=lambda f: ox.sort_string(f['name']))
return fragments return fragments
def render_all(options): def render_all(options):
options = load_defaults(options)
prefix = options['prefix'] prefix = options['prefix']
duration = int(options['duration']) duration = int(options['duration'])
base = int(options['offset']) base = int(options['offset'])
@ -467,13 +429,7 @@ def render_all(options):
fragment_clips = fragment['clips'] fragment_clips = fragment['clips']
unused_fragment_clips = [c for c in fragment_clips if c not in clips_used] unused_fragment_clips = [c for c in fragment_clips if c not in clips_used]
print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips)) print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
scene, used = compose( scene, used = compose(unused_fragment_clips, target=target, base=fragment_base, voice_over=fragment['voice_over'])
unused_fragment_clips,
target=target,
base=fragment_base,
voice_over=fragment['voice_over'],
options=options
)
clips_used += used clips_used += used
scene_duration = get_scene_duration(scene) scene_duration = get_scene_duration(scene)
print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target)) print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
@ -488,19 +444,20 @@ def render_all(options):
elif position < target_position: elif position < target_position:
target = target + 0.1 * fragment_target target = target + 0.1 * fragment_target
timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/', options) timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
scene_json = json.dumps(scene, indent=2, ensure_ascii=False) scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json) write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)
if not options['no_video'] and not options["single_file"]: if not options['no_video']:
for timeline in timelines: for timeline in timelines:
print(timeline) print(timeline)
ext = '.mp4' ext = '.mp4'
if '/audio' in timeline: if '/audio' in timeline:
ext = '.wav' ext = '.wav'
cmd = get_melt() + [ cmd = [
timeline, 'xvfb-run', '-a',
'melt', timeline,
'-quiet', '-quiet',
'-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext), '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
] ]
@ -522,8 +479,8 @@ def render_all(options):
subprocess.call(cmd) subprocess.call(cmd)
os.unlink(timeline.replace('.kdenlive', ext)) os.unlink(timeline.replace('.kdenlive', ext))
cmds = []
fragment_prefix = Path(fragment_prefix) fragment_prefix = Path(fragment_prefix)
cmds = []
for src, out1, out2 in ( for src, out1, out2 in (
("audio-front.wav", "fl.wav", "fr.wav"), ("audio-front.wav", "fl.wav", "fr.wav"),
("audio-center.wav", "fc.wav", "lfe.wav"), ("audio-center.wav", "fc.wav", "lfe.wav"),
@ -550,304 +507,79 @@ def render_all(options):
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]", "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
"-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4" "-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
]) ])
audio_front = "audio-5.1.mp4"
audio_back = "audio-back.wav"
copy = '-c'
if options["stereo_downmix"]:
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "audio-front.wav",
"-i", fragment_prefix / "audio-center.wav",
"-i", fragment_prefix / "audio-rear.wav",
"-i", fragment_prefix / audio_back,
"-filter_complex",
"amix=inputs=4:duration=longest:dropout_transition=0",
'-ac', '2', fragment_prefix / "audio-stereo.wav"
])
audio_front = "audio-stereo.wav"
audio_back = "audio-stereo.wav"
copy = '-c:v'
cmds.append([ cmds.append([
"ffmpeg", "-y", "ffmpeg", "-y",
"-nostats", "-loglevel", "error", "-nostats", "-loglevel", "error",
"-i", fragment_prefix / "front.mp4", "-i", fragment_prefix / "front.mp4",
"-i", fragment_prefix / audio_front, "-i", fragment_prefix / "audio-5.1.mp4",
copy, "copy", "-c", "copy",
"-movflags", "+faststart", fragment_prefix / "front-5.1.mp4",
fragment_prefix / "front-mixed.mp4",
]) ])
cmds.append([ cmds.append([
"ffmpeg", "-y", "ffmpeg", "-y",
"-nostats", "-loglevel", "error", "-nostats", "-loglevel", "error",
"-i", fragment_prefix / "back.mp4", "-i", fragment_prefix / "back.mp4",
"-i", fragment_prefix / audio_back, "-i", fragment_prefix / "audio-back.wav",
"-c:v", "copy", "-c:v", "copy",
"-movflags", "+faststart",
fragment_prefix / "back-audio.mp4", fragment_prefix / "back-audio.mp4",
]) ])
for cmd in cmds: for cmd in cmds:
if options["debug"]: #print(" ".join([str(x) for x in cmd]))
print(" ".join([str(x) for x in cmd]))
subprocess.call(cmd) subprocess.call(cmd)
if options.get("debug"):
for a, b in (
("back-audio.mp4", "back.mp4"),
("front-mixed.mp4", "front.mp4"),
):
duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
if duration_a != duration_b:
print('!!', duration_a, fragment_prefix / a)
print('!!', duration_b, fragment_prefix / b)
sys.exit(-1)
for a, b in ( for a, b in (
("back-audio.mp4", "front-mixed.mp4"), ("back-audio.mp4", "back.mp4"),
("front-5.1.mp4", "back.mp4"),
): ):
duration_a = ox.avinfo(str(fragment_prefix / a))['duration'] duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
duration_b = ox.avinfo(str(fragment_prefix / b))['duration'] duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
if duration_a != duration_b: if duration_a != duration_b:
print('!!', duration_a, fragment_prefix / a) print('!!', duration_a, fragment_prefix / a)
print('!!', duration_b, fragment_prefix / b) print('!!', duration_b, fragment_prefix / b)
sys.exit(-1)
shutil.move(fragment_prefix / "back-audio.mp4", fragment_prefix / "back.mp4") shutil.move(fragment_prefix / "back-audio.mp4", fragment_prefix / "back.mp4")
shutil.move(fragment_prefix / "front-mixed.mp4", fragment_prefix / "front.mp4") shutil.move(fragment_prefix / "front-5.1.mp4", fragment_prefix / "front.mp4")
if options["keep_audio"]:
shutil.move(fragment_prefix / "audio-center.wav", fragment_prefix / "vocals.wav")
shutil.move(fragment_prefix / "audio-front.wav", fragment_prefix / "foley.wav")
shutil.move(fragment_prefix / "audio-back.wav", fragment_prefix / "original.wav")
for fn in ( for fn in (
"audio-5.1.mp4", "audio-5.1.mp4",
"audio-center.wav", "audio-rear.wav", "audio-center.wav", "audio-rear.wav", "audio-center.wav",
"audio-front.wav", "audio-back.wav", "back-audio.mp4", "audio-front.wav", "audio-back.wav", "back-audio.mp4",
"fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav", "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
"audio-stereo.wav",
): ):
fn = fragment_prefix / fn fn = fragment_prefix / fn
if os.path.exists(fn): if os.path.exists(fn):
os.unlink(fn) os.unlink(fn)
if options["single_file"]:
cmds = []
base_prefix = Path(base_prefix)
for timeline in (
"front",
"back",
"audio-back",
"audio-center",
"audio-front",
"audio-rear",
):
timelines = list(sorted(glob('%s/*/%s.kdenlive' % (base_prefix, timeline))))
ext = '.mp4'
if '/audio' in timelines[0]:
ext = '.wav'
out = base_prefix / (timeline + ext)
cmd = get_melt() + timelines + [
'-quiet',
'-consumer', 'avformat:%s' % out,
]
if ext == '.wav':
cmd += ['vn=1']
else:
cmd += ['an=1']
cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
cmds.append(cmd)
for src, out1, out2 in (
("audio-front.wav", "fl.wav", "fr.wav"),
("audio-center.wav", "fc.wav", "lfe.wav"),
("audio-rear.wav", "bl.wav", "br.wav"),
):
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / src,
"-filter_complex",
"[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
"-map", "[left]", base_prefix / out1,
"-map", "[right]", base_prefix / out2,
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / "fl.wav",
"-i", base_prefix / "fr.wav",
"-i", base_prefix / "fc.wav",
"-i", base_prefix / "lfe.wav",
"-i", base_prefix / "bl.wav",
"-i", base_prefix / "br.wav",
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
"-map", "[a]", "-c:a", "aac", base_prefix / "audio-5.1.mp4"
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / "front.mp4",
"-i", base_prefix / "audio-5.1.mp4",
"-c", "copy",
"-movflags", "+faststart",
base_prefix / "front-mixed.mp4",
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / "back.mp4",
"-i", base_prefix / "audio-back.wav",
"-c:v", "copy",
"-movflags", "+faststart",
base_prefix / "back-audio.mp4",
])
for cmd in cmds:
if options["debug"]:
print(" ".join([str(x) for x in cmd]))
subprocess.call(cmd)
for a, b in (
("back-audio.mp4", "back.mp4"),
("front-mixed.mp4", "back.mp4"),
):
duration_a = ox.avinfo(str(base_prefix / a))['duration']
duration_b = ox.avinfo(str(base_prefix / b))['duration']
if duration_a != duration_b:
print('!!', duration_a, base_prefix / a)
print('!!', duration_b, base_prefix / b)
sys.exit(-1)
shutil.move(base_prefix / "back-audio.mp4", base_prefix / "back.mp4")
shutil.move(base_prefix / "front-mixed.mp4", base_prefix / "front.mp4")
if options["keep_audio"]:
shutil.move(base_prefix / "audio-center.wav", base_prefix / "vocals.wav")
shutil.move(base_prefix / "audio-front.wav", base_prefix / "foley.wav")
shutil.move(base_prefix / "audio-back.wav", base_prefix / "original.wav")
for fn in (
"audio-5.1.mp4",
"audio-center.wav", "audio-rear.wav",
"audio-front.wav", "audio-back.wav", "back-audio.mp4",
"fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
):
fn = base_prefix / fn
if os.path.exists(fn):
os.unlink(fn)
join_subtitles(base_prefix, options)
print("Duration - Target: %s Actual: %s" % (target_position, position)) print("Duration - Target: %s Actual: %s" % (target_position, position))
print(json.dumps(dict(stats), sort_keys=True, indent=2)) print(json.dumps(dict(stats), sort_keys=True, indent=2))
with open(_cache, "w") as fd: with open(_cache, "w") as fd:
json.dump(_CACHE, fd) json.dump(_CACHE, fd)
def add_translations(sub, lang): def get_srt(sub, offset=0):
value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if sub.languages:
value = ox.strip_tags(value)
if lang:
for slang in lang:
if slang == "en":
slang = None
for tsub in sub.item.annotations.filter(layer="subtitles", start=sub.start, end=sub.end, languages=slang):
tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if tsub.languages:
tvalue = ox.strip_tags(tvalue)
value += '\n' + tvalue
return value
def add_translations_dict(sub, langs):
    """Collect a subtitle's text plus its translations, keyed by language.

    sub: a "subtitles" annotation; langs: list of language codes to include.
    Annotations with no language set are treated as English ("en").
    Returns {lang: text}.
    """
    values = {}
    # normalize the various <br> spellings to plain newlines
    value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if sub.languages:
        # NOTE(review): tags are only stripped when a language is set —
        # presumably translated values carry markup; confirm against data
        value = ox.strip_tags(value)
        values[sub.languages] = value
    else:
        values["en"] = value
    for slang in langs:
        # English is stored as languages=None in the annotation table
        slang_value = None if slang == "en" else slang
        if sub.languages == slang_value:
            continue  # this language was already added above
        # translations are matched by identical start/end on the same item;
        # if several match, the last one wins
        for tsub in sub.item.annotations.filter(
            layer="subtitles", start=sub.start, end=sub.end,
            languages=slang_value
        ):
            tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
            if tsub.languages:
                tvalue = ox.strip_tags(tvalue)
            values[slang] = tvalue
    return values
def get_srt(sub, offset, lang, tlang):
sdata = sub.json(keys=['in', 'out', 'value']) sdata = sub.json(keys=['in', 'out', 'value'])
sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip() sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n')
if tlang:
sdata['value'] = add_translations(sub, tlang)
langs = [lang]
if tlang:
langs += tlang
sdata['values'] = add_translations_dict(sub, langs)
if offset: if offset:
sdata["in"] += offset sdata["in"] += offset
sdata["out"] += offset sdata["out"] += offset
return sdata return sdata
def scene_subtitles(scene, options): def fix_overlaps(data):
import item.models previous = None
offset = 0 for sub in data:
subs = [] if previous is None:
lang, tlang = parse_lang(options["lang"]) previous = sub
for clip in scene['audio-center']['A1']: else:
if not clip.get("blank"): if sub['in'] < previous['out']:
batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:] previous['out'] = sub['in'] - 0.001
vo = item.models.Item.objects.filter( previous = sub
data__batch__icontains=batch, data__title__startswith=fragment_id + '_' return data
).first()
if vo:
#print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
for sub in vo.annotations.filter(
layer="subtitles"
).filter(
languages=None if lang == "en" else lang
).exclude(value="").order_by("start"):
sdata = get_srt(sub, offset, lang, tlang)
subs.append(sdata)
else:
print("could not find vo for %s" % clip['src'])
offset += clip['duration']
return subs
def load_defaults(options):
    """Merge defaults from <prefix>/options.json into options, in place.

    Existing keys in options always win; only missing keys are filled in.
    When no options.json exists, a built-in fallback is used.
    Returns the (mutated) options dict.
    """
    path = os.path.join(options["prefix"], "options.json")
    if os.path.exists(path):
        with open(path) as fd:
            defaults = json.load(fd)
    else:
        print("no defaults defined at %s" % path)
        defaults = {"lang": "en", "censored": None}
    for key, value in defaults.items():
        options.setdefault(key, value)
    return options
def update_subtitles(options): def update_subtitles(options):
import item.models import item.models
options = load_defaults(options)
prefix = Path(options['prefix']) prefix = Path(options['prefix'])
duration = int(options['duration'])
if options['offset'] is None:
offsets = [int(folder) for folder in os.listdir(prefix / 'render') if folder.isdigit()]
for offset in offsets:
options['offset'] = offset
update_subtitles(options)
return
base = int(options['offset']) base = int(options['offset'])
lang, tlang = parse_lang(options["lang"])
_cache = os.path.join(prefix, "cache.json") _cache = os.path.join(prefix, "cache.json")
if os.path.exists(_cache): if os.path.exists(_cache):
@ -857,13 +589,27 @@ def update_subtitles(options):
base_prefix = prefix / 'render' / str(base) base_prefix = prefix / 'render' / str(base)
for folder in os.listdir(base_prefix): for folder in os.listdir(base_prefix):
folder = base_prefix / folder folder = base_prefix / folder
scene_json = folder / "scene.json" with open(folder / "scene.json") as fd:
if not os.path.exists(scene_json):
continue
with open(scene_json) as fd:
scene = json.load(fd) scene = json.load(fd)
subs = scene_subtitles(scene, options) offset = 0
write_subtitles(subs, folder, options) subs = []
for clip in scene['audio-center']['A1']:
if not clip.get("blank"):
batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
vo = item.models.Item.objects.filter(data__batch__icontains=batch, data__title__startswith=fragment_id + '_').first()
if vo:
#print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
for sub in vo.annotations.filter(layer="subtitles").exclude(value="").order_by("start"):
sdata = get_srt(sub, offset)
subs.append(sdata)
else:
print("could not find vo for %s" % clip['src'])
offset += clip['duration']
path = folder / "front.srt"
data = fix_overlaps(subs)
srt = ox.srt.encode(subs)
write_if_new(str(path), srt, 'b')
def update_m3u(render_prefix, exclude=[]): def update_m3u(render_prefix, exclude=[]):
files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4")) files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4"))
@ -890,24 +636,19 @@ def render_infinity(options):
prefix = options['prefix'] prefix = options['prefix']
duration = int(options['duration']) duration = int(options['duration'])
defaults = {
"offset": 100,
"max-items": 30,
"no_video": False,
}
state_f = os.path.join(prefix, "infinity.json") state_f = os.path.join(prefix, "infinity.json")
if os.path.exists(state_f): if os.path.exists(state_f):
with open(state_f) as fd: with open(state_f) as fd:
state = json.load(fd) state = json.load(fd)
else: else:
state = {} state = {
for key in ("prefix", "duration", "debug", "single_file", "keep_audio", "stereo_downmix"): "offset": 100,
"max-items": 30,
"no_video": False,
}
for key in ("prefix", "duration"):
state[key] = options[key] state[key] = options[key]
for key in defaults:
if key not in state:
state[key] = defaults[key]
while True: while True:
render_prefix = state["prefix"] + "/render/" render_prefix = state["prefix"] + "/render/"
current = [ current = [
@ -915,8 +656,8 @@ def render_infinity(options):
if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100 if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
] ]
if len(current) > state["max-items"]: if len(current) > state["max-items"]:
current = ox.sorted_strings(current) current = list(reversed(ox.sorted_strings(current)))
remove = current[:-state["max-items"]] remove = list(reversed(current[-state["max-items"]:]))
update_m3u(render_prefix, exclude=remove) update_m3u(render_prefix, exclude=remove)
for folder in remove: for folder in remove:
folder = render_prefix + folder folder = render_prefix + folder
@ -934,157 +675,3 @@ def render_infinity(options):
with open(state_f + "~", "w") as fd: with open(state_f + "~", "w") as fd:
json.dump(state, fd, indent=2) json.dump(state, fd, indent=2)
shutil.move(state_f + "~", state_f) shutil.move(state_f + "~", state_f)
def join_subtitles(base_prefix, options):
    """Build one combined subtitle file for all scenes under base_prefix.

    Loads every */scene.json in sorted order, regenerates the subtitles for
    each scene, shifts them by the accumulated duration of the preceding
    scenes, and writes the joined result via write_subtitles().
    base_prefix is expected to be a Path-like directory.
    """
    # Earlier implementation joined the rendered front.srt files directly:
    # subtitles = list(sorted(glob('%s/*/front.srt' % base_prefix)))
    # data = []
    # position = 0
    # for srt in subtitles:
    #     scene = srt.replace('front.srt', 'scene.json')
    #     data += ox.srt.load(srt, offset=position)
    #     position += get_scene_duration(scene)
    # with open(base_prefix / 'front.srt', 'wb') as fd:
    #     fd.write(ox.srt.encode(data))
    scenes = list(sorted(glob('%s/*/scene.json' % base_prefix)))
    data = []
    position = 0  # running offset in seconds at the start of the current scene
    for scene_json in scenes:
        with open(scene_json) as fd:
            scene = json.load(fd)
        subs = scene_subtitles(scene, options)
        data += shift_clips(subs, position)
        position += get_scene_duration(scene)
    write_subtitles(data, base_prefix, options)
def resolve_roman(s):
    """Replace the roman-numeral suffix after leading digits with its
    arabic value, e.g. "3vii" -> "37".

    Only the numerals i..x are mapped; any other suffix is left unchanged.
    """
    extra = re.compile(r'^\d+(.*?)$').findall(s)
    if extra:
        extra = extra[0].lower()
        # fix: 'vii' previously mapped to the int 7, which made
        # str.replace() raise TypeError for that numeral
        new = {
            'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
            'vi': '6', 'vii': '7', 'viii': '8', 'ix': '9', 'x': '10'
        }.get(extra, extra)
        # NOTE: replacement uses the lowercased suffix, so an upper-case
        # numeral in s (e.g. "3VII") is not replaced — unchanged behavior
        return s.replace(extra, new)
    return s
def generate_clips(options):
    """Collect usable clips and voice-over takes from the pan.do/ra database.

    Symlinks the selected source file of each matching item into
    <prefix>/<type>/, writes clips.json (complete clips with duration, tags
    and a sortable seqid) and voice_over.json (per-fragment voice-over takes
    with their subtitles).  When a "censored" list is configured, censored
    originals are pointed at a white placeholder clip which is rendered on
    demand at the end.
    """
    # imported lazily: Django models are only importable inside pandora
    import item.models
    import itemlist.models
    options = load_defaults(options)
    prefix = options['prefix']
    lang, tlang = parse_lang(options["lang"])
    if options.get('censored'):
        # public_ids of items whose original must be replaced by the placeholder
        censored_list = itemlist.models.List.get(options["censored"])
        censored = list(censored_list.get_items(
            censored_list.user
        ).all().values_list('public_id', flat=True))
    clips = []
    for i in item.models.Item.objects.filter(sort__type='original'):
        original_target = ""
        # only items that share their title with at least one other item
        # (i.e. the original has derived variants)
        qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
        if qs.count() >= 1:
            clip = {}
            durations = []
            for e in item.models.Item.objects.filter(data__title=i.data['title']):
                if 'type' not in e.data:
                    print("ignoring invalid video %s (no type)" % e)
                    continue
                if not e.files.filter(selected=True).exists():
                    continue
                source = e.files.filter(selected=True)[0].data.path
                ext = os.path.splitext(source)[1]
                type_ = e.data['type'][0].lower()
                # expose the source file under <prefix>/<type>/<title><ext>
                target = os.path.join(prefix, type_, i.data['title'] + ext)
                os.makedirs(os.path.dirname(target), exist_ok=True)
                if os.path.islink(target):
                    os.unlink(target)
                os.symlink(source, target)
                if type_ == "original":
                    original_target = target
                if options.get('censored') and e.public_id in censored:
                    # keep the real file reachable under <type>_censored and
                    # point the clip itself at the white placeholder
                    clip[type_ + "_censored"] = target
                    target = '/srv/t_for_time/censored.mp4'
                clip[type_] = target
                durations.append(e.files.filter(selected=True)[0].duration)
            # use the shortest duration across the variants
            clip["duration"] = min(durations)
            if not clip["duration"]:
                print('!!', durations, clip)
                continue
            # quantize the duration to whole frames at 24fps
            cd = format_duration(clip["duration"], 24)
            #if cd != clip["duration"]:
            #    print(clip["duration"], '->', cd, durations, clip)
            clip["duration"] = cd
            clip['tags'] = i.data.get('tags', [])
            clip['editingtags'] = i.data.get('editingtags', [])
            # derive a sortable numeric seqid from the first two
            # underscore-separated fields of the original file name;
            # 'B'/'S' prefixes are stripped, roman numerals resolved
            name = os.path.basename(original_target)
            seqid = re.sub(r"Hotel Aporia_(\d+)", "S\\1_", name)
            seqid = re.sub(r"Night March_(\d+)", "S\\1_", seqid)
            seqid = re.sub(r"_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
            seqid = seqid.split('_')[:2]
            seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
            seqid[1] = resolve_roman(seqid[1])
            seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
            if not seqid[1]:
                seqid[1] = '0'
            try:
                # two zero-padded 6-digit fields concatenated into one int
                clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
            except:
                # bare except kept on purpose: log the offending name, re-raise
                print(name, seqid, 'failed')
                raise
            # a clip is complete with either foreground+background or animation
            if "original" in clip and "foreground" in clip and "background" in clip:
                clips.append(clip)
            elif "original" in clip and "animation" in clip:
                clips.append(clip)
            else:
                print("ignoring incomplete video", i)
    with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
        json.dump(clips, fd, indent=2, ensure_ascii=False)
    print("using", len(clips), "clips")
    voice_over = defaultdict(dict)
    for vo in item.models.Item.objects.filter(
        data__type__contains="Voice Over",
    ):
        # titles start with "<fragment_id>_", batches look like "Text-<batch>"
        fragment_id = int(vo.get('title').split('_')[0])
        source = vo.files.filter(selected=True)[0]
        batch = vo.get('batch')[0].replace('Text-', '')
        src = source.data.path
        target = os.path.join(prefix, 'voice_over', batch, '%s.wav' % fragment_id)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        if os.path.islink(target):
            os.unlink(target)
        os.symlink(src, target)
        subs = []
        for sub in vo.annotations.filter(
            layer="subtitles", languages=lang
        ).exclude(value="").order_by("start"):
            sdata = get_srt(sub, 0, lang, tlang)
            subs.append(sdata)
        voice_over[fragment_id][batch] = {
            "src": target,
            "duration": format_duration(source.duration, 24),
            "subs": subs
        }
    with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
        json.dump(voice_over, fd, indent=2, ensure_ascii=False)
    if options.get('censored'):
        # render the one-hour white placeholder once, if it does not exist yet
        censored_mp4 = '/srv/t_for_time/censored.mp4'
        if not os.path.exists(censored_mp4):
            cmd = [
                "ffmpeg",
                "-nostats", "-loglevel", "error",
                "-f", "lavfi",
                "-i", "color=color=white:size=1920x1080:rate=24",
                "-t", "3600",
                "-c:v", "libx264",
                "-pix_fmt", "yuv420p",
                censored_mp4
            ]
            subprocess.call(cmd)

View file

@ -4,7 +4,6 @@ import subprocess
import lxml.etree import lxml.etree
import uuid import uuid
import os import os
import sys
_CACHE = {} _CACHE = {}
_IDS = defaultdict(int) _IDS = defaultdict(int)
@ -13,14 +12,6 @@ def get_propery(element, name):
return element.xpath('property[@name="%s"]' % name)[0].text return element.xpath('property[@name="%s"]' % name)[0].text
def get_melt():
    """Return the melt command line, wrapped in xvfb-run when no display
    is available.

    Also provides a fallback XDG_RUNTIME_DIR for headless service
    environments.
    """
    if 'XDG_RUNTIME_DIR' not in os.environ:
        os.environ['XDG_RUNTIME_DIR'] = '/tmp/runtime-pandora'
    command = ['melt']
    if 'DISPLAY' not in os.environ:
        command = ['xvfb-run', '-a'] + command
    return command
def melt_xml(file): def melt_xml(file):
out = None out = None
real_path = os.path.realpath(file) real_path = os.path.realpath(file)
@ -29,8 +20,7 @@ def melt_xml(file):
if os.stat(real_path).st_mtime != ts: if os.stat(real_path).st_mtime != ts:
out = None out = None
if not out: if not out:
cmd = get_melt() + [file, '-consumer', 'xml'] out = subprocess.check_output(['melt', file, '-consumer', 'xml']).decode()
out = subprocess.check_output(cmd).decode()
_CACHE[file] = [os.stat(real_path).st_mtime, out] _CACHE[file] = [os.stat(real_path).st_mtime, out]
return out return out
@ -564,6 +554,7 @@ class KDEnliveProject:
] + value) ] + value)
] ]
def properties(self, *props): def properties(self, *props):
return [ return [
self.get_element("property", attrib={"name": name}, text=str(value) if value is not None else value) self.get_element("property", attrib={"name": name}, text=str(value) if value is not None else value)

View file

@ -1,195 +0,0 @@
import re
import os
import lxml.etree
import ox
from .render_kdenlive import melt_xml
from .utils import format_duration
def parse_lang(lang):
    """Split a language spec into (primary, translations).

    Accepts a single code ("de"), a comma separated string ("de,en") or a
    list.  "en" is normalized to None for the primary language (annotations
    store English as languages=None).  Translations stay as a list, or None
    when no translation languages were given.
    """
    if lang and "," in lang:
        lang = lang.split(",")
    if isinstance(lang, list):
        tlang = lang[1:]
        lang = lang[0]
    else:
        tlang = None
    return (None, tlang) if lang == "en" else (lang, tlang)
def random_int(seq, length):
    """Map draws from seq (a callable returning digits 0..9) to an index
    in range(length).

    Consumes one draw per decimal order of magnitude of length-1 and scales
    the accumulated value back into [0, length-1].
    """
    top = length - 1
    if top == 0:
        return 0
    total = seq() / 9 * 10
    base = 10
    remaining = top
    while remaining > 10:
        remaining /= 10
        total += seq() / 9 * 10
        base += 10
    return int(round(top * total / base))
def random_choice(seq, items, pop=False):
    """Pick an element of items using seq as the randomness source.

    With pop=True the chosen element is also removed from items.
    """
    index = random_int(seq, len(items))
    return items.pop(index) if pop else items[index]
def chance(seq, chance):
    """True when a draw from seq (0..9, scaled to 0..0.9) falls below
    the given probability threshold."""
    roll = seq() / 10
    return roll < chance
def get_clip_by_seqid(clips, seqid):
    """Remove and return the first clip whose "seqid" matches, or None.

    Mutates clips when a match is found.
    """
    for index, candidate in enumerate(clips):
        if candidate["seqid"] == seqid:
            return clips.pop(index)
    return None
def get_scene_duration(scene, fps=24, track=None):
    """Total duration of a scene in seconds, quantized to whole frames.

    scene: a scene dict, or a path to a scene.json file.
    fps: frame rate used for quantization.
    track: optional "timeline:track" name; when given, only clips on that
    track are counted.
    """
    if isinstance(scene, str):
        # fix: this module never imported json, so loading a scene from a
        # path raised NameError (import added to the module imports)
        with open(scene) as fd:
            scene = json.load(fd)
    duration = 0
    for key, value in scene.items():
        for name, clips in value.items():
            if track and '%s:%s' % (key, name) != track:
                continue
            if clips:
                for clip in clips:
                    duration += round(clip["duration"] * fps)
    #print("scene duration based on %s:%s is %s %s" % (key, name, duration / fps, format_duration(duration / fps, fps)))
    return duration / fps
def get_offset_duration(prefix):
    """Sum the durations of every scene.json found below prefix.

    Walks the tree and lets get_scene_duration() load each file.
    """
    duration = 0
    for root, folders, files in os.walk(prefix):
        for f in files:
            if f == "scene.json":
                # fix: previously referenced an undefined name `scene`
                # (NameError); pass the file path, which
                # get_scene_duration() accepts and loads
                duration += get_scene_duration(os.path.join(root, f))
    return duration
def get_track_duration(scene, k, n):
    """Duration in seconds of track n on timeline k, truncated to whole
    frames at 24fps.  Returns 0.0 when the timeline or track is absent."""
    frames = 0
    for clip in scene.get(k, {}).get(n, []):
        frames += int(clip["duration"] * 24)
    return frames / 24
def get_project_duration(file):
    """Frame count of a kdenlive project as melt sees it.

    Reads melt's XML rendering of the project and treats the first
    <producer> element's "out" attribute as the inclusive last frame,
    hence the +1.
    """
    xml = melt_xml(file)
    producer = lxml.etree.fromstring(xml).xpath("producer")[0]
    return int(producer.attrib["out"]) + 1
def fix_overlaps(data):
    """Trim overlapping subtitle cues in place.

    Whenever a cue starts before the previous one ends, the previous cue's
    out point is pulled back to 1ms before the new cue's in point.
    Returns the same list.
    """
    prev = None
    for current in data:
        if prev is not None and current["in"] < prev["out"]:
            prev["out"] = current["in"] - 0.001
        prev = current
    return data
def shift_clips(data, offset):
    """Shift every clip's in/out points by offset seconds, in place.

    Returns the same list for chaining.
    """
    for entry in data:
        entry["in"] = entry["in"] + offset
        entry["out"] = entry["out"] + offset
    return data
def ass_timestamp(seconds):
    """Format seconds as an ASS timestamp, H:MM:SS.CC (centiseconds).

    Rounds the fractional part to centiseconds and carries any rollover
    (e.g. 59.999 -> 0:01:00.00).
    """
    hrs = int(seconds // 3600)
    mins = int((seconds % 3600) // 60)
    remainder = seconds % 60
    whole = int(remainder)
    centi = int(round((remainder - whole) * 100))
    # carry rounding overflow up through seconds, minutes and hours
    if centi == 100:
        whole += 1
        centi = 0
    if whole == 60:
        whole = 0
        mins += 1
    if mins == 60:
        mins = 0
        hrs += 1
    return f"{hrs}:{mins:02d}:{whole:02d}.{centi:02d}"
def ass_encode(subs, options):
    """Encode subtitles as an ASS document with one style per language.

    subs: list of cues shaped {"in": s, "out": s, "values": {lang: text}}.
    options: may provide "lang" (comma separated language order), "font"
    and "font_size" (per-language mappings), "sub_margin" (bottom margin)
    and "sub_spacing" (vertical gap between stacked language lines).
    Returns the complete document as a string.
    """
    if "lang" in options:
        langs = options["lang"].split(",")
    else:
        # fall back to the languages present on the first cue
        langs = list(subs[0]["values"])
    header = """[Script Info]
ScriptType: v4.00+
PlayResX: 1920
PlayResY: 1080
ScaledBorderAndShadow: yes
YCbCr Matrix: None
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
    ass = header
    offset = options.get("sub_margin", 10)
    spacing = options.get("sub_spacing", 20)
    styles = []
    # build styles bottom-up: the last language sits on the bottom margin,
    # each earlier language is stacked above it via a growing MarginV
    for lang in reversed(langs):
        # fix: "font"/"font_size" overrides are mappings keyed by language;
        # the old isinstance(..., list) check either ignored dict configs or
        # crashed indexing a list with a string key
        if isinstance(options.get("font"), dict) and lang in options["font"]:
            font = options["font"][lang]
        else:
            font = "SimHei" if lang in ("zh", "jp") else "Menlo"
        if isinstance(options.get("font_size"), dict) and lang in options["font_size"]:
            size = options["font_size"][lang]
        else:
            size = 46 if font == "SimHei" else 42
        styles.append(
            f"Style: {lang},{font},{size},&Hffffff,&Hffffff,&H0,&H0,0,0,0,0,100,100,0,0,1,1,0,2,10,10,{offset},1"
        )
        offset += size + spacing
    ass += "\n".join(reversed(styles)) + "\n"
    events = [
        "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text"
    ]
    for sub in subs:
        start = ass_timestamp(sub["in"])
        stop = ass_timestamp(sub["out"])
        for lang in reversed(langs):
            # NOTE(review): assumes every cue carries every language in
            # "values" — a missing language raises KeyError; confirm upstream
            value = sub["values"][lang]
            value = value.replace('\n', '\\N')
            event = f"Dialogue: 0,{start},{stop},{lang},,0,0,0,,{value}"
            events.append(event)
    ass += "\n\n[Events]\n" + "\n".join(events) + "\n"
    return ass

6
sax.py
View file

@ -31,7 +31,7 @@ reverb = {
"src": reverb_wav, "src": reverb_wav,
"duration": 3600.0, "duration": 3600.0,
"filter": { "filter": {
"volume": "3.5" "volume": "0.5"
}, },
} }
@ -39,14 +39,14 @@ long = {
"src": long_wav, "src": long_wav,
"duration": 3600.0, "duration": 3600.0,
"filter": { "filter": {
"volume": "-1" "volume": "-4"
}, },
} }
noise = { noise = {
"src": nois_wav, "src": nois_wav,
"duration": 3600.0, "duration": 3600.0,
"filter": { "filter": {
"volume": "7.75" "volume": "4.75"
}, },
} }

BIN
title.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.1 KiB

View file

@ -1,16 +1,4 @@
import os
import re
def resolve_roman(s):
    """Replace the roman-numeral suffix after leading digits with its
    arabic value, e.g. "3vii" -> "37".

    Only the numerals i..x are mapped; any other suffix is left unchanged.
    """
    extra = re.compile(r'^\d+(.*?)$').findall(s)
    if extra:
        extra = extra[0].lower()
        # fix: 'vii' previously mapped to the int 7, which made
        # str.replace() raise TypeError for that numeral
        new = {
            'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
            'vi': '6', 'vii': '7', 'viii': '8', 'ix': '9', 'x': '10'
        }.get(extra, extra)
        return s.replace(extra, new)
    return s
def upgrade_originals(): def upgrade_originals():
import item.models import item.models
@ -42,24 +30,3 @@ def remove_deselected_files():
if changed: if changed:
i.save() i.save()
def write_if_new(path, data, mode=''):
    """Write data to path only when it differs from the current content.

    mode: '' for text, 'b' for binary.  For .kdenlive files, 36-character
    brace-wrapped ids (uuids) are ignored when comparing, so regenerated
    projects that differ only in ids are not rewritten.
    """
    if os.path.exists(path):
        with open(path, 'r' + mode) as fd:
            existing = fd.read()
    else:
        existing = ""
    if path.endswith(".kdenlive"):
        # compare with uuid-style ids blanked out on both sides
        strip_ids = lambda text: re.sub(r'\{.{36}\}', '', text)
        changed = strip_ids(data) != strip_ids(existing)
    else:
        changed = data != existing
    if changed:
        with open(path, 'w' + mode) as fd:
            fd.write(data)
def format_duration(duration, fps, audio=False):
    """Quantize a duration in seconds to whole frames at fps.

    Video durations round to the nearest frame; audio durations truncate
    (audio=True).  The result is snapped to 5 decimal places.
    """
    frames = int(duration * fps) if audio else round(duration * fps)
    return float('%0.5f' % (frames / fps))