Compare commits


No commits in common. "main" and "infinity" have entirely different histories.

16 changed files with 208 additions and 922 deletions

View file

@@ -63,7 +63,6 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"canExportAnnotations": {"member": true, "staff": true, "admin": true},
"canImportAnnotations": {"member": true, "staff": true, "admin": true},
"canImportItems": {"member": true, "staff": true, "admin": true},
"canTranscribeAudio": {},
"canManageDocuments": {"member": true, "staff": true, "admin": true},
"canManageEntities": {"member": true, "staff": true, "admin": true},
"canManageHome": {"staff": true, "admin": true},
@@ -1015,7 +1014,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
the system (from).
*/
"site": {
"description": "T for Time - pan.do/ra",
"description": "This is a demo of pan.do/ra - a free, open source media archive. It allows you to manage large, decentralized collections of video, to collaboratively create metadata and time-based annotations, and to serve your archive as a cutting-edge web application.",
"email": {
// E-mail address in contact form (to)
"contact": "system@time.0x2620.org",

View file

@@ -9,7 +9,7 @@ User=pandora
Group=pandora
Nice=19
WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/pandora/manage.py infinity --config /etc/infinity.json
ExecStart=/srv/pandora/pandora/manage.py infinity
[Install]
WantedBy=multi-user.target

View file

@@ -100,7 +100,7 @@ if os.path.exists('__init__.py'):
local_settings += '\nLOCAL_APPS = ["%s"]\n' % name
local_settings_changed = True
else:
apps = re.compile(r'(LOCAL_APPS.*?)\]', re.DOTALL).findall(local_settings)[0]
apps = re.compile('(LOCAL_APPS.*?)\]', re.DOTALL).findall(local_settings)[0]
if name not in apps:
new_apps = apps.strip() + ',\n"%s"\n' % name
local_settings = local_settings.replace(apps, new_apps)

View file

@@ -2,43 +2,24 @@ import json
import os
import subprocess
import ox
from django.core.management.base import BaseCommand
from django.conf import settings
from ...render import add_translations
class Command(BaseCommand):
help = 'export all subtitles for translations'
def add_arguments(self, parser):
parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
pass
def handle(self, **options):
import annotation.models
import item.models
lang = options["lang"]
if lang:
lang = lang.split(',')
tlang = lang[1:]
lang = lang[0]
else:
tlang = None
if lang == "en":
lang = None
for i in item.models.Item.objects.filter(data__type__contains='Voice Over').order_by('sort__title'):
print("## %s %s" % (i.get("title"), i.public_id))
for sub in i.annotations.all().filter(layer='subtitles').exclude(value='').filter(languages=lang).order_by("start"):
if tlang:
value = add_translations(sub, tlang)
value = ox.strip_tags(value)
else:
value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if sub.languages:
value = ox.strip_tags(value)
print(value.strip() + "\n")
for sub in i.annotations.all().filter(layer='subtitles').exclude(value='').order_by("start"):
if not sub.languages:
print(sub.value.strip() + "\n")
print("\n\n\n")

View file

@@ -1,6 +1,27 @@
from django.core.management.base import BaseCommand
import json
import os
import re
from collections import defaultdict
from ...render import generate_clips
from django.core.management.base import BaseCommand
from django.conf import settings
import item.models
import itemlist.models
from ...render import get_srt
def resolve_roman(s):
extra = re.compile('^\d+(.*?)$').findall(s)
if extra:
extra = extra[0].lower()
new = {
'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
'vi': '6', 'vii': '7', 'viii': '8', 'ix': '9', 'x': '10'
}.get(extra, extra)
return s.replace(extra, new)
return s
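# Editorial note (hedged, not from the source): quick examples of the helper
# above -- only a roman-numeral suffix after leading digits is rewritten:
#   resolve_roman("3iv")  -> "34"
#   resolve_roman("12x")  -> "1210"
#   resolve_roman("7")    -> "7"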
class Command(BaseCommand):
@@ -10,4 +31,84 @@ class Command(BaseCommand):
parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
def handle(self, **options):
return generate_clips(options)
prefix = options['prefix']
clips = []
for i in item.models.Item.objects.filter(sort__type='original'):
qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
if qs.count() >= 1:
clip = {}
durations = []
for e in item.models.Item.objects.filter(data__title=i.data['title']):
if 'type' not in e.data:
print("ignoring invalid video %s (no type)" % e)
continue
if not e.files.filter(selected=True).exists():
continue
source = e.files.filter(selected=True)[0].data.path
ext = os.path.splitext(source)[1]
type_ = e.data['type'][0].lower()
target = os.path.join(prefix, type_, i.data['title'] + ext)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(source, target)
clip[type_] = target
durations.append(e.files.filter(selected=True)[0].duration)
clip["duration"] = min(durations)
if not clip["duration"]:
print('!!', durations, clip)
continue
clip['tags'] = i.data.get('tags', [])
clip['editingtags'] = i.data.get('editingtags', [])
name = os.path.basename(clip['original'])
seqid = re.sub("Hotel Aporia_(\d+)", "S\\1_", name)
seqid = re.sub("Night March_(\d+)", "S\\1_", seqid)
seqid = re.sub("_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
seqid = seqid.split('_')[:2]
seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
seqid[1] = resolve_roman(seqid[1])
seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
if not seqid[1]:
seqid[1] = '0'
try:
clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
except:
print(name, seqid, 'failed')
raise
if "original" in clip and "foreground" in clip and "background" in clip:
clips.append(clip)
elif "original" in clip and "animation" in clip:
clips.append(clip)
else:
print("ignoring incomplete video", i)
with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
json.dump(clips, fd, indent=2, ensure_ascii=False)
print("using", len(clips), "clips")
voice_over = defaultdict(dict)
for vo in item.models.Item.objects.filter(
data__type__contains="Voice Over",
):
fragment_id = int(vo.get('title').split('_')[0])
source = vo.files.filter(selected=True)[0]
batch = vo.get('batch')[0].replace('Text-', '')
src = source.data.path
target = os.path.join(prefix, 'voice_over', batch, '%s.wav' % fragment_id)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(src, target)
subs = []
for sub in vo.annotations.filter(layer="subtitles").exclude(value="").order_by("start"):
sdata = get_srt(sub)
subs.append(sdata)
voice_over[fragment_id][batch] = {
"src": target,
"duration": source.duration,
"subs": subs
}
with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
json.dump(voice_over, fd, indent=2, ensure_ascii=False)
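# Editorial sketch (not part of the diff): one entry of the clips.json written
# above has roughly this shape; paths and values are illustrative only:
#   {
#     "original":   "/srv/t_for_time/original/Hotel Aporia_12.mov",
#     "foreground": "/srv/t_for_time/foreground/Hotel Aporia_12.mov",
#     "background": "/srv/t_for_time/background/Hotel Aporia_12.mov",
#     "duration": 42.5,
#     "tags": ["..."],
#     "editingtags": [],
#     "seqid": 12000000     # int built from the zero-padded sequence/shot ids
#   }
# voice_over.json maps fragment id -> batch -> {"src", "duration", "subs"}.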

View file

@@ -1,109 +0,0 @@
import json
import os
import subprocess
import ox
from django.core.management.base import BaseCommand
from django.conf import settings
from item.models import Item
from annotation.models import Annotation
class Command(BaseCommand):
help = 'export all subtitles for translations'
def add_arguments(self, parser):
parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
parser.add_argument('--test', action='store_true', dest='test', default=False, help='test run')
parser.add_argument('args', metavar='args', type=str, nargs='*', help='file or url')
def handle(self, filename, **options):
if not options["lang"]:
print("--lang is required")
return
lang = options["lang"]
if filename.startswith("http"):
data = ox.net.read_url(filename).decode()
else:
with open(filename) as fd:
data = fd.read()
data = ('\n' + data.strip()).split('\n## ')[1:]
invalid = []
valid = []
for block in data:
title, block = block.split('\n', 1)
block = block.strip()
title = title.strip()
item_id = title.split(' ')[-1]
item = Item.objects.get(public_id=item_id)
subtitles_en = item.annotations.filter(layer="subtitles", languages=None).exclude(value='')
lines = block.split('\n\n')
if len(lines) != subtitles_en.count():
print('%s: number of subtitles does not match, en: %s vs %s: %s' % (title, subtitles_en.count(), lang, len(lines)))
if options["test"]:
print(json.dumps(lines, indent=2, ensure_ascii=False))
print(json.dumps([s.value for s in subtitles_en.order_by('start')], indent=2, ensure_ascii=False))
continue
if options["test"]:
print('%s: valid %s subtitles' % (title, len(lines)))
else:
n = 0
item.annotations.filter(layer="subtitles", languages=lang).delete()
for sub_en in subtitles_en.order_by('start'):
sub = Annotation()
sub.item = sub_en.item
sub.user = sub_en.user
sub.layer = sub_en.layer
sub.start = sub_en.start
sub.end = sub_en.end
sub.value = '<span lang="%s">%s</span>' % (lang, lines[n])
sub.save()
n += 1
'''
srt = 'vocals_txt/%s/%s' % (title[0], title.replace('.wav', '.srt'))
filename = 'vocals_txt/%s/%s' % (title[0], title.replace('.wav', '.' + lang + '.srt'))
folder = os.path.dirname(filename)
if not os.path.exists(folder):
os.makedirs(folder)
data = json.load(open(srt + '.json'))
subs = block.replace('\n\n', '\n').split('\n')
if len(data) != len(subs):
print('invalid', title, 'expected', len(data), 'got', len(subs))
invalid.append('## %s\n\n%s' % (title, block))
valid.append('## %s\n\n%s' % (title, '\n\n'.join([d['value'] for d in data])))
continue
for i, sub in enumerate(data):
sub['value'] = subs[i]
kodata = ox.srt.encode(data)
current = None
if os.path.exists(filename):
with open(filename, 'rb') as fd:
current = fd.read()
if current != kodata:
print('update', title, filename)
with open(filename, 'wb') as fd:
fd.write(kodata)
with open(filename + '.json', 'w') as fd:
ko = [{
'in': s['in'],
'out': s['out'],
'value': s['value'],
} for s in data]
json.dump(ko, fd, ensure_ascii=False, indent=4)
if invalid:
with open('invalid_%s_subtitles.txt' % lang, 'w') as fd:
fd.write('\n\n\n\n'.join(invalid))
with open('invalid_%s_subtitles_en.txt' % lang, 'w') as fd:
fd.write('\n\n\n\n'.join(valid))
'''

View file

@@ -13,19 +13,7 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
parser.add_argument('--config', action='store', dest='config', default=None, help='config')
parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')
parser.add_argument('--single-file', action='store_true', dest='single_file', default=False, help='render to single video')
parser.add_argument('--keep-audio', action='store_true', dest='keep_audio', default=False, help='keep independent audio tracks')
parser.add_argument('--stereo-downmix', action='store_true', dest='stereo_downmix', default=False, help='stereo downmix')
parser.add_argument('--debug', action='store_true', dest='debug', default=False, help='output more info')
def handle(self, **options):
if options.get("config"):
if os.path.exists(options["config"]):
with open(options["config"]) as fd:
config = json.load(fd)
options.update(config)
else:
print("unable to load config %s" % options["config"])
render_infinity(options)
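# Editorial sketch (assumption, not from the source): the JSON file passed via
# --config is merged straight into options above, so it can carry the same
# keys that render_infinity() reads further down, e.g.:
#   {
#     "duration": "3600",       # target duration per fragment, in seconds
#     "offset": 100,            # starting offset in pi
#     "max-items": 30,          # rendered fragments to keep around
#     "no_video": false,
#     "stereo_downmix": false
#   }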

View file

@@ -16,10 +16,6 @@ class Command(BaseCommand):
parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')
parser.add_argument('--offset', action='store', dest='offset', default="1024", help='initial offset in pi')
parser.add_argument('--no-video', action='store_true', dest='no_video', default=False, help='don\'t render video')
parser.add_argument('--single-file', action='store_true', dest='single_file', default=False, help='render to single video')
parser.add_argument('--keep-audio', action='store_true', dest='keep_audio', default=False, help='keep independent audio tracks')
parser.add_argument('--stereo-downmix', action='store_true', dest='stereo_downmix', default=False, help='stereo downmix')
parser.add_argument('--debug', action='store_true', dest='debug', default=False, help='output more info')
def handle(self, **options):
render_all(options)

View file

@@ -13,6 +13,7 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
parser.add_argument('--duration', action='store', dest='duration', default="3600", help='target duration of all fragments in seconds')
parser.add_argument('--offset', action='store', dest='offset', default="1024", help='initial offset in pi')
def handle(self, **options):

View file

@@ -7,7 +7,7 @@ Wants=network-online.target
Type=simple
Restart=on-failure
KillSignal=SIGINT
ExecStart=/srv/pandora/t_for_time/player/player.py --mode peer --playlist /srv/t_for_time/render/back.m3u --config /srv/t_for_time/render/back.json
ExecStart=/srv/pandora/t_for_time/player/player.py --mode peer --playlist /srv/t_for_time/render/back.m3u
[Install]
WantedBy=graphical-session.target

View file

@@ -6,7 +6,7 @@ After=gnome-session.target network-online.target
Type=simple
Restart=on-failure
KillSignal=SIGINT
ExecStart=/srv/pandora/t_for_time/player/player.py --mode main --playlist /srv/t_for_time/render/front.m3u --config /srv/t_for_time/render/front.json
ExecStart=/srv/pandora/t_for_time/player/player.py --mode main --playlist /srv/t_for_time/render/front.m3u
[Install]
WantedBy=graphical-session.target
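# Editorial sketch (assumption): the front.json / back.json passed via --config
# in these units override the CONFIG defaults declared in player.py below
# (font, font_size, font_border, sub_border_color, sub_margin, sub_spacing,
# vf, sync_group), e.g.:
#   {
#     "font_size": 36,
#     "sub_margin": 80,
#     "sync_group": 1
#   }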

View file

@@ -8,7 +8,6 @@ import time
from threading import Thread
from datetime import datetime
import ox
import mpv
@@ -20,17 +19,10 @@ SYNC_GRACE_TIME = 5
SYNC_JUMP_AHEAD = 1
PORT = 9067
DEBUG = False
CONFIG = {
"font": "Menlo",
"font_size": 30,
"font_border": 4,
"sub_border_color": "0.0/0.0/0.0/0.75",
"sub_margin": 2 * 36 + 6,
"sub_spacing": 0,
"vf": None,
"sync_group": None,
}
FONT = 'Menlo'
FONT_SIZE = 30
FONT_BORDER = 4
SUB_MARGIN = 2 * 36 + 6
def hide_gnome_overview():
@@ -52,7 +44,6 @@ class Main:
class Sync(Thread):
active = True
is_main = True
is_paused = False
ready = False
destination = "255.255.255.255"
reload_check = None
@@ -62,52 +53,32 @@ class Sync(Thread):
def __init__(self, *args, **kwargs):
self.is_main = kwargs.get('mode', 'main') == 'main'
self.start_at_hour = kwargs.get("hour", False)
self.sock = self.init_socket()
self.main = Main()
if self.is_main:
self.socket_enable_broadcast()
if kwargs.get("sax"):
self.sax = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True,
)
self.sax.loop_file = True
self.sax.play("/srv/t_for_time/render/Saxophone-5.1.mp4")
else:
self.sax = None
if mpv.MPV_VERSION >= (2, 2):
self.mpv = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True,
sub_font_size=CONFIG["font_size"], sub_font=CONFIG["font"],
sub_border_size=CONFIG["font_border"],
sub_border_color=CONFIG["sub_border_color"],
sub_margin_y=CONFIG["sub_margin"],
sub_ass_line_spacing=CONFIG["sub_spacing"],
sub_font_size=FONT_SIZE, sub_font=FONT,
sub_border_size=FONT_BORDER,
sub_margin_y=SUB_MARGIN,
)
else:
self.mpv = mpv.MPV(
log_handler=mpv_log, input_default_bindings=True,
input_vo_keyboard=True,
sub_text_font_size=CONFIG["font_size"], sub_text_font=CONFIG["font"],
sub_border_size=CONFIG["font_border"],
sub_border_color=CONFIG["sub_border_color"],
sub_margin_y=CONFIG["sub_margin"],
sub_ass_line_spacing=CONFIG["sub_spacing"],
sub_text_font_size=FONT_SIZE, sub_text_font=FONT,
sub_border_size=FONT_BORDER,
sub_margin_y=SUB_MARGIN,
)
if CONFIG.get("vf"):
self.mpv.vf = CONFIG["vf"]
self.mpv.observe_property('time-pos', self.time_pos_cb)
self.mpv.fullscreen = kwargs.get('fullscreen', False)
self.mpv.loop_file = False
self.mpv.loop_playlist = True
self.mpv.register_key_binding('q', self.q_binding)
self.mpv.register_key_binding('s', self.s_binding)
self.mpv.register_key_binding('p', self.p_binding)
self.mpv.register_key_binding('SPACE', self.space_binding)
self.playlist = kwargs['playlist']
self.playlist_mtime = os.stat(self.playlist).st_mtime
self.mpv.loadlist(self.playlist)
@@ -119,31 +90,6 @@ class Sync(Thread):
time.sleep(0.1)
self.mpv.pause = True
self.sync_to_main()
elif self.start_at_hour:
self.mpv.pause = True
fmt = '%Y-%m-%d %H'
now = datetime.now()
offset = (now - datetime.strptime(now.strftime(fmt), fmt)).total_seconds()
if self.sax:
self.sax.wait_until_playing()
self.sax.seek(offset, 'absolute', 'exact')
self.sax.pause = True
position = 0
for idx, item in enumerate(self.mpv.playlist):
duration = ox.avinfo(item['filename'])['duration']
if position + duration > offset:
pos = offset - position
self.mpv.playlist_play_index(idx)
self.mpv.pause = False
self.mpv.wait_until_playing()
self.mpv.seek(pos, 'absolute', 'exact')
time.sleep(0.1)
break
else:
position += duration
if self.sax:
self.sax.pause = False
self.ready = True
Thread.__init__(self)
self.start()
@@ -160,45 +106,16 @@ class Sync(Thread):
else:
self.read_position_main()
self.reload_playlist()
if not self.is_paused and self._tick and abs(time.time() - self._tick) > 60:
if self._tick and abs(time.time() - self._tick) > 60:
logger.error("player is stuck")
self._tick = 0
self.stop()
self.mpv.stop()
def q_binding(self, *args):
if args[0] != 'd-':
return
self.stop()
self.mpv.stop()
def space_binding(self, *args):
if args[0] != 'd-':
return
if self.mpv.pause:
self.p_binding(*args)
else:
self.s_binding(*args)
def s_binding(self, *args):
if args[0] != 'd-':
return
self.is_paused = True
self.mpv.pause = True
if self.sax:
self.sax.pause = True
self.send_playback_state()
def p_binding(self, *args):
if args[0] != 'd-':
return
self.is_paused = False
self._tick = 0
self.mpv.pause = False
if self.sax:
self.sax.pause = False
self.send_playback_state()
def stop(self, *args):
self.active = False
if self.sock:
@@ -278,8 +195,6 @@ class Sync(Thread):
"%0.4f %s"
% (self.mpv.time_pos, self.mpv.playlist_current_pos)
).encode()
if CONFIG.get("sync_group"):
msg = ("%s " % CONFIG["sync_group"]).encode() + msg
except:
return
try:
@@ -287,47 +202,18 @@ class Sync(Thread):
except socket.error as e:
logger.error("send failed: %s", e)
def send_playback_state(self):
state = 'pause' if self.mpv.pause else 'play'
msg = ("%s -1" % state).encode()
try:
self.sock.send(msg)
except socket.error as e:
logger.error("send failed: %s", e)
#
# follower specific
#
_last_ping = None
def read_position_main(self):
self.sock.settimeout(5)
while True:
try:
data = self.sock.recvfrom(1024)[0].decode().split(" ", 1)
except socket.timeout:
if self._last_ping != "pause":
logger.error("failed to receive data from main")
return
except OSError:
logger.error("socket closed")
return
if CONFIG.get("sync_group"):
if data[0] == str(CONFIG["sync_group"]):
data = data[1].split(" ", 1)
break
else:
break
self._last_ping = data[0]
if data[0] == "pause":
self.is_paused = True
self.mpv.pause = True
elif data[0] == "play":
self.is_paused = False
self._tick = 0
self.mpv.pause = False
try:
data = self.sock.recvfrom(1024)[0].decode().split(" ", 1)
except socket.timeout:
logger.error("failed to receive data from main")
except OSError:
logger.error("socket closed")
else:
self.main.time_pos = float(data[0])
self.main.playlist_current_pos = int(data[1])
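# Editorial sketch of the wire format used above (inferred from the send/recv
# code in this file, not documented in the source): the main player broadcasts
# plain-text UDP datagrams
#   "<time_pos> <playlist_index>"   e.g. ("%0.4f %s" % (12.3456, 3)) -> "12.3456 3"
#   "pause -1" / "play -1"          on playback-state changes
# optionally prefixed with "<sync_group> " so that several player pairs can
# share one network; followers split on the first space and either update
# main.time_pos / main.playlist_current_pos or toggle pause accordingly.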
@@ -416,26 +302,16 @@ def main():
parser.add_argument('--prefix', help='video location', default=prefix)
parser.add_argument('--window', action='store_true', help='run in window', default=False)
parser.add_argument('--debug', action='store_true', help='debug', default=False)
parser.add_argument('--hour', action='store_true', help='start playback at the current offset into the hour', default=False)
parser.add_argument('--sax', action='store_true', help='play saxophone background track', default=False)
parser.add_argument('--config', help='config', default=None)
args = parser.parse_args()
DEBUG = args.debug
if DEBUG:
log_format = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format)
if args.config:
if os.path.exists(args.config):
with open(args.config) as fd:
CONFIG.update(json.load(fd))
else:
logger.error("config file %s does not exist, skipping", args.config)
base = os.path.dirname(os.path.abspath(__file__))
#os.chdir(base)
player = Sync(mode=args.mode, playlist=args.playlist, fullscreen=not args.window, hour=args.hour, sax=args.sax)
player = Sync(mode=args.mode, playlist=args.playlist, fullscreen=not args.window)
while player.active:
try:
player.mpv.wait_for_playback()

render.py
View file

@@ -11,10 +11,8 @@ import time
from pathlib import Path
import ox
import lxml.etree
from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE, melt_xml, get_melt
from .render_kdenlive import KDEnliveProject, _CACHE
def random_int(seq, length):
@@ -61,18 +59,13 @@ def write_if_new(path, data, mode=''):
old = ""
is_new = data != old
if path.endswith(".kdenlive"):
is_new = re.sub(r'\{.{36}\}', '', data) != re.sub(r'\{.{36}\}', '', old)
is_new = re.sub('\{.{36}\}', '', data) != re.sub('\{.{36}\}', '', old)
if is_new:
with open(path, write_mode) as fd:
fd.write(data)
def format_duration(duration, fps):
return float('%0.5f' % (round(duration * fps) / fps))
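# Editorial note: format_duration() snaps a duration to the frame grid, e.g.
# format_duration(1.3333, 24) == 1.33333 (32 frames at 24 fps), so per-track
# sums stay aligned to whole frames.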
def compose(clips, target=150, base=1024, voice_over=None, options=None):
if options is None:
options = {}
fps = 24
def compose(clips, target=150, base=1024, voice_over=None):
length = 0
scene = {
'front': {
@@ -107,7 +100,6 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
used = []
voice_overs = []
sub_offset = 0
if voice_over:
vo_keys = list(sorted(voice_over))
if chance(seq, 0.5):
@@ -126,7 +118,7 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
if vo_min > target:
target = vo_min
elif vo_min < target:
offset = format_duration((target - vo_min) / 2, fps)
offset = (target - vo_min) / 2
scene['audio-center']['A1'].append({
'blank': True,
'duration': offset
@@ -140,29 +132,17 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
subs = []
for vo in voice_overs:
voc = vo.copy()
a, b = '-11', '-3'
a, b = '3', '-6'
if 'Whispered' in voc['src']:
a, b = '-8', '0'
a, b = '6', '-3'
elif 'Read' in voc['src']:
a, b = '-7.75', '0.25'
a, b = '6.25', '-2.75'
elif 'Free' in voc['src']:
a, b = '-8.8', '-0.8'
a, b = '5.2', '-3.8'
elif 'Ashley' in voc['src']:
a, b = '-9.5', '-1.50'
a, b = '3.75', '-5.25'
elif 'Melody' in voc['src']:
a, b = '-5.25', '-0.25'
if options.get('stereo_downmix'):
a, b = '-9', '-1'
if 'Whispered' in voc['src']:
a, b = '-6', '2'
elif 'Read' in voc['src']:
a, b = '-5.75', '2.25'
elif 'Free' in voc['src']:
a, b = '-6.8', '3.2'
elif 'Ashley' in voc['src']:
a, b = '-7.5', '0.50'
elif 'Melody' in voc['src']:
a, b = '-3.25', '1.75'
a, b = '4.25', '-4.75'
voc['filter'] = {'volume': a}
scene['audio-center']['A1'].append(voc)
vo_low = vo.copy()
@@ -206,7 +186,7 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
if length + clip['duration'] > target and length >= vo_min:
break
print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
length += int(clip['duration'] * fps) / fps
length += clip['duration']
if "foreground" not in clip and "animation" in clip:
fg = clip['animation']
@@ -299,16 +279,13 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
blur = seq() * 3
if blur:
scene['back']['V1'][-1]['filter']['blur'] = blur
volume_back = '-8.2'
if options.get('stereo_downmix'):
volume_back = '-7.2'
scene['audio-back']['A1'].append({
'duration': clip['duration'],
'src': clip['original'],
'filter': {'volume': volume_back},
'filter': {'volume': '+0.2'},
})
# TBD: Foley
cf_volume = '-2.5'
cf_volume = '-5.5'
scene['audio-front']['A2'].append({
'duration': clip['duration'],
'src': foley,
@@ -321,83 +298,26 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
})
used.append(clip)
print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
scene_duration = int(get_scene_duration(scene) * fps)
sub_offset = int(sub_offset * fps)
if sub_offset < scene_duration:
delta = format_duration((scene_duration - sub_offset) / fps, fps)
print(">> add %0.3f of silence.. %0.3f (scene_duration)" % (delta, scene_duration / fps))
scene['audio-center']['A1'].append({
'blank': True,
'duration': delta
})
scene['audio-rear']['A1'].append({
'blank': True,
'duration': delta
})
elif sub_offset > scene_duration:
delta = format_duration((scene_duration - sub_offset) / fps, fps)
scene['audio-center']['A1'][-1]["duration"] += delta
scene['audio-rear']['A1'][-1]["duration"] += delta
print("WTF, needed to cut %s new duration: %s" % (delta, scene['audio-center']['A1'][-1]["duration"]))
print(scene['audio-center']['A1'][-1])
return scene, used
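# Editorial sketch (condensed from the keys used in this file): the scene dict
# returned above and dumped to scene.json looks roughly like
#   {
#     "front":        {"V1": [{"duration": ..., "src": ..., "filter": {...}}, ...]},
#     "back":         {"V1": [...]},
#     "audio-front":  {"A2": [...]},
#     "audio-center": {"A1": [...]},   # voice over; may start with a blank gap
#     "audio-rear":   {"A1": [...]},
#     "audio-back":   {"A1": [...]},
#     "subtitles":    [...]
#   }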
def get_track_duration(scene, k, n):
duration = 0
for key, value in scene.items():
if key == k:
for name, clips in value.items():
if name == n:
for clip in clips:
duration += int(clip['duration'] * 24)
return duration / 24
def get_scene_duration(scene):
if isinstance(scene, str):
with open(scene) as fd:
scene = json.load(fd)
duration = 0
for key, value in scene.items():
for name, clips in value.items():
for clip in clips:
duration += int(clip['duration'] * 24)
return duration / 24
duration += clip['duration']
return duration
def get_offset_duration(prefix):
duration = 0
for root, folders, files in os.walk(prefix):
for f in files:
if f == 'scene.json':
duration += get_scene_duration(scene)
return duration
def write_subtitles(data, folder, options):
data = fix_overlaps(data)
path = folder / "front.srt"
if options.get("subtitle_format") == "srt":
srt = ox.srt.encode(data)
write_if_new(str(path), srt, 'b')
path = folder / "front.ass"
if os.path.exists(path):
os.unlink(path)
else:
if os.path.exists(path):
os.unlink(path)
path = folder / "front.ass"
ass = ass_encode(data, options)
write_if_new(str(path), ass, '')
def render(root, scene, prefix='', options=None):
if options is None:
options = {}
def render(root, scene, prefix=''):
fps = 24
files = []
scene_duration = int(get_scene_duration(scene) * fps)
scene_duration = int(get_scene_duration(scene) * 24)
for timeline, data in scene.items():
if timeline == "subtitles":
folder = Path(root) / prefix
write_subtitles(data, folder, options)
path = os.path.join(root, prefix + "front.srt")
data = fix_overlaps(data)
srt = ox.srt.encode(data)
write_if_new(path, srt, 'b')
continue
#print(timeline)
project = KDEnliveProject(root)
@@ -408,43 +328,21 @@ def render(root, scene, prefix='', options=None):
#print(track)
for clip in clips:
project.append_clip(track, clip)
track_durations[track] = sum([int(c['duration'] * fps) for c in clips])
track_durations[track] = int(sum([c['duration'] for c in clips]) * 24)
if timeline.startswith('audio-'):
track_duration = project.get_duration()
delta = scene_duration - track_duration
if delta > 0:
for track in track_durations:
if track_durations[track] == track_duration:
project.append_clip(track, {'blank': True, "duration": delta/fps})
project.append_clip(track, {'blank': True, "duration": delta/24})
break
path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
project_xml = project.to_xml()
write_if_new(path, project_xml)
if options["debug"]:
# check duration
out_duration = get_project_duration(path)
p_duration = project.get_duration()
print(path, 'out: %s, project: %s, scene: %s' %(out_duration, p_duration, scene_duration))
if p_duration != scene_duration:
print(path, 'FAIL project: %s, scene: %s' %(p_duration, scene_duration))
_cache = os.path.join(root, "cache.json")
with open(_cache, "w") as fd:
json.dump(_CACHE, fd)
sys.exit(1)
if out_duration != p_duration:
print(path, 'fail got: %s expected: %s' %(out_duration, p_duration))
sys.exit(1)
files.append(path)
return files
def get_project_duration(file):
out = melt_xml(file)
chain = lxml.etree.fromstring(out).xpath('producer')[0]
duration = int(chain.attrib['out']) + 1
return duration
def get_fragments(clips, voice_over, prefix):
import itemlist.models
import item.models
@@ -485,32 +383,15 @@ def get_fragments(clips, voice_over, prefix):
fragment['clips'] = []
for clip in clips:
#if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']):
key = 'original'
original = clip['original']
if 'original_censored' in clip:
original = clip['original_censored']
if original in originals:
if clip['original'] in originals:
fragment['clips'].append(clip)
fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
fragments.append(fragment)
fragments.sort(key=lambda f: ox.sort_string(f['name']))
return fragments
def parse_lang(lang):
if lang and "," in lang:
lang = lang.split(',')
if isinstance(lang, list):
tlang = lang[1:]
lang = lang[0]
else:
tlang = None
if lang == "en":
lang = None
return lang, tlang
def render_all(options):
options = load_defaults(options)
prefix = options['prefix']
duration = int(options['duration'])
base = int(options['offset'])
@@ -548,13 +429,7 @@ def render_all(options):
fragment_clips = fragment['clips']
unused_fragment_clips = [c for c in fragment_clips if c not in clips_used]
print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
scene, used = compose(
unused_fragment_clips,
target=target,
base=fragment_base,
voice_over=fragment['voice_over'],
options=options
)
scene, used = compose(unused_fragment_clips, target=target, base=fragment_base, voice_over=fragment['voice_over'])
clips_used += used
scene_duration = get_scene_duration(scene)
print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
@@ -569,19 +444,20 @@ def render_all(options):
elif position < target_position:
target = target + 0.1 * fragment_target
timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/', options)
timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)
if not options['no_video'] and not options["single_file"]:
if not options['no_video']:
for timeline in timelines:
print(timeline)
ext = '.mp4'
if '/audio' in timeline:
ext = '.wav'
cmd = get_melt() + [
timeline,
cmd = [
'xvfb-run', '-a',
'melt', timeline,
'-quiet',
'-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
]
@@ -603,8 +479,8 @@ def render_all(options):
subprocess.call(cmd)
os.unlink(timeline.replace('.kdenlive', ext))
cmds = []
fragment_prefix = Path(fragment_prefix)
cmds = []
for src, out1, out2 in (
("audio-front.wav", "fl.wav", "fr.wav"),
("audio-center.wav", "fc.wav", "lfe.wav"),
@@ -631,51 +507,29 @@ def render_all(options):
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
"-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
])
audio_front = "audio-5.1.mp4"
audio_back = "audio-back.wav"
copy = '-c'
if options["stereo_downmix"]:
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "audio-front.wav",
"-i", fragment_prefix / "audio-center.wav",
"-i", fragment_prefix / "audio-rear.wav",
"-i", fragment_prefix / audio_back,
"-filter_complex",
"amix=inputs=4:duration=longest:dropout_transition=0",
'-ac', '2', fragment_prefix / "audio-stereo.wav"
])
audio_front = "audio-stereo.wav"
audio_back = "audio-stereo.wav"
copy = '-c:v'
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "front.mp4",
"-i", fragment_prefix / audio_front,
copy, "copy",
"-movflags", "+faststart",
fragment_prefix / "front-mixed.mp4",
"-i", fragment_prefix / "audio-5.1.mp4",
"-c", "copy",
fragment_prefix / "front-5.1.mp4",
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "back.mp4",
"-i", fragment_prefix / audio_back,
"-i", fragment_prefix / "audio-back.wav",
"-c:v", "copy",
"-movflags", "+faststart",
fragment_prefix / "back-audio.mp4",
])
for cmd in cmds:
if options["debug"]:
print(" ".join([str(x) for x in cmd]))
#print(" ".join([str(x) for x in cmd]))
subprocess.call(cmd)
for a, b in (
("back-audio.mp4", "back.mp4"),
("front-mixed.mp4", "front.mp4"),
("front-5.1.mp4", "back.mp4"),
):
duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
@@ -684,178 +538,26 @@ def render_all(options):
print('!!', duration_b, fragment_prefix / b)
sys.exit(-1)
shutil.move(fragment_prefix / "back-audio.mp4", fragment_prefix / "back.mp4")
shutil.move(fragment_prefix / "front-mixed.mp4", fragment_prefix / "front.mp4")
if options["keep_audio"]:
shutil.move(fragment_prefix / "audio-center.wav", fragment_prefix / "vocals.wav")
shutil.move(fragment_prefix / "audio-front.wav", fragment_prefix / "foley.wav")
shutil.move(fragment_prefix / "audio-back.wav", fragment_prefix / "original.wav")
shutil.move(fragment_prefix / "front-5.1.mp4", fragment_prefix / "front.mp4")
for fn in (
"audio-5.1.mp4",
"audio-center.wav", "audio-rear.wav",
"audio-center.wav", "audio-rear.wav", "audio-center.wav",
"audio-front.wav", "audio-back.wav", "back-audio.mp4",
"fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
"audio-stereo.wav",
):
fn = fragment_prefix / fn
if os.path.exists(fn):
os.unlink(fn)
if options["single_file"]:
cmds = []
base_prefix = Path(base_prefix)
for timeline in (
"front",
"back",
"audio-back",
"audio-center",
"audio-front",
"audio-rear",
):
timelines = list(sorted(glob('%s/*/%s.kdenlive' % (base_prefix, timeline))))
ext = '.mp4'
if '/audio' in timelines[0]:
ext = '.wav'
out = base_prefix / (timeline + ext)
cmd = get_melt() + timelines + [
'-quiet',
'-consumer', 'avformat:%s' % out,
]
if ext == '.wav':
cmd += ['vn=1']
else:
cmd += ['an=1']
cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
cmds.append(cmd)
for src, out1, out2 in (
("audio-front.wav", "fl.wav", "fr.wav"),
("audio-center.wav", "fc.wav", "lfe.wav"),
("audio-rear.wav", "bl.wav", "br.wav"),
):
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / src,
"-filter_complex",
"[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
"-map", "[left]", base_prefix / out1,
"-map", "[right]", base_prefix / out2,
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / "fl.wav",
"-i", base_prefix / "fr.wav",
"-i", base_prefix / "fc.wav",
"-i", base_prefix / "lfe.wav",
"-i", base_prefix / "bl.wav",
"-i", base_prefix / "br.wav",
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
"-map", "[a]", "-c:a", "aac", base_prefix / "audio-5.1.mp4"
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / "front.mp4",
"-i", base_prefix / "audio-5.1.mp4",
"-c", "copy",
"-movflags", "+faststart",
base_prefix / "front-mixed.mp4",
])
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", base_prefix / "back.mp4",
"-i", base_prefix / "audio-back.wav",
"-c:v", "copy",
"-movflags", "+faststart",
base_prefix / "back-audio.mp4",
])
for cmd in cmds:
if options["debug"]:
print(" ".join([str(x) for x in cmd]))
subprocess.call(cmd)
for a, b in (
("back-audio.mp4", "back.mp4"),
("front-mixed.mp4", "back.mp4"),
):
duration_a = ox.avinfo(str(base_prefix / a))['duration']
duration_b = ox.avinfo(str(base_prefix / b))['duration']
if duration_a != duration_b:
print('!!', duration_a, base_prefix / a)
print('!!', duration_b, base_prefix / b)
sys.exit(-1)
shutil.move(base_prefix / "back-audio.mp4", base_prefix / "back.mp4")
shutil.move(base_prefix / "front-mixed.mp4", base_prefix / "front.mp4")
if options["keep_audio"]:
shutil.move(base_prefix / "audio-center.wav", base_prefix / "vocals.wav")
shutil.move(base_prefix / "audio-front.wav", base_prefix / "foley.wav")
shutil.move(base_prefix / "audio-back.wav", base_prefix / "original.wav")
for fn in (
"audio-5.1.mp4",
"audio-center.wav", "audio-rear.wav",
"audio-front.wav", "audio-back.wav", "back-audio.mp4",
"fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
):
fn = base_prefix / fn
if os.path.exists(fn):
os.unlink(fn)
join_subtitles(base_prefix, options)
print("Duration - Target: %s Actual: %s" % (target_position, position))
print(json.dumps(dict(stats), sort_keys=True, indent=2))
with open(_cache, "w") as fd:
json.dump(_CACHE, fd)
def add_translations(sub, lang):
value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if sub.languages:
value = ox.strip_tags(value)
if lang:
for slang in lang:
if slang == "en":
slang = None
for tsub in sub.item.annotations.filter(layer="subtitles", start=sub.start, end=sub.end, languages=slang):
tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if tsub.languages:
tvalue = ox.strip_tags(tvalue)
value += '\n' + tvalue
return value
def add_translations_dict(sub, langs):
values = {}
value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if sub.languages:
value = ox.strip_tags(value)
values[sub.languages] = value
else:
values["en"] = value
for slang in langs:
slang_value = None if slang == "en" else slang
if sub.languages == slang_value:
continue
for tsub in sub.item.annotations.filter(
layer="subtitles", start=sub.start, end=sub.end,
languages=slang_value
):
tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if tsub.languages:
tvalue = ox.strip_tags(tvalue)
values[slang] = tvalue
return values
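# Editorial note: for an English subtitle that also has a German translation,
# add_translations_dict() returns something like {"en": "Hello.", "de": "Hallo."}
# (values illustrative); get_srt() below stores this as sdata["values"] so
# ass_encode() can emit one styled line per language.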
def get_srt(sub, offset, lang, tlang):
def get_srt(sub, offset=0):
sdata = sub.json(keys=['in', 'out', 'value'])
sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
if tlang:
sdata['value'] = add_translations(sub, tlang)
langs = [lang]
if tlang:
langs += tlang
sdata['values'] = add_translations_dict(sub, langs)
sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n')
if offset:
sdata["in"] += offset
sdata["out"] += offset
@@ -872,56 +574,12 @@ def fix_overlaps(data):
previous = sub
return data
def shift_clips(data, offset):
for clip in data:
clip['in'] += offset
clip['out'] += offset
return data
def scene_subtitles(scene, options):
import item.models
offset = 0
subs = []
lang, tlang = parse_lang(options["lang"])
for clip in scene['audio-center']['A1']:
if not clip.get("blank"):
batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
vo = item.models.Item.objects.filter(
data__batch__icontains=batch, data__title__startswith=fragment_id + '_'
).first()
if vo:
#print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
for sub in vo.annotations.filter(
layer="subtitles"
).filter(
languages=None if lang == "en" else lang
).exclude(value="").order_by("start"):
sdata = get_srt(sub, offset, lang, tlang)
subs.append(sdata)
else:
print("could not find vo for %s" % clip['src'])
offset += clip['duration']
return subs
def load_defaults(options):
path = os.path.join(options["prefix"], "options.json")
if os.path.exists(path):
with open(path) as fd:
defaults = json.load(fd)
for key in defaults:
if key not in options:
options[key] = defaults[key]
return options
def update_subtitles(options):
import item.models
options = load_defaults(options)
prefix = Path(options['prefix'])
duration = int(options['duration'])
base = int(options['offset'])
lang, tlang = parse_lang(options["lang"])
_cache = os.path.join(prefix, "cache.json")
if os.path.exists(_cache):
@@ -931,64 +589,27 @@ def update_subtitles(options):
base_prefix = prefix / 'render' / str(base)
for folder in os.listdir(base_prefix):
folder = base_prefix / folder
scene_json = folder / "scene.json"
if not os.path.exists(scene_json):
continue
with open(scene_json) as fd:
with open(folder / "scene.json") as fd:
scene = json.load(fd)
subs = scene_subtitles(scene, options)
write_subtitles(subs, folder, options)
offset = 0
subs = []
for clip in scene['audio-center']['A1']:
if not clip.get("blank"):
batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
vo = item.models.Item.objects.filter(data__batch__icontains=batch, data__title__startswith=fragment_id + '_').first()
if vo:
#print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
for sub in vo.annotations.filter(layer="subtitles").exclude(value="").order_by("start"):
sdata = get_srt(sub, offset)
subs.append(sdata)
else:
print("could not find vo for %s" % clip['src'])
offset += clip['duration']
path = folder / "front.srt"
data = fix_overlaps(subs)
srt = ox.srt.encode(subs)
write_if_new(str(path), srt, 'b')
def ass_encode(subs, options):
if "lang" in options:
langs = options["lang"].split(',')
else:
langs = list(subs[0]["values"])
#print('ass_encode', langs, options)
#print(subs)
header = '''[Script Info]
ScriptType: v4.00+
PlayResX: 1920
PlayResY: 1080
ScaledBorderAndShadow: yes
YCbCr Matrix: None
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
'''
ass = header
offset = options.get("sub_margin", 10)
spacing = options.get("sub_spacing", 20)
height = 42
styles = []
for lang in reversed(langs):
if isinstance(options.get("font"), list) and lang in options["font"]:
font = options["font"][lang]
else:
font = 'SimHei' if lang in ('zh', 'jp') else 'Menlo'
if isinstance(options.get("font_size"), list) and lang in options["font_size"]:
size = options["font_size"][lang]
else:
size = 46 if font == 'SimHei' else 42
styles.append(
f'Style: {lang},{font},{size},&Hffffff,&Hffffff,&H0,&H0,0,0,0,0,100,100,0,0,1,1,0,2,10,10,{offset},1'
)
offset += size + spacing
ass += '\n'.join(reversed(styles)) + '\n'
events = [
'Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text'
]
for sub in subs:
start = ox.format_timecode(sub['in']).rstrip('0')
stop = ox.format_timecode(sub['out']).rstrip('0')
for lang in reversed(langs):
value = sub['values'][lang]
event = f'Dialogue: 0,{start},{stop},{lang},,0,0,0,,{value}'
events.append(event)
ass += '\n\n[Events]\n' + '\n'.join(events) + '\n'
return ass
def update_m3u(render_prefix, exclude=[]):
files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4"))
@@ -1015,24 +636,19 @@ def render_infinity(options):
prefix = options['prefix']
duration = int(options['duration'])
defaults = {
"offset": 100,
"max-items": 30,
"no_video": False,
}
state_f = os.path.join(prefix, "infinity.json")
if os.path.exists(state_f):
with open(state_f) as fd:
state = json.load(fd)
else:
state = {}
for key in ("prefix", "duration", "debug", "single_file", "keep_audio", "stereo_downmix"):
state = {
"offset": 100,
"max-items": 30,
"no_video": False,
}
for key in ("prefix", "duration"):
state[key] = options[key]
for key in defaults:
if key not in state:
state[key] = defaults[key]
while True:
render_prefix = state["prefix"] + "/render/"
current = [
@@ -1040,8 +656,8 @@ def render_infinity(options):
if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
]
if len(current) > state["max-items"]:
current = ox.sorted_strings(current)
remove = current[:-state["max-items"]]
current = list(reversed(ox.sorted_strings(current)))
remove = list(reversed(current[-state["max-items"]:]))
update_m3u(render_prefix, exclude=remove)
for folder in remove:
folder = render_prefix + folder
@@ -1059,157 +675,3 @@ def render_infinity(options):
with open(state_f + "~", "w") as fd:
json.dump(state, fd, indent=2)
shutil.move(state_f + "~", state_f)
def join_subtitles(base_prefix, options):
'''
subtitles = list(sorted(glob('%s/*/front.srt' % base_prefix)))
data = []
position = 0
for srt in subtitles:
scene = srt.replace('front.srt', 'scene.json')
data += ox.srt.load(srt, offset=position)
position += get_scene_duration(scene)
with open(base_prefix / 'front.srt', 'wb') as fd:
fd.write(ox.srt.encode(data))
'''
scenes = list(sorted(glob('%s/*/scene.json' % base_prefix)))
data = []
position = 0
for scene_json in scenes:
with open(scene_json) as fd:
scene = json.load(fd)
subs = scene_subtitles(scene, options)
data += shift_clips(subs, position)
position += get_scene_duration(scene)
write_subtitles(data, base_prefix, options)
def resolve_roman(s):
extra = re.compile(r'^\d+(.*?)$').findall(s)
if extra:
extra = extra[0].lower()
new = {
'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
'vi': '6', 'vii': '7', 'viii': '8', 'ix': '9', 'x': '10'
}.get(extra, extra)
return s.replace(extra, new)
return s
def generate_clips(options):
import item.models
import itemlist.models
options = load_defaults(options)
prefix = options['prefix']
lang, tlang = parse_lang(options["lang"])
if options['censored']:
censored_list = itemlist.models.List.get(options["censored"])
censored = list(censored_list.get_items(
censored_list.user
).all().values_list('public_id', flat=True))
clips = []
for i in item.models.Item.objects.filter(sort__type='original'):
original_target = ""
qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
if qs.count() >= 1:
clip = {}
durations = []
for e in item.models.Item.objects.filter(data__title=i.data['title']):
if 'type' not in e.data:
print("ignoring invalid video %s (no type)" % e)
continue
if not e.files.filter(selected=True).exists():
continue
source = e.files.filter(selected=True)[0].data.path
ext = os.path.splitext(source)[1]
type_ = e.data['type'][0].lower()
target = os.path.join(prefix, type_, i.data['title'] + ext)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(source, target)
if type_ == "original":
original_target = target
if options['censored'] and e.public_id in censored:
clip[type_ + "_censored"] = target
target = '/srv/t_for_time/censored.mp4'
clip[type_] = target
durations.append(e.files.filter(selected=True)[0].duration)
clip["duration"] = min(durations)
if not clip["duration"]:
print('!!', durations, clip)
continue
cd = format_duration(clip["duration"], 24)
#if cd != clip["duration"]:
# print(clip["duration"], '->', cd, durations, clip)
clip["duration"] = cd
clip['tags'] = i.data.get('tags', [])
clip['editingtags'] = i.data.get('editingtags', [])
name = os.path.basename(original_target)
seqid = re.sub(r"Hotel Aporia_(\d+)", "S\\1_", name)
seqid = re.sub(r"Night March_(\d+)", "S\\1_", seqid)
seqid = re.sub(r"_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
seqid = seqid.split('_')[:2]
seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
seqid[1] = resolve_roman(seqid[1])
seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
if not seqid[1]:
seqid[1] = '0'
try:
clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
except:
print(name, seqid, 'failed')
raise
if "original" in clip and "foreground" in clip and "background" in clip:
clips.append(clip)
elif "original" in clip and "animation" in clip:
clips.append(clip)
else:
print("ignoring incomplete video", i)
with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
json.dump(clips, fd, indent=2, ensure_ascii=False)
print("using", len(clips), "clips")
voice_over = defaultdict(dict)
for vo in item.models.Item.objects.filter(
data__type__contains="Voice Over",
):
fragment_id = int(vo.get('title').split('_')[0])
source = vo.files.filter(selected=True)[0]
batch = vo.get('batch')[0].replace('Text-', '')
src = source.data.path
target = os.path.join(prefix, 'voice_over', batch, '%s.wav' % fragment_id)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(src, target)
subs = []
for sub in vo.annotations.filter(
layer="subtitles", languages=lang
).exclude(value="").order_by("start"):
sdata = get_srt(sub, 0, lang, tlang)
subs.append(sdata)
voice_over[fragment_id][batch] = {
"src": target,
"duration": format_duration(source.duration, 24),
"subs": subs
}
with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
json.dump(voice_over, fd, indent=2, ensure_ascii=False)
if options['censored']:
censored_mp4 = '/srv/t_for_time/censored.mp4'
if not os.path.exists(censored_mp4):
cmd = [
"ffmpeg",
"-nostats", "-loglevel", "error",
"-f", "lavfi",
"-i", "color=color=white:size=1920x1080:rate=24",
"-t", "3600",
"-c:v", "libx264",
"-pix_fmt", "yuv420p",
censored_mp4
]
subprocess.call(cmd)

View file

@@ -4,7 +4,6 @@ import subprocess
import lxml.etree
import uuid
import os
import sys
_CACHE = {}
_IDS = defaultdict(int)
@@ -13,14 +12,6 @@ def get_propery(element, name):
return element.xpath('property[@name="%s"]' % name)[0].text
def get_melt():
cmd = ['melt']
if 'XDG_RUNTIME_DIR' not in os.environ:
os.environ['XDG_RUNTIME_DIR'] = '/tmp/runtime-pandora'
if 'DISPLAY' not in os.environ:
cmd = ['xvfb-run', '-a'] + cmd
return cmd
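# Editorial note: get_melt() yields ['melt'] when a display is available and
# ['xvfb-run', '-a', 'melt'] otherwise, so callers can run e.g.
#   subprocess.check_output(get_melt() + [file, '-consumer', 'xml'])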
def melt_xml(file):
out = None
real_path = os.path.realpath(file)
@@ -29,8 +20,7 @@ def melt_xml(file):
if os.stat(real_path).st_mtime != ts:
out = None
if not out:
cmd = get_melt() + [file, '-consumer', 'xml']
out = subprocess.check_output(cmd).decode()
out = subprocess.check_output(['melt', file, '-consumer', 'xml']).decode()
_CACHE[file] = [os.stat(real_path).st_mtime, out]
return out
@@ -564,6 +554,7 @@ class KDEnliveProject:
] + value)
]
def properties(self, *props):
return [
self.get_element("property", attrib={"name": name}, text=str(value) if value is not None else value)

sax.py
View file

@@ -31,7 +31,7 @@ reverb = {
"src": reverb_wav,
"duration": 3600.0,
"filter": {
"volume": "3.5"
"volume": "0.5"
},
}
@@ -39,14 +39,14 @@ long = {
"src": long_wav,
"duration": 3600.0,
"filter": {
"volume": "-1"
"volume": "-4"
},
}
noise = {
"src": nois_wav,
"duration": 3600.0,
"filter": {
"volume": "7.75"
"volume": "4.75"
},
}

title.png

Binary file not shown (before: 8.1 KiB).