pandora_p_for_power/render.py

1138 lines
42 KiB
Python
Raw Normal View History

2023-10-08 12:19:05 +01:00
#!/usr/bin/python3
2023-11-12 18:30:06 +01:00
from collections import defaultdict
2024-01-22 15:06:40 +01:00
from glob import glob
2023-10-08 12:19:05 +01:00
import json
import os
2023-11-16 09:08:03 +01:00
import re
import shutil
2023-10-08 12:19:05 +01:00
import subprocess
import sys
import time
from pathlib import Path
2023-10-08 12:19:05 +01:00
import ox
2023-10-09 14:10:34 +01:00
from .pi import random
2026-01-06 15:04:26 +01:00
from .render_kdenlive import KDEnliveProject, _CACHE, get_melt
from .utils import resolve_roman, write_if_new, format_duration
from .render_utils import *
2023-10-08 12:19:05 +01:00
2025-11-27 18:09:06 +01:00
default_prefix = "/srv/p_for_power"
2023-10-08 12:19:05 +01:00
2023-11-16 09:08:03 +01:00
2026-01-24 13:26:30 +01:00
def get_loudnorm(file):
    """Return the cached loudnorm measurement string for *file*, measuring it if needed.

    *file* is a pan.do/ra file model instance: the result is cached in
    ``file.info["loudnorm"]`` and persisted with ``file.save()``, so ffmpeg is
    only run once per file.  The returned string is formatted as
    ``"L: <I>\tR: <LRA>\tP <TP>"`` (integrated loudness, loudness range, true
    peak) and is consumed as the ``results`` value of the timeline's
    ``loudness`` audio filter.

    Raises RuntimeError if ffmpeg's output contains no loudnorm JSON block.
    """
    if "loudnorm" in file.info:
        return file.info["loudnorm"]
    source = file.data.path
    cmd = [
        "ffmpeg",
        "-i", source,
        "-vn",
        "-af", "loudnorm=print_format=json",
        "-f", "null",
        "-"
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    # ffmpeg prints the loudnorm measurement JSON on stderr; grab the
    # first {...} span (the filter prints it as the last thing it emits).
    json_match = re.search(r"\{[\s\S]*\}", result.stderr)
    if not json_match:
        raise RuntimeError("Could not find loudnorm JSON output in ffmpeg output")
    loudnorm_data = json.loads(json_match.group(0))
    input_i = float(loudnorm_data.get("input_i", 0))    # Integrated loudness
    input_lra = float(loudnorm_data.get("input_lra", 0))  # Loudness range
    input_tp = float(loudnorm_data.get("input_tp", 0))    # True peak
    # NOTE: input_thresh was previously read here but never used; dropped.
    loudnorm = f"L: {input_i:.6f}\tR: {input_lra:.6f}\tP {input_tp:.6f}"
    file.info["loudnorm"] = loudnorm
    file.save()
    return loudnorm
def compose(clips, fragment, target=150, base=1024, voice_over=None, options=None):
    """Build a multi-timeline 'scene' dict for one fragment.

    Selects clips (tag-driven, seeded-random, preferring AI variants up to 60%
    of the target), lays them onto video/audio tracks, then interleaves voice
    over clips with random silence gaps on the center/rear audio timelines.

    Returns (scene, used): scene maps timeline name -> track name -> clip list
    (plus an optional "subtitles" key), used is the list of clips consumed.
    `seq` (from .pi.random) is a deterministic random stream seeded from
    `base`, so the same base reproduces the same composition.
    """
    if options is None:
        options = {}
    fps = 24
    length = 0
    # Timelines: 'front' video plus three audio timelines that are rendered
    # separately and later split/merged into a 5.1 (or stereo) mix.
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'audio-center': {
            'A1': [],
            'A2': [],
        },
        'audio-front': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
        'audio-rear': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
    }
    all_clips = clips.copy()
    seq = random(10000 + base * 1000)
    used = []
    selected_clips_length = 0
    ai_length = 0
    selected_clips = []
    tags = []
    # Phase 1: pre-select clips by cycling through the fragment's tags until
    # we have ~110% of the target duration to choose from.
    while selected_clips_length < target * 1.1:
        if not tags:
            # refill the tag pool once exhausted so selection keeps cycling
            tags = fragment["tags"].copy()
        tag = random_choice(seq, tags, pop=True)
        non_ai_clips = []
        ai_clips = []
        for clip in clips:
            if tag in clip["tags"]:
                if 'ai' in clip:
                    ai_clips.append(clip)
                else:
                    non_ai_clips.append(clip)
        # Prefer a clip with an AI variant while AI material is below 60%
        # of the target duration.
        if ai_length < target * 0.6 and ai_clips:
            clip = random_choice(seq, ai_clips, pop=True)
            clip["use_ai"] = True
            selected_clips.append(clip)
            selected_clips_length += clip['duration']
            ai_length += clip['duration']
            clips = [c for c in clips if c['id'] != clip['id']]
            continue
        available_clips = non_ai_clips + ai_clips
        if available_clips:
            clip = random_choice(seq, available_clips, pop=True)
            clip["use_ai"] = False
            selected_clips.append(clip)
            selected_clips_length += clip['duration']
            clips = [c for c in clips if c['id'] != clip['id']]
    clips = selected_clips
    clip = None
    # Phase 2: place clips on the timeline until we reach the target
    # duration (overshoot of up to 10% is allowed, see break below).
    while target - length > 0 and clips:
        '''
        if clip:
            if chance(seq, 0.5):
                next_seqid = clip['seqid'] + 1
                clip = get_clip_by_seqid(clips, next_seqid)
            else:
                clip = None
        '''
        clip = None
        if not clip:
            # FIXME: while not all clips have AI versions make sure we have one 50% of the time
            clip = random_choice(seq, clips, True)
        next_length = length + clip['duration']
        # stop if adding this clip would overshoot the target by more than 10%
        if target - next_length < -target*0.1:
            break
        # snap the duration to a whole number of frames at the output fps
        clip_duration = format_duration(clip['duration'], fps)
        if clip['duration'] != clip_duration:
            print("WTF", clip, clip['duration'], clip_duration)
        length += clip_duration
        # 50/50 source or ai
        src = clip['source']
        audio = clip['source']
        # select ai if we have one
        if 'ai' in clip and clip.get("use_ai"):
            src = random_choice(seq, list(clip['ai'].values()), False)
        print('%07.3f-%07.3f %07.3f %s (%s)' % (
            length-clip_duration,
            length,
            clip_duration,
            os.path.basename(clip['source']),
            src.split('/')[-2]
        ))
        # video on front:V2 (possibly the AI variant) ...
        scene['front']['V2'].append({
            'duration': clip_duration,
            'id': clip['id'],
            'src': src,
            "filter": {
            }
        })
        # ... audio always from the original source on audio-front:A2
        volume_front = '-17'
        if clip.get('volume') is not None:
            # per-clip gain adjustment relative to the -17 dB baseline
            volume_front = '%0.2f' % (float(volume_front) + clip['volume'])
        '''
            'dynamic_loudness': [
                ["target_loudness", "-35"],
                ["min_gain", "-15"],
                ["max_gin", "15"],
            ],
        '''
        audio_filter = {
            'mono': [
                ["channels", "2"],
            ],
            'loudness': [
                ["program", "-17"],
                # precomputed measurement from get_loudnorm()
                ["results", clip["loudnorm"]],
            ],
            'volume': volume_front,
            'fadein': '00:00:00.125'
        }
        scene['audio-front']['A2'].append({
            'duration': clip_duration,
            'id': clip['id'],
            'src': audio,
            'filter': audio_filter.copy()
        })
        length = format_duration(length, fps)
        # sanity check: audio and video tracks must stay frame-aligned
        # (1/48 s = half a frame at 24 fps)
        ad = get_scene_duration(scene, track='audio-front:A2')
        vd = get_scene_duration(scene, track='front:V2')
        if ad == vd and abs(ad-length) > 1/48:
            print('v: ', vd, 'ad', ad, 'length:', length, 'fixup')
            length = ad
        if abs(length -vd) > 1/48 or abs(length - ad) > 1/48 or ad != vd:
            print('vd: ', vd, 'ad', ad, 'length:', length)
            print(clip)
            sys.exit(-1)
        used.append(clip)
        # Ran out of clips before hitting the target: progressively relax the
        # reuse constraints (unused clips first, then used, then everything).
        if not clips and target - length > 0:
            print("not enough clips, need to reset")
            used_ids = {c['id'] for c in used}
            clips = [c for c in all_clips if c != clip and c['id'] not in used_ids]
            if not clips:
                print("not enough clips, also consider used")
                clips = [c for c in all_clips if c != clip]
            if not clips:
                print("not enough clips, also consider last clip")
                clips = all_clips.copy()
            for clip in clips:
                if "ai" in clip:
                    clip["use_ai"] = True
    # Phase 3: lay out voice over on center/rear, separated by random gaps.
    scene_duration = int(round(get_scene_duration(scene) * fps))
    voice_overs = []  # NOTE(review): collected but not read afterwards
    sub_offset = 0
    subs = []
    print("--")
    print("Voice Over:")
    if voice_over:
        vo_keys = list(sorted(voice_over))
        while int(sub_offset * fps) < scene_duration and vo_keys:
            # 2-7 s of silence before the first VO, 5-15 s between VOs
            if sub_offset:
                gap = (5 * fps + random_int(seq, 10 * fps)) / fps
            else:
                gap = (2 * fps + random_int(seq, 5 * fps)) / fps
            gap = format_duration(gap, fps)
            if int((sub_offset + gap) * fps) > scene_duration:
                # clamp the gap so it ends exactly at the scene end
                gap = format_duration((scene_duration - int(sub_offset * fps)) / fps, fps)
            for tl, track in (
                ('audio-center', 'A1'),
                ('audio-center', 'A2'),
                ('audio-rear', 'A1'),
                ('audio-rear', 'A2'),
            ):
                scene[tl][track].append({
                    'blank': True,
                    'duration': gap
                })
            print('%07.3f-%07.3f %07.3f' % (sub_offset, sub_offset+gap, gap), 'silence')
            sub_offset += gap
            # pick a VO group, then a random variant within it; a variant may
            # be a [main, secondary] pair
            vo_key = random_choice(seq, vo_keys, pop=True)
            variant = random_int(seq, len(voice_over[vo_key]))
            vo = voice_over[vo_key][variant]
            if isinstance(vo, list):
                vo, vo_b = vo
            else:
                vo_b = None
            # require at least 2 s of scene left after the VO ends
            min_end = 2
            while int((vo['duration'] + sub_offset + min_end) * fps) > scene_duration:
                if not vo_keys:
                    vo = None
                    break
                vo_key = random_choice(seq, vo_keys, pop=True)
                variant = random_int(seq, len(voice_over[vo_key]))
                vo = voice_over[vo_key][variant]
                if isinstance(vo, list):
                    vo, vo_b = vo
                else:
                    vo_b = None
            if vo is None:
                break
            print('%07.3f-%07.3f %07.3f' % (sub_offset, sub_offset+vo["duration"], vo["duration"]), vo["src"].split('/')[-1])
            voice_overs.append(vo)
            voc = vo.copy()
            # louder on center, quieter on rear; hotter mix for stereo downmix
            a, b = '-11', '-3'
            if options.get('stereo_downmix'):
                a, b = '-9', '-1'
            voc['filter'] = {'volume': a}
            scene['audio-center']['A1'].append(voc)
            vo_low = vo.copy()
            vo_low['filter'] = {'volume': b}
            scene['audio-rear']['A1'].append(vo_low)
            # shift the VO's subtitles to absolute scene time
            for sub in voc.get("subs", []):
                sub = sub.copy()
                sub["in"] += sub_offset
                sub["out"] += sub_offset
                subs.append(sub)
            if vo_b:
                vo_b = vo_b.copy()
                vo_b['filter'] = {'volume': a}
                scene['audio-center']['A2'].append(vo_b)
                vo_b = vo_b.copy()
                vo_b['filter'] = {'volume': b}
                scene['audio-rear']['A2'].append(vo_b)
            else:
                # keep A2 aligned with A1 by inserting matching blanks
                for tl, track in (
                    ('audio-center', 'A2'),
                    ('audio-rear', 'A2'),
                ):
                    scene[tl][track].append({
                        'blank': True,
                        'duration': voc["duration"]
                    })
            sub_offset += voc["duration"]
    if subs:
        scene["subtitles"] = subs
    sub_offset = format_duration(sub_offset, fps)
    # pad the VO timelines with trailing silence up to the scene duration
    if sub_offset < scene_duration/fps:
        gap = scene_duration/fps - sub_offset
        print('%07.3f-%07.3f %07.3f' % (sub_offset, sub_offset+gap, gap), 'silence')
        scene['audio-center']['A1'].append({
            'blank': True,
            'duration': gap
        })
        scene['audio-rear']['A1'].append({
            'blank': True,
            'duration': gap
        })
        sub_offset += gap
    '''
    print("scene duration: %0.3f vo: %0.3f (length: %0.3f, target: %0.3f)" % (
        get_scene_duration(scene),
        sub_offset,
        length,
        target
    ))
    '''
    print("scene duration: %0.3f (target: %0.3f)" % (
        get_scene_duration(scene),
        target
    ))
    return scene, used
2023-10-08 12:19:05 +01:00
def write_subtitles(data, folder, options):
    """Write the scene's subtitles into *folder* as SRT or ASS.

    Depending on options["subtitle_format"], exactly one of front.srt /
    segment.ass is written (via write_if_new) and the other format's file is
    removed if present, so a format switch never leaves a stale file behind.
    """
    data = fix_overlaps(data)
    srt_path = folder / "front.srt"
    ass_path = folder / "segment.ass"
    if options.get("subtitle_format") == "srt":
        write_if_new(str(srt_path), ox.srt.encode(data), 'b')
        if os.path.exists(ass_path):
            os.unlink(ass_path)
    else:
        if os.path.exists(srt_path):
            os.unlink(srt_path)
        write_if_new(str(ass_path), ass_encode(data, options), '')
def render(root, scene, prefix='', options=None):
    """Write one .kdenlive project file per timeline in *scene*.

    The special "subtitles" timeline is written via write_subtitles() instead.
    Audio timelines shorter than the scene are padded with a trailing blank so
    all timelines end on the same frame.  With options["debug"] the written
    project durations are cross-checked and a mismatch aborts the process
    (after persisting _CACHE so the expensive duration probing is not lost).

    Returns the list of written .kdenlive file paths.
    """
    if options is None:
        options = {}
    fps = 24
    files = []
    scene_duration = int(get_scene_duration(scene) * fps)
    for timeline, data in scene.items():
        if timeline == "subtitles":
            folder = Path(root) / prefix
            write_subtitles(data, folder, options)
            continue
        project = KDEnliveProject(root)
        # per-track frame counts, used below to find the longest track
        track_durations = {}
        for track, clips in data.items():
            for clip in clips:
                project.append_clip(track, clip)
            track_durations[track] = sum([int(c['duration'] * fps) for c in clips])
        if timeline.startswith('audio-'):
            # pad the longest audio track(s) so the timeline matches the scene
            track_duration = project.get_duration()
            delta = scene_duration - track_duration
            if delta > 0:
                for track in track_durations:
                    if track_durations[track] == track_duration:
                        project.append_clip(track, {'blank': True, "duration": delta/fps})
        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        project_xml = project.to_xml()
        write_if_new(path, project_xml)
        if options["debug"]:
            # check duration: re-parse the written file and compare against
            # both the in-memory project and the scene
            out_duration = get_project_duration(path)
            p_duration = project.get_duration()
            print(path, 'out: %s, project: %s, scene: %s' %(out_duration, p_duration, scene_duration))
            if p_duration != scene_duration:
                print(path, 'FAIL project: %s, scene: %s' %(p_duration, scene_duration))
                _cache = os.path.join(root, "cache.json")
                with open(_cache, "w") as fd:
                    json.dump(_CACHE, fd)
                sys.exit(1)
            if out_duration != p_duration:
                print(path, 'fail got: %s expected: %s' %(out_duration, p_duration))
                sys.exit(1)
        files.append(path)
    return files
2023-10-10 16:16:59 +01:00
2023-11-08 11:01:20 +01:00
def get_fragments(clips, voice_over, prefix):
    """Build the fragment list from featured pan.do/ra item lists.

    Every featured list whose name starts with a number becomes a fragment:
    its query conditions populate 'tags' / 'anti-tags', its items define which
    of *clips* belong to it, and *voice_over* (keyed by the numeric name
    prefix as a string) supplies the fragment's VO material.

    Returns fragments sorted by name.
    """
    import itemlist.models
    import item.models
    fragments = []
    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if l.name.split(' ')[0].isdigit():
            fragment_id = l.name.split(' ')[0]
            fragment = {
                'id': fragment_id,
                'name': l.name,
                'tags': [],
                'anti-tags': [],
                'description': l.description
            }
            for con in l.query['conditions']:
                if "conditions" in con:
                    for sub in con["conditions"]:
                        if sub['key'] == "tags" and sub['operator'] == '==':
                            fragment['tags'].append(sub['value'])
                        elif sub['key'] == "tags" and sub['operator'] == '!=':
                            # FIX: '!=' sub-conditions previously appended to
                            # 'tags'; they are exclusions, matching the
                            # top-level '!=' handling below.
                            fragment['anti-tags'].append(sub['value'])
                        elif sub['key'] == 'type' and sub['value'] in ('source', ''):
                            pass
                        else:
                            print(l.name, 'unknown sub condition', sub)
                elif con.get('key') == "tags" and con['operator'] == '==':
                    fragment['tags'].append(con['value'])
                elif con.get('key') == "tags" and con['operator'] == '!=':
                    fragment['anti-tags'].append(con['value'])
            # numeric id used for ordering/filtering (overwrites the string id)
            fragment["id"] = int(fragment['name'].split(' ')[0])
            # symlink targets of the list's selected files; clips whose
            # source is among them belong to this fragment
            sources = set()
            for i in l.get_items(l.user):
                orig = i.files.filter(selected=True).first()
                if orig:
                    ext = os.path.splitext(orig.data.path)[1]
                    if 'type' not in i.data:
                        print("FIXME", i)
                        continue
                    type_ = i.data['type'][0].lower()
                    target = os.path.join(prefix, 'video', type_, i.data['title'] + ext)
                    sources.add(target)
            fragment['clips'] = [clip for clip in clips if clip['source'] in sources]
            fragment["voice_over"] = voice_over.get(fragment_id, {})
            fragments.append(fragment)
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments
def render_all(options):
    """Render every fragment for one 'base' (edit seed) into prefix/render/<base>/.

    For each featured fragment: compose a scene, write .kdenlive timelines,
    render them with melt, split/merge the three audio timelines into a 5.1
    (or stereo) mix with ffmpeg and mux it with the video into segment.mp4.
    With options["single_file"] the per-fragment timelines are instead
    concatenated into one front.mp4 for the whole base.
    """
    options = load_defaults(options)
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])
    # warm the project-duration cache from the previous run
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))
    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    fragments = get_fragments(clips, voice_over, prefix)
    with open(os.path.join(prefix, "fragments.json"), "w") as fd:
        json.dump(fragments, fd, indent=2, ensure_ascii=False)
    # each fragment gets an equal share of the total duration; `target` is
    # adjusted per fragment below to correct accumulated drift
    position = target_position = 0
    target = fragment_target = duration / len(fragments)
    base_prefix = os.path.join(prefix, 'render', str(base))
    clips_used = []
    stats = defaultdict(lambda: 0)
    fragment_base = base
    for fragment in fragments:
        fragment_base += 1
        fragment_id = int(fragment['name'].split(' ')[0])
        # options["fragment"] limits rendering to a single fragment
        if options.get("fragment") and int(options["fragment"]) != fragment_id:
            continue
        name = fragment['name'].replace(' ', '_')
        if not fragment['clips']:
            print("skipping empty fragment", name)
            continue
        fragment_prefix = os.path.join(base_prefix, name)
        os.makedirs(fragment_prefix, exist_ok=True)
        fragment_clips = fragment['clips']
        # prefer clips not yet used by earlier fragments of this run
        used_ids = {c['id'] for c in clips_used}
        unused_fragment_clips = [c for c in fragment_clips if c['id'] not in used_ids]
        print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
        print('--')
        print('Video:')
        scene, used = compose(
            unused_fragment_clips,
            fragment=fragment,
            target=target,
            base=fragment_base,
            voice_over=fragment['voice_over'],
            options=options
        )
        clips_used += used
        scene_duration = get_scene_duration(scene)
        print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
        # count which VO batch was used (first non-blank rear clip)
        src = [a for a in scene['audio-rear']['A1'] if 'src' in a]
        if src:
            src = src[0]['src']
            stats[src.split('/')[-2]] += 1
        else:
            print("!! fixme, fragment without VO")
        # drift correction: shrink/grow the next fragment's target so the
        # total tracks `duration`
        position += scene_duration
        target_position += fragment_target
        if position > target_position:
            target = fragment_target - (position-target_position)
            print("adjusting target duration for next fragment: %6.3f -> %6.3f" % (fragment_target, target))
        elif position < target_position:
            target = target + 0.1 * fragment_target
        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/', options)
        scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
        write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)
        if not options['no_video'] and not options["single_file"]:
            # render each timeline with melt: video -> .mp4, audio -> .wav
            for timeline in timelines:
                print(timeline)
                ext = '.mp4'
                if '/audio' in timeline:
                    ext = '.wav'
                cmd = get_melt() + [
                    timeline,
                    '-quiet',
                    '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
                ]
                if ext == '.wav':
                    cmd += ['vn=1']
                else:
                    cmd += ['an=1']
                    #cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
                subprocess.call(cmd)
                if ext == '.wav' and timeline.endswith('audio.kdenlive'):
                    cmd = [
                        'ffmpeg', '-y',
                        '-nostats', '-loglevel', 'error',
                        '-i',
                        timeline.replace('.kdenlive', ext),
                        timeline.replace('.kdenlive', '.mp4')
                    ]
                    subprocess.call(cmd)
                    os.unlink(timeline.replace('.kdenlive', ext))
            cmds = []
            fragment_prefix = Path(fragment_prefix)
            # split each stereo stem into its two 5.1 channel files
            for src, out1, out2 in (
                ("audio-front.wav", "fl.wav", "fr.wav"),
                ("audio-center.wav", "fc.wav", "lfe.wav"),
                ("audio-rear.wav", "bl.wav", "br.wav"),
            ):
                cmds.append([
                    "ffmpeg", "-y",
                    "-nostats", "-loglevel", "error",
                    "-i", fragment_prefix / src,
                    "-filter_complex",
                    "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                    "-map", "[left]", fragment_prefix / out1,
                    "-map", "[right]", fragment_prefix / out2,
                ])
            # merge the six mono channels into one 5.1 AAC track
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "fl.wav",
                "-i", fragment_prefix / "fr.wav",
                "-i", fragment_prefix / "fc.wav",
                "-i", fragment_prefix / "lfe.wav",
                "-i", fragment_prefix / "bl.wav",
                "-i", fragment_prefix / "br.wav",
                "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
                "-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
            ])
            audio_front = "audio-5.1.mp4"
            copy = '-c'
            if options["stereo_downmix"]:
                # optional stereo mix of the three stems instead of 5.1
                cmds.append([
                    "ffmpeg", "-y",
                    "-nostats", "-loglevel", "error",
                    "-i", fragment_prefix / "audio-front.wav",
                    "-i", fragment_prefix / "audio-center.wav",
                    "-i", fragment_prefix / "audio-rear.wav",
                    "-filter_complex",
                    "amix=inputs=4:duration=longest:dropout_transition=0",
                    '-ac', '2', fragment_prefix / "audio-stereo.wav"
                ])
                audio_front = "audio-stereo.wav"
                copy = '-c:v'
            # mux video + chosen audio into front-mixed.mp4
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "front.mp4",
                "-i", fragment_prefix / audio_front,
                copy, "copy",
                "-movflags", "+faststart",
                fragment_prefix / "front-mixed.mp4",
            ])
            for cmd in cmds:
                if options["debug"]:
                    print(" ".join([str(x) for x in cmd]))
                subprocess.call(cmd)
            # verify all intermediates agree with the video duration
            # (1/48 s = half a frame at 24 fps)
            for a, b in (
                ("front-mixed.mp4", "front.mp4"),
                ("audio-center.wav", "front.mp4"),
                ("audio-rear.wav", "front.mp4"),
                ("audio-front.wav", "front.mp4"),
                ("audio-5.1.mp4", "front.mp4"),
            ):
                duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
                duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
                if abs(duration_a - duration_b) > 1/48:
                    print('!!', duration_a, fragment_prefix / a)
                    print('!!', duration_b, fragment_prefix / b)
                    #sys.exit(-1)
            shutil.move(fragment_prefix / "front-mixed.mp4", fragment_prefix / "segment.mp4")
            cleanup = [
                "front.mp4",
                "audio-5.1.mp4",
                "audio-center.wav",
                "audio-rear.wav",
                "audio-front.wav",
                "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
                "audio-stereo.wav",
            ]
            if options["keep_audio"]:
                # preserve the VO and foley stems under stable names
                shutil.move(fragment_prefix / "audio-center.wav", fragment_prefix / "vocals.wav")
                shutil.move(fragment_prefix / "audio-front.wav", fragment_prefix / "foley.wav")
            else:
                cleanup += [
                    "vocals.wav",
                    "foley.wav"
                ]
            for fn in cleanup:
                fn = fragment_prefix / fn
                if os.path.exists(fn):
                    os.unlink(fn)
    if options["single_file"]:
        # concatenate all fragments' timelines into one output per timeline,
        # then mix/mux exactly as in the per-fragment path above
        cmds = []
        base_prefix = Path(base_prefix)
        for timeline in (
            "front",
            "audio-center",
            "audio-front",
            "audio-rear",
        ):
            timelines = list(sorted(glob('%s/*/%s.kdenlive' % (base_prefix, timeline))))
            ext = '.mp4'
            if '/audio' in timelines[0]:
                ext = '.wav'
            out = base_prefix / (timeline + ext)
            cmd = get_melt() + timelines + [
                '-quiet',
                '-consumer', 'avformat:%s' % out,
            ]
            if ext == '.wav':
                cmd += ['vn=1']
            else:
                cmd += ['an=1']
            # NOTE(review): applied to audio renders too; harmless since
            # vn=1 disables video there — confirm intent
            cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
            cmds.append(cmd)
        for src, out1, out2 in (
            ("audio-front.wav", "fl.wav", "fr.wav"),
            ("audio-center.wav", "fc.wav", "lfe.wav"),
            ("audio-rear.wav", "bl.wav", "br.wav"),
        ):
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", base_prefix / src,
                "-filter_complex",
                "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                "-map", "[left]", base_prefix / out1,
                "-map", "[right]", base_prefix / out2,
            ])
        cmds.append([
            "ffmpeg", "-y",
            "-nostats", "-loglevel", "error",
            "-i", base_prefix / "fl.wav",
            "-i", base_prefix / "fr.wav",
            "-i", base_prefix / "fc.wav",
            "-i", base_prefix / "lfe.wav",
            "-i", base_prefix / "bl.wav",
            "-i", base_prefix / "br.wav",
            "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
            "-map", "[a]", "-c:a", "aac", base_prefix / "audio-5.1.mp4"
        ])
        cmds.append([
            "ffmpeg", "-y",
            "-nostats", "-loglevel", "error",
            "-i", base_prefix / "front.mp4",
            "-i", base_prefix / "audio-5.1.mp4",
            "-c", "copy",
            "-movflags", "+faststart",
            base_prefix / "front-mixed.mp4",
        ])
        for cmd in cmds:
            if options["debug"]:
                print(" ".join([str(x) for x in cmd]))
            subprocess.call(cmd)
        shutil.move(base_prefix / "front-mixed.mp4", base_prefix / "front.mp4")
        if options["keep_audio"]:
            shutil.move(base_prefix / "audio-center.wav", base_prefix / "vocals.wav")
            shutil.move(base_prefix / "audio-front.wav", base_prefix / "foley.wav")
        for fn in (
            "audio-5.1.mp4",
            "audio-center.wav", "audio-rear.wav",
            "audio-front.wav",
            "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
        ):
            fn = base_prefix / fn
            if os.path.exists(fn):
                os.unlink(fn)
        join_subtitles(base_prefix, options)
    print("Duration - Target: %s Actual: %s" % (target_position, position))
    print(json.dumps(dict(stats), sort_keys=True, indent=2))
    # persist the duration cache for the next run
    with open(_cache, "w") as fd:
        json.dump(_CACHE, fd)
2023-11-16 09:08:03 +01:00
2024-03-22 11:33:39 +01:00
def add_translations(sub, lang):
    """Return the text of subtitle annotation *sub* with translations appended.

    For every language code in *lang* ("en" maps to languages=None), matching
    subtitle annotations on the same item and time range are appended,
    newline-separated, after the original value.
    """
    def normalize(annotation):
        # collapse the <br> variants to real newlines, then trim; annotations
        # carrying a language tag additionally get their markup stripped
        text = annotation.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
        if annotation.languages:
            text = ox.strip_tags(text)
        return text

    value = normalize(sub)
    for slang in (lang or []):
        query_lang = None if slang == "en" else slang
        translations = sub.item.annotations.filter(
            layer="subtitles", start=sub.start, end=sub.end, languages=query_lang
        )
        for tsub in translations:
            value += '\n' + normalize(tsub)
    return value
def add_translations_dict(sub, langs):
    """Return {language: text} for subtitle annotation *sub* across *langs*.

    The annotation's own value is stored under its language (or "en" when it
    has none); for every other requested language the matching translation on
    the same item/time range is looked up ("en" maps to languages=None).
    """
    def normalize(annotation):
        text = annotation.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
        if annotation.languages:
            text = ox.strip_tags(text)
        return text

    values = {sub.languages if sub.languages else "en": normalize(sub)}
    for slang in langs:
        slang_value = None if slang == "en" else slang
        if sub.languages == slang_value:
            # this is the source annotation's own language, already stored
            continue
        matches = sub.item.annotations.filter(
            layer="subtitles", start=sub.start, end=sub.end,
            languages=slang_value
        )
        for tsub in matches:
            values[slang] = normalize(tsub)
    return values
def get_srt(sub, offset, lang, tlang):
    """Build one subtitle dict (in/out/value/values) from annotation *sub*.

    *offset* shifts the in/out timestamps to absolute scene time; *tlang*
    (list of translation languages, may be falsy) appends translations to
    'value' and extends the per-language 'values' dict.
    """
    sdata = sub.json(keys=['in', 'out', 'value'])
    cleaned = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    sdata['value'] = add_translations(sub, tlang) if tlang else cleaned
    langs = [lang]
    if tlang:
        langs += tlang
    sdata['values'] = add_translations_dict(sub, langs)
    if offset:
        sdata["in"] += offset
        sdata["out"] += offset
    return sdata
def scene_subtitles(scene, options):
    """Rebuild a scene's subtitles from the pan.do/ra database.

    Walks the VO clips on audio-center:A1, resolves each clip's source back to
    its database item (matched by batch and title prefix), and collects that
    item's subtitle annotations shifted to the clip's position in the scene.
    Returns the list of subtitle dicts (see get_srt()).
    """
    import item.models
    offset = 0
    subs = []
    lang, tlang = parse_lang(options["lang"])
    for clip in scene['audio-center']['A1']:
        if not clip.get("blank"):
            # src path looks like .../<batch>/<fragment_id>_... .wav
            batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
            vo = item.models.Item.objects.filter(
                data__batch__icontains=batch, data__title__startswith=fragment_id + '_'
            ).first()
            if vo:
                #print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
                for sub in vo.annotations.filter(
                    layer="subtitles"
                ).filter(
                    languages=None if lang == "en" else lang
                ).exclude(value="").order_by("start"):
                    sdata = get_srt(sub, offset, lang, tlang)
                    subs.append(sdata)
            else:
                print("could not find vo for %s" % clip['src'])
        # blanks and VO clips alike advance the scene-time offset
        offset += clip['duration']
    return subs
def load_defaults(options):
    """Fill missing keys of *options* from <prefix>/options.json, if present.

    Existing keys always win over file defaults.  Mutates and returns the
    same *options* dict.
    """
    path = os.path.join(options["prefix"], "options.json")
    if os.path.exists(path):
        with open(path) as fd:
            defaults = json.load(fd)
        for key, value in defaults.items():
            options.setdefault(key, value)
    return options
2023-11-16 09:08:03 +01:00
def update_subtitles(options):
    """Regenerate subtitle files for every already-rendered fragment.

    For each fragment folder under prefix/render/<offset>/ that contains a
    scene.json, the subtitles are rebuilt from the database
    (scene_subtitles) and rewritten — no video is re-rendered.
    """
    import item.models
    options = load_defaults(options)
    prefix = Path(options['prefix'])
    base = int(options['offset'])
    lang, tlang = parse_lang(options["lang"])
    # warm the duration cache used by the render helpers
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))
    base_prefix = prefix / 'render' / str(base)
    for folder in os.listdir(base_prefix):
        folder = base_prefix / folder
        scene_json = folder / "scene.json"
        if not os.path.exists(scene_json):
            # not a fragment folder
            continue
        with open(scene_json) as fd:
            scene = json.load(fd)
        subs = scene_subtitles(scene, options)
        write_subtitles(subs, folder, options)
2023-11-16 09:08:03 +01:00
2024-01-22 15:06:40 +01:00
def update_m3u(render_prefix, exclude=None):
    """Rewrite front.m3u listing all rendered segment.mp4 files.

    *exclude* is an optional list of folder prefixes (relative to
    render_prefix) to drop, used while stale renders are being removed.
    The playlist is written to a temp file and moved into place so readers
    never see a partial file.
    """
    # FIX: `exclude=[]` was a shared mutable default argument
    if exclude is None:
        exclude = []
    files = ox.sorted_strings(glob(render_prefix + "*/*/segment.mp4"))
    for ex in exclude:
        files = [f for f in files if not f.startswith(ex + "/")]
    front_m3u = "\n".join(files)
    # playlist entries are relative to the render prefix
    front_m3u = front_m3u.replace(render_prefix, "")
    front_m3u_f = render_prefix + "front.m3u"
    with open(front_m3u_f + "_", "w") as fd:
        fd.write(front_m3u)
    shutil.move(front_m3u_f + "_", front_m3u_f)
def render_infinity(options):
    """Endlessly render new bases and rotate old ones (runs forever).

    State (current offset etc.) is persisted in <prefix>/infinity.json, so the
    loop resumes where it left off after a restart.  Each iteration renders
    one base via render_all(), refreshes front.m3u, and prunes the oldest
    renders beyond "max-items" (only folders numbered >= 100 are managed).
    """
    options = load_defaults(options)
    prefix = options['prefix']
    duration = int(options['duration'])
    defaults = {
        "offset": 100,
        "max-items": 30,
        "no_video": False,
    }
    state_f = os.path.join(prefix, "infinity.json")
    if os.path.exists(state_f):
        with open(state_f) as fd:
            state = json.load(fd)
    else:
        state = {}
    # CLI options always override persisted state ...
    for key in ("prefix", "duration", "debug", "single_file", "keep_audio", "stereo_downmix"):
        state[key] = options[key]
    # ... while defaults only fill gaps
    for key in defaults:
        if key not in state:
            state[key] = defaults[key]
    while True:
        render_prefix = state["prefix"] + "/render/"
        # numeric render folders below the current offset (>= 100)
        current = [
            f for f in os.listdir(render_prefix)
            if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
        ]
        if len(current) > state["max-items"]:
            current = ox.sorted_strings(current)
            remove = current[:-state["max-items"]]
            # drop the doomed folders from the playlist before deleting them
            update_m3u(render_prefix, exclude=remove)
            for folder in remove:
                folder = render_prefix + folder
                print("remove", folder)
                shutil.rmtree(folder)
        render_all(state)
        update_m3u(render_prefix)
        state["offset"] += 1
        # atomic state update: write to a temp file, then move into place
        with open(state_f + "~", "w") as fd:
            json.dump(state, fd, indent=2)
        shutil.move(state_f + "~", state_f)
2024-12-03 20:12:15 +00:00
def join_subtitles(base_prefix, options):
    """Concatenate all fragments' subtitles into one file at *base_prefix*.

    Iterates the fragments' scene.json files in sorted order, rebuilds each
    scene's subtitles from the database, shifts them by the running position
    and writes the combined result next to the joined video.
    """
    position = 0
    combined = []
    for scene_json in sorted(glob('%s/*/scene.json' % base_prefix)):
        with open(scene_json) as fd:
            scene = json.load(fd)
        combined += shift_clips(scene_subtitles(scene, options), position)
        position += get_scene_duration(scene)
    write_subtitles(combined, base_prefix, options)
def generate_clips(options):
    """Collect source/AI clips and voice-over takes from the pandora database.

    Symlinks every selected file into the render prefix, derives per-clip
    metadata (duration trimmed to the output fps, tags, volume, a sortable
    numeric seqid) and writes clips.json and voice_over.json into
    options['prefix'].
    """
    import item.models
    import itemlist.models
    fps = 24
    options = load_defaults(options)
    prefix = options['prefix']
    lang, tlang = parse_lang(options["lang"])
    clips = []
    for i in item.models.Item.objects.filter(sort__type='source'):
        source_target = ""
        # kept for the disabled duplicate-title gate below
        qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
        #if qs.count() >= 1:
        if True:
            clip = {}
            durations = []
            # group all items sharing this title (the source + its AI variants)
            for e in item.models.Item.objects.filter(data__title=i.data['title']):
                if 'skip' in e.data.get('tags', []):
                    continue
                if 'type' not in e.data:
                    print("ignoring invalid video %s (no type)" % e)
                    continue
                if not e.files.filter(selected=True).exists():
                    continue
                selected = e.files.filter(selected=True)[0]
                source = selected.data.path
                ext = os.path.splitext(source)[1]
                type_ = e.data['type'][0].lower()
                ai_type = None
                if type_.startswith('ai:'):
                    if 'ai' not in clip:
                        clip['ai'] = {}
                    # make the ai key unique if the same ai type shows up twice
                    ai_type = type_[3:]
                    n = 1
                    while ai_type in clip['ai']:
                        ai_type = '%s-%s' % (type_[3:], n)
                        n += 1
                    type_ = 'ai:' + ai_type
                target = os.path.join(prefix, 'video', type_, i.data['title'] + ext)
                if type_ == "source":
                    source_target = target
                    # reuse the already-fetched selected file instead of re-querying
                    clip['loudnorm'] = get_loudnorm(selected)
                # FIX: the ai assignment must only happen for ai clips; the
                # previous unconditional duplicate crashed on non-ai clips.
                if type_.startswith('ai:'):
                    clip['ai'][ai_type] = target
                else:
                    clip[type_] = target
                os.makedirs(os.path.dirname(target), exist_ok=True)
                if os.path.islink(target):
                    os.unlink(target)
                os.symlink(source, target)
                durations.append(selected.duration)
            if not durations:
                print(i.public_id, 'no duration!', clip)
                continue
            clip["duration"] = min(durations) - 1/24
            # trim to a multiple of the output fps
            d1 = format_duration(clip["duration"], fps)
            if d1 != clip["duration"]:
                clip["duration"] = d1
            if not clip["duration"]:
                print('!!', durations, clip)
                continue
            cd = format_duration(clip["duration"], fps)
            clip["duration"] = cd
            clip['tags'] = i.data.get('tags', [])
            adjust_volume = i.data.get('adjustvolume', '')
            if adjust_volume:
                clip['volume'] = float(adjust_volume)
            clip['id'] = i.public_id
            # derive a sortable numeric sequence id from the source file name
            name = os.path.basename(source_target)
            seqid = re.sub(r"Hotel Aporia_(\d+)", "S\\1_", name)
            seqid = re.sub(r"Night March_(\d+)", "S\\1_", seqid)
            seqid = re.sub(r"_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
            seqid = seqid.split('_')[:2]
            seqid = [b[1:] if b[:1] in ('B', 'S') else '0' for b in seqid]
            seqid[1] = resolve_roman(seqid[1])
            seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
            if not seqid[1]:
                seqid[1] = '0'
            try:
                clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
            except Exception:
                print(name, seqid, 'failed')
                raise
            clips.append(clip)
    with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
        json.dump(clips, fd, indent=2, ensure_ascii=False)
    print("using", len(clips), "clips")

    voice_over = {}
    for vo in item.models.Item.objects.filter(
        data__type__icontains="voice over",
    ):
        title = vo.get('title')
        # title format: ch<NN>-<type>-<variant>[-ElevenLabs...]
        parts = title.split('-')
        fragment = '%02d' % int(parts[0].replace('ch', ''))
        # renamed from `type`: shadowing the builtin broke a type() call below
        vo_type = parts[1]
        variant = '-'.join(parts[2:]).split('-ElevenLabs')[0]
        source = vo.files.filter(selected=True)[0]
        src = source.data.path
        ext = src.split('.')[-1]
        target = os.path.join(prefix, 'voice_over', fragment, '%s-%s.%s' % (vo_type, variant, ext))
        os.makedirs(os.path.dirname(target), exist_ok=True)
        if os.path.islink(target):
            os.unlink(target)
        os.symlink(src, target)
        subs = []
        for sub in vo.annotations.filter(
            layer="subtitles", languages=lang
        ).exclude(value="").order_by("start"):
            sdata = get_srt(sub, 0, lang, tlang)
            subs.append(sdata)
        if fragment not in voice_over:
            voice_over[fragment] = {}
        if vo_type not in voice_over[fragment]:
            voice_over[fragment][vo_type] = []
        vo_variant = {
            "variant": variant,
            "id": vo.public_id,
            "src": target,
            #"duration": format_duration(source.duration, fps, True),
            "duration": source.duration,
            "subs": subs
        }
        # pair matching a/b takes of a quote into one list so they stay together
        done = False
        if vo_type == 'quote':
            if '-a-t' in variant:
                b_variant = variant.replace('-a-t', '-b-t').split('-t')[0]
                for old in voice_over[fragment][vo_type]:
                    # FIX: removed debug print(type(old)) — `type` was bound
                    # to a string here and raised TypeError at runtime.
                    if isinstance(old, list) and old[0]['variant'].startswith(b_variant):
                        old.insert(0, vo_variant)
                        done = True
            elif '-b-t' in variant:
                a_variant = variant.replace('-b-t', '-a-t').split('-t')[0]
                for old in voice_over[fragment][vo_type]:
                    if isinstance(old, list) and old[0]['variant'].startswith(a_variant):
                        old.append(vo_variant)
                        done = True
        # parenthesized for clarity; observable behavior is unchanged
        if not done and ('-a-t' in variant or '-b-t' in variant):
            vo_variant = [vo_variant]
        if not done:
            voice_over[fragment][vo_type].append(vo_variant)
    with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
        json.dump(voice_over, fd, indent=2, ensure_ascii=False)
2026-01-29 14:15:31 +01:00
def render_stats(offset, prefix=None):
    """Tally which clips of one rendered item use AI vs. source footage.

    Args:
        offset: name of the render folder (looked up under <prefix>/render).
        prefix: base directory; defaults to the module-level default_prefix
            (parameterized so the function is testable and reusable).

    Returns:
        dict with 'source' and 'ai' lists of clip public ids, one entry per
        clip occurrence (ids may repeat).
    """
    if prefix is None:
        prefix = default_prefix
    stats = {
        "source": [],
        "ai": [],
    }
    base_prefix = Path(prefix) / 'render' / str(offset)
    for folder in os.listdir(base_prefix):
        scene_json = base_prefix / folder / "scene.json"
        if not os.path.exists(scene_json):
            continue
        with open(scene_json) as fd:
            scene = json.load(fd)
        for timeline, tdata in scene.items():
            # subtitle entries are plain lists, only track dicts hold clips
            if isinstance(tdata, list):
                continue
            for track, clips in tdata.items():
                for clip in clips:
                    if 'src' not in clip:
                        continue
                    if 'id' not in clip:
                        # clip without public id: log and skip
                        print(clip)
                        continue
                    if 'ai:' in clip['src']:
                        stats['ai'].append(clip['id'])
                    else:
                        stats['source'].append(clip['id'])
    return stats
def update_unused():
    """Sync the 'Unused Material' list with clips not referenced by any render.

    Scans every numeric folder under <default_prefix>/render, collects the
    public ids of all clips used there (source and ai), then adds unreferenced
    source items to the list and removes referenced ones from it.
    """
    import itemlist.models
    import item.models
    unused_list = itemlist.models.List.objects.get(name='Unused Material')
    used = []
    for folder in os.listdir(Path(default_prefix) / 'render'):
        if folder.isdigit():
            stats = render_stats(folder)
            used += stats['source']
            used += stats['ai']
    # build the set once instead of per queryset
    used_ids = set(used)
    # FIX: was bare `Item`, which is not in scope (only `item.models` is
    # imported) and raised NameError.
    for i in item.models.Item.objects.all().exclude(public_id__in=used_ids).filter(data__type__icontains='source'):
        unused_list.add(i)
    for i in unused_list.items.filter(public_id__in=used_ids):
        unused_list.remove(i)
2026-01-29 16:59:45 +01:00
def unused_tags():
    """Write a report of tags that no generated fragment uses.

    Compares the tags referenced by the fragments built from clips.json and
    voice_over.json against all tags present in the database, and writes the
    unused ones — with the number of unused clips carrying each tag — to a
    static text file.
    """
    import itemlist.models
    import item.models
    prefix = default_prefix
    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    used = set()
    for fragment in get_fragments(clips, voice_over, prefix):
        used.update(fragment['tags'])
    every_tag = {t.value for t in item.models.Facet.objects.filter(key='tags').distinct()}
    remaining = every_tag - used
    pool = itemlist.models.List.objects.get(name='Unused Material').items.all()
    with open("/srv/pandora/static/power/unused-tags.txt", "w") as fd:
        for tag in sorted(remaining):
            count = pool.filter(data__tags__contains=tag).count()
            fd.write("%s (%d unused video clips)\n" % (tag, count))