2023-10-08 12:19:05 +01:00
|
|
|
#!/usr/bin/python3
|
2023-11-12 18:30:06 +01:00
|
|
|
from collections import defaultdict
|
2024-01-22 15:06:40 +01:00
|
|
|
from glob import glob
|
2023-10-08 12:19:05 +01:00
|
|
|
import json
|
|
|
|
|
import os
|
2023-11-16 09:08:03 +01:00
|
|
|
import re
|
|
|
|
|
import shutil
|
2023-10-08 12:19:05 +01:00
|
|
|
import subprocess
|
|
|
|
|
import sys
|
|
|
|
|
import time
|
2023-10-16 23:26:09 +01:00
|
|
|
from pathlib import Path
|
2023-10-08 12:19:05 +01:00
|
|
|
|
|
|
|
|
import ox
|
2024-12-04 09:16:24 +00:00
|
|
|
|
2023-10-09 14:10:34 +01:00
|
|
|
from .pi import random
|
2026-01-06 15:04:26 +01:00
|
|
|
from .render_kdenlive import KDEnliveProject, _CACHE, get_melt
|
2026-02-02 17:20:53 +01:00
|
|
|
from .utils import resolve_roman, write_if_new, format_duration, needs_update
|
2026-01-06 15:04:26 +01:00
|
|
|
from .render_utils import *
|
2023-10-08 12:19:05 +01:00
|
|
|
|
2025-11-27 18:09:06 +01:00
|
|
|
# Default project root. NOTE(review): not referenced in this chunk —
# render_all reads options['prefix'] instead; confirm usage elsewhere.
default_prefix = "/srv/p_for_power"
|
2023-10-08 12:19:05 +01:00
|
|
|
|
2023-11-16 09:08:03 +01:00
|
|
|
|
2026-01-24 13:26:30 +01:00
|
|
|
def get_loudnorm(file):
    """Measure and cache the loudnorm statistics of *file*'s audio track.

    Runs ffmpeg's ``loudnorm`` filter in analysis mode over the file's
    audio (video disabled via ``-vn``), parses the JSON block ffmpeg
    prints on stderr and stores a tab-separated summary string
    ``"L: <integrated>\\tR: <range>\\tP <true peak>"`` in ``file.info``.

    Args:
        file: project file object exposing ``info`` (dict persisted via
            ``save()``) and ``data.path`` (path to the media file).

    Returns:
        The loudnorm summary string (cached on subsequent calls).

    Raises:
        RuntimeError: if no loudnorm JSON block is found in ffmpeg's output.
    """
    # Cached result from a previous analysis run.
    if "loudnorm" in file.info:
        return file.info["loudnorm"]
    source = file.data.path
    cmd = [
        "ffmpeg",
        "-i", source,
        "-vn",
        "-af", "loudnorm=print_format=json",
        "-f", "null",
        "-"
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    # ffmpeg emits the loudnorm measurement as a JSON object on stderr;
    # grab the first {...} span.
    json_match = re.search(r"\{[\s\S]*\}", result.stderr)
    if not json_match:
        raise RuntimeError("Could not find loudnorm JSON output in ffmpeg output")
    loudnorm_data = json.loads(json_match.group(0))

    input_i = float(loudnorm_data.get("input_i", 0))      # Integrated loudness
    input_lra = float(loudnorm_data.get("input_lra", 0))  # Loudness range
    input_tp = float(loudnorm_data.get("input_tp", 0))    # True peak

    loudnorm = f"L: {input_i:.6f}\tR: {input_lra:.6f}\tP {input_tp:.6f}"
    file.info["loudnorm"] = loudnorm
    file.save()
    return loudnorm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def compose(clips, fragment, target=150, base=1024, voice_over=None, options=None):
    """Build the scene description for one fragment of the edit.

    Pre-selects clips tag by tag with a seeded pseudo-random sequence
    until roughly ``target`` seconds of material are gathered, lays them
    out on the video/audio tracks, then interleaves voice-over takes with
    silence gaps on the center/rear tracks.

    Args:
        clips: candidate clip dicts (``id``, ``source``, ``tags``,
            ``duration``, ``loudnorm``, optional ``ai`` variants, ...).
        fragment: fragment dict; only ``fragment["tags"]`` is read here.
        target: desired scene duration in seconds.
        base: per-fragment offset mixed into the random seed so every
            fragment gets a distinct but reproducible sequence.
        voice_over: optional mapping key -> list of voice-over variants;
            a variant may be a single take or a [main, secondary] pair.
        options: render options dict (not read inside compose currently).

    Returns:
        (scene, used): scene dict (timeline -> track -> clip entries,
        plus an optional "subtitles" list) and the list of clips used.
    """
    if options is None:
        options = {}
    fps = 24
    length = 0
    # timeline name -> track name -> ordered list of clip entries
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'audio-center': {
            'A1': [],
            'A2': [],
        },
        'audio-front': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
        'audio-rear': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
    }
    all_clips = clips.copy()
    # deterministic pseudo-random sequence, seeded per fragment
    seq = random(10000 + base * 1000)
    used = []

    selected_clips_length = 0
    ai_length = 0
    selected_clips = []

    # Phase 1: pre-select clips tag by tag until we hold ~10% more
    # material than the target duration.
    tags = []
    while selected_clips_length < target * 1.1:
        if not tags:
            tags = fragment["tags"].copy()
        tag = random_choice(seq, tags, pop=True)

        non_ai_clips = []
        ai_clips = []
        for clip in clips:
            if tag in clip["tags"]:
                if 'ai' in clip:
                    ai_clips.append(clip)
                else:
                    non_ai_clips.append(clip)
        # Prefer clips that have AI variants until those cover ~70% of target.
        if ai_length < target * 0.7 and ai_clips:
            clip = random_choice(seq, ai_clips, pop=True)
            clip["use_ai"] = True
            selected_clips.append(clip)
            selected_clips_length += clip['duration']
            ai_length += clip['duration']
            clips = [c for c in clips if c['id'] != clip['id']]
            continue

        available_clips = non_ai_clips + ai_clips
        if available_clips:
            clip = random_choice(seq, available_clips, pop=True)
            clip["use_ai"] = False
            selected_clips.append(clip)
            selected_clips_length += clip['duration']
            clips = [c for c in clips if c['id'] != clip['id']]

    clips = selected_clips
    clip = None
    # Phase 2: lay the selected clips onto the timeline up to the target.
    while target - length > 0 and clips:
        clip = None
        if not clip:
            # FIXME: while not all clips have AI versions make sure we have one 50% of the time
            clip = random_choice(seq, clips, True)
        next_length = length + clip['duration']
        # stop if this clip would overshoot the target by more than 10%
        if target - next_length < -target*0.1:
            break
        clip_duration = format_duration(clip['duration'], fps)
        if clip['duration'] != clip_duration:
            print("WTF", clip, clip['duration'], clip_duration)
        length += clip_duration

        # 50/50 source or ai
        src = clip['source']
        audio = clip['source']
        # select ai if we have one
        if 'ai' in clip and clip.get("use_ai"):
            src = random_choice(seq, list(clip['ai'].values()), False)

        print('%07.3f-%07.3f %07.3f %s (%s)' % (
            length-clip_duration,
            length,
            clip_duration,
            os.path.basename(clip['source']),
            src.split('/')[-2]
        ))

        scene['front']['V1'].append({
            'duration': clip_duration,
            'id': clip['id'],
            'src': src,
            "filter": {
            }
        })

        # base clip volume in dB; per-clip 'volume' is a relative offset
        volume_front = '-15'
        if clip.get('volume') is not None:
            volume_front = '%0.2f' % (float(volume_front) + clip['volume'])

        '''
        'dynamic_loudness': [
            ["target_loudness", "-35"],
            ["min_gain", "-15"],
            ["max_gin", "15"],
        ],
        '''

        audio_filter = {
            'mono': [
                ["channels", "2"],
            ],
            'loudness': [
                ["program", "-17"],
                ["results", clip["loudnorm"]],
            ],
            'volume': volume_front,
            'fadein': '00:00:00.125'
        }
        scene['audio-front']['A2'].append({
            'duration': clip_duration,
            'id': clip['id'],
            'src': audio,
            'filter': audio_filter.copy()
        })

        length = format_duration(length, fps)
        # sanity check: audio and video tracks must stay frame-aligned
        # (1/48 s ~ half a frame at 24 fps)
        ad = get_scene_duration(scene, track='audio-front:A2')
        vd = get_scene_duration(scene, track='front:V1')
        if ad == vd and abs(ad-length) > 1/48:
            print('v: ', vd, 'ad', ad, 'length:', length, 'fixup')
            length = ad
        if abs(length - vd) > 1/48 or abs(length - ad) > 1/48 or ad != vd:
            print('vd: ', vd, 'ad', ad, 'length:', length)
            print(clip)
            sys.exit(-1)
        used.append(clip)
        # Ran out of clips before reaching target: refill the pool,
        # avoiding already-used clips (and the current clip) if possible.
        if not clips and target - length > 0:
            print("not enough clips, need to reset")
            used_ids = {c['id'] for c in used}
            clips = [c for c in all_clips if c != clip and c['id'] not in used_ids]
            if not clips:
                print("not enough clips, also consider used")
                clips = [c for c in all_clips if c != clip]
            if not clips:
                print("not enough clips, also consider last clip")
                clips = all_clips.copy()
            for clip in clips:
                if "ai" in clip:
                    clip["use_ai"] = True

    # Phase 3: interleave voice-over takes with silence gaps.
    scene_duration = int(round(get_scene_duration(scene) * fps))
    voice_overs = []  # NOTE(review): collected but never read again in this function
    sub_offset = 0
    subs = []
    print("--")
    print("Voice Over:")
    if voice_over:
        vo_keys = list(sorted(voice_over))
        while int(sub_offset * fps) < scene_duration and vo_keys:
            # shorter lead-in gap at the very start, longer gaps later
            if sub_offset:
                gap = (5 * fps + random_int(seq, 10 * fps)) / fps
            else:
                gap = (2 * fps + random_int(seq, 5 * fps)) / fps
            gap = format_duration(gap, fps)
            if int((sub_offset + gap) * fps) > scene_duration:
                gap = format_duration((scene_duration - int(sub_offset * fps)) / fps, fps)
            for tl, track in (
                ('audio-center', 'A1'),
                ('audio-center', 'A2'),
                ('audio-rear', 'A1'),
                ('audio-rear', 'A2'),
                ('front', 'V2'),
            ):
                scene[tl][track].append({
                    'blank': True,
                    'duration': gap
                })
            print('%07.3f-%07.3f %07.3f' % (sub_offset, sub_offset+gap, gap), 'silence')
            sub_offset += gap

            vo_key = random_choice(seq, vo_keys, pop=True)
            variant = random_int(seq, len(voice_over[vo_key]))
            vo = voice_over[vo_key][variant]
            # a list variant is a [main take, secondary take (vo_b)] pair
            if isinstance(vo, list):
                vo, vo_b = vo
            else:
                vo_b = None
            # skip takes that would not fit before the scene ends
            # (keep min_end seconds of tail room)
            min_end = 2
            while int((vo['duration'] + sub_offset + min_end) * fps) > scene_duration:
                if not vo_keys:
                    vo = None
                    break
                vo_key = random_choice(seq, vo_keys, pop=True)
                variant = random_int(seq, len(voice_over[vo_key]))
                vo = voice_over[vo_key][variant]
                if isinstance(vo, list):
                    vo, vo_b = vo
                else:
                    vo_b = None
            if vo is None:
                break
            print('%07.3f-%07.3f %07.3f' % (sub_offset, sub_offset+vo["duration"], vo["duration"]), vo["src"].split('/')[-1])
            voice_overs.append(vo)
            voc = vo.copy()
            # volume pair: a = center channel, b = rear channel
            a, b = '4', '-2'
            #if options.get('stereo_downmix'):
            #    a, b = '5', '-1'
            if vo_b:
                a, b = '2', '-4'
                vo_b_a = '-1'
                vo_b_b = '-7'
            voc['filter'] = {'volume': a}
            scene['audio-center']['A1'].append(voc)
            vo_low = vo.copy()
            vo_low['filter'] = {'volume': b}
            scene['audio-rear']['A1'].append(vo_low)
            # shift the take's subtitles onto the scene timeline
            for sub in voc.get("subs", []):
                sub = sub.copy()
                sub["in"] += sub_offset
                sub["out"] += sub_offset
                subs.append(sub)
            if vo_b:
                vo_b = vo_b.copy()
                vo_b['filter'] = {'volume': vo_b_a}
                scene['audio-center']['A2'].append(vo_b)
                vo_b = vo_b.copy()
                vo_b['filter'] = {'volume': vo_b_b}
                scene['audio-rear']['A2'].append(vo_b)
            else:
                # keep the secondary tracks aligned with a blank of equal length
                for tl, track in (
                    ('audio-center', 'A2'),
                    ('audio-rear', 'A2'),
                ):
                    scene[tl][track].append({
                        'blank': True,
                        'duration': voc["duration"]
                    })
            # overlay the AI visual for the take on V2, or a blank
            if 'ai' in vo:
                scene['front']['V2'].append({
                    'duration': vo['duration'],
                    'id': vo['id'],
                    'src': vo['ai'],
                    "filter": {
                    }
                })
            else:
                scene['front']['V2'].append({
                    'blank': True,
                    'duration': vo["duration"]
                })
            sub_offset += voc["duration"]
        if subs:
            scene["subtitles"] = subs
        sub_offset = format_duration(sub_offset, fps)

        # pad the voice-over tracks with silence up to the scene end
        if sub_offset < scene_duration/fps:
            gap = scene_duration/fps - sub_offset
            print('%07.3f-%07.3f %07.3f' % (sub_offset, sub_offset+gap, gap), 'silence')
            scene['audio-center']['A1'].append({
                'blank': True,
                'duration': gap
            })
            scene['audio-rear']['A1'].append({
                'blank': True,
                'duration': gap
            })
            scene['front']['V2'].append({
                'blank': True,
                'duration': gap
            })
            sub_offset += gap
    '''
    print("scene duration: %0.3f vo: %0.3f (length: %0.3f, target: %0.3f)" % (
        get_scene_duration(scene),
        sub_offset,
        length,
        target
    ))
    '''
    print("scene duration: %0.3f (target: %0.3f)" % (
        get_scene_duration(scene),
        target
    ))
    return scene, used
|
2023-10-08 12:19:05 +01:00
|
|
|
|
2025-11-14 12:17:31 +01:00
|
|
|
def write_subtitles(data, folder, options):
    """Write the subtitle file for a segment into *folder*.

    Depending on ``options["subtitle_format"]`` this emits either
    ``front.srt`` (removing a stale ``segment.ass``) or ``segment.ass``
    (removing a stale ``front.srt``); overlapping cues are fixed first.
    """
    data = fix_overlaps(data)
    srt_path = folder / "front.srt"
    ass_path = folder / "segment.ass"
    if options.get("subtitle_format") == "srt":
        write_if_new(str(srt_path), ox.srt.encode(data), 'b')
        # drop any leftover ASS file from a previous run
        if os.path.exists(ass_path):
            os.unlink(ass_path)
    else:
        # drop any leftover SRT file from a previous run
        if os.path.exists(srt_path):
            os.unlink(srt_path)
        write_if_new(str(ass_path), ass_encode(data, options), '')
|
2025-11-14 12:17:31 +01:00
|
|
|
|
|
|
|
|
|
2024-12-04 09:16:24 +00:00
|
|
|
def render(root, scene, prefix='', options=None):
    """Write one kdenlive project file per timeline of *scene*.

    For each timeline ("front", "audio-center", ...) a KDEnliveProject
    is built from the scene's track/clip data and serialized to
    ``<root>/<prefix><timeline>.kdenlive``; the "subtitles" entry is
    handled by write_subtitles instead. Audio timelines shorter than the
    scene are padded with a trailing blank so all timelines match.

    Args:
        root: project root directory.
        scene: scene dict as produced by compose().
        prefix: path prefix (e.g. "render/1/Name/") inside *root*.
        options: render options; ``debug`` enables duration cross-checks
            that abort the process on mismatch.

    Returns:
        List of the .kdenlive file paths written.
    """
    if options is None:
        options = {}
    fps = 24
    files = []
    scene_duration = int(get_scene_duration(scene) * fps)
    for timeline, data in scene.items():
        if timeline == "subtitles":
            folder = Path(root) / prefix
            write_subtitles(data, folder, options)
            continue
        #print(timeline)
        project = KDEnliveProject(root)
        track_durations = {}
        for track, clips in data.items():
            #print(track)
            for clip in clips:
                project.append_clip(track, clip)
            track_durations[track] = sum([int(c['duration'] * fps) for c in clips])
        if timeline.startswith('audio-'):
            # pad the shortest audio track(s) so the timeline reaches the
            # full scene duration
            track_duration = project.get_duration()
            delta = scene_duration - track_duration
            if delta > 0:
                for track in track_durations:
                    if track_durations[track] == track_duration:
                        project.append_clip(track, {'blank': True, "duration": delta/fps})

        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        project_xml = project.to_xml()
        write_if_new(path, project_xml)

        # was options["debug"]: use .get() so a missing key doesn't raise
        if options.get("debug"):
            # check duration
            out_duration = get_project_duration(path)
            p_duration = project.get_duration()
            print(path, 'out: %s, project: %s, scene: %s' %(out_duration, p_duration, scene_duration))
            if p_duration != scene_duration:
                print(path, 'FAIL project: %s, scene: %s' %(p_duration, scene_duration))
                # dump the melt duration cache for post-mortem inspection
                _cache = os.path.join(root, "cache.json")
                with open(_cache, "w") as fd:
                    json.dump(_CACHE, fd)
                sys.exit(1)
            if out_duration != p_duration:
                print(path, 'fail got: %s expected: %s' %(out_duration, p_duration))
                sys.exit(1)

        files.append(path)
    return files
|
2023-10-10 16:16:59 +01:00
|
|
|
|
2023-11-08 11:01:20 +01:00
|
|
|
def get_fragments(clips, voice_over, prefix):
    """Collect fragment definitions from featured lists.

    Every featured list whose name starts with a number becomes a
    fragment: its query conditions are translated into ``tags`` /
    ``anti-tags``, its items are mapped to expected source paths under
    ``<prefix>/video/<type>/`` and the *clips* whose source matches are
    attached, together with the fragment's voice-over variants.

    Args:
        clips: clip dicts with a ``source`` path.
        voice_over: mapping fragment id (string) -> voice-over data.
        prefix: project root used to build the expected source paths.

    Returns:
        Fragments sorted by name (natural sort via ox.sort_string).
    """
    import itemlist.models
    import item.models

    fragments = []

    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if l.name.split(' ')[0].isdigit():
            fragment_id = l.name.split(' ')[0]
            fragment = {
                'id': fragment_id,
                'name': l.name,
                'tags': [],
                'anti-tags': [],
                'description': l.description
            }
            for con in l.query['conditions']:
                if "conditions" in con:
                    for sub in con["conditions"]:
                        if sub['key'] == "tags" and sub['operator'] == '==':
                            fragment['tags'].append(sub['value'].lower().strip())
                        elif sub['key'] == "tags" and sub['operator'] == '!=':
                            # fix: negated sub-conditions belong in anti-tags
                            # (previously appended to 'tags', mirroring neither
                            # the top-level '!==' case nor the intent)
                            fragment['anti-tags'].append(sub['value'].lower().strip())
                        elif sub['key'] == 'type' and sub['value'] in ('source', ''):
                            pass
                        else:
                            print(l.name, 'unknown sub condition', sub)
                elif con.get('key') == "tags" and con['operator'] == '==':
                    fragment['tags'].append(con['value'].lower().strip())
                elif con.get('key') == "tags" and con['operator'] == '!==':
                    fragment['anti-tags'].append(con['value'].lower().strip())

            fragment["id"] = int(fragment['name'].split(' ')[0])
            # expected on-disk source paths of the list's selected items;
            # a set makes the per-clip membership test O(1)
            sources = set()
            for i in l.get_items(l.user):
                orig = i.files.filter(selected=True).first()
                if orig:
                    ext = os.path.splitext(orig.data.path)[1]
                    if 'type' not in i.data:
                        print("FIXME", i)
                        continue
                    type_ = i.data['type'][0].lower()
                    target = os.path.join(prefix, 'video', type_, "%s-%s%s" % (i.data['title'], i.public_id, ext))
                    sources.add(target)
            fragment['clips'] = []
            for clip in clips:
                #if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']):
                if clip['source'] in sources:
                    fragment['clips'].append(clip)
            # voice_over is keyed by the string form of the fragment number
            fragment["voice_over"] = voice_over.get(fragment_id, {})
            fragments.append(fragment)

    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments
|
|
|
|
|
|
|
|
|
|
def render_all(options):
|
2025-11-21 11:00:40 +01:00
|
|
|
options = load_defaults(options)
|
2023-10-10 16:16:59 +01:00
|
|
|
prefix = options['prefix']
|
|
|
|
|
duration = int(options['duration'])
|
|
|
|
|
base = int(options['offset'])
|
|
|
|
|
|
2023-10-16 23:26:09 +01:00
|
|
|
_cache = os.path.join(prefix, "cache.json")
|
|
|
|
|
if os.path.exists(_cache):
|
|
|
|
|
with open(_cache) as fd:
|
|
|
|
|
_CACHE.update(json.load(fd))
|
|
|
|
|
|
2023-10-10 16:16:59 +01:00
|
|
|
with open(os.path.join(prefix, "clips.json")) as fd:
|
|
|
|
|
clips = json.load(fd)
|
2023-10-16 23:26:09 +01:00
|
|
|
with open(os.path.join(prefix, "voice_over.json")) as fd:
|
|
|
|
|
voice_over = json.load(fd)
|
2023-11-08 11:01:20 +01:00
|
|
|
fragments = get_fragments(clips, voice_over, prefix)
|
2023-10-16 23:26:09 +01:00
|
|
|
with open(os.path.join(prefix, "fragments.json"), "w") as fd:
|
|
|
|
|
json.dump(fragments, fd, indent=2, ensure_ascii=False)
|
2023-10-10 16:16:59 +01:00
|
|
|
position = target_position = 0
|
|
|
|
|
target = fragment_target = duration / len(fragments)
|
|
|
|
|
base_prefix = os.path.join(prefix, 'render', str(base))
|
2023-11-01 09:23:00 +01:00
|
|
|
clips_used = []
|
2023-11-12 18:30:06 +01:00
|
|
|
|
|
|
|
|
stats = defaultdict(lambda: 0)
|
2023-11-20 23:08:48 +00:00
|
|
|
fragment_base = base
|
2023-10-10 16:16:59 +01:00
|
|
|
for fragment in fragments:
|
2023-11-20 23:08:48 +00:00
|
|
|
fragment_base += 1
|
2023-10-16 23:26:09 +01:00
|
|
|
fragment_id = int(fragment['name'].split(' ')[0])
|
2026-01-24 20:21:31 +01:00
|
|
|
if options.get("fragment") and int(options["fragment"]) != fragment_id:
|
2026-01-13 12:03:44 +00:00
|
|
|
continue
|
2023-10-10 16:16:59 +01:00
|
|
|
name = fragment['name'].replace(' ', '_')
|
|
|
|
|
if not fragment['clips']:
|
|
|
|
|
print("skipping empty fragment", name)
|
|
|
|
|
continue
|
|
|
|
|
fragment_prefix = os.path.join(base_prefix, name)
|
|
|
|
|
os.makedirs(fragment_prefix, exist_ok=True)
|
2023-11-01 09:23:00 +01:00
|
|
|
fragment_clips = fragment['clips']
|
2026-01-24 13:26:30 +01:00
|
|
|
used_ids = {c['id'] for c in clips_used}
|
2026-01-27 13:31:33 +01:00
|
|
|
unused_fragment_clips = [c for c in fragment_clips if c['id'] not in used_ids]
|
2023-11-01 09:23:00 +01:00
|
|
|
print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
|
2026-01-24 13:26:30 +01:00
|
|
|
print('--')
|
|
|
|
|
print('Video:')
|
2025-05-21 13:17:50 +01:00
|
|
|
scene, used = compose(
|
|
|
|
|
unused_fragment_clips,
|
2026-01-24 13:26:30 +01:00
|
|
|
fragment=fragment,
|
2025-05-21 13:17:50 +01:00
|
|
|
target=target,
|
|
|
|
|
base=fragment_base,
|
|
|
|
|
voice_over=fragment['voice_over'],
|
|
|
|
|
options=options
|
|
|
|
|
)
|
2023-11-01 09:23:00 +01:00
|
|
|
clips_used += used
|
2023-10-10 16:16:59 +01:00
|
|
|
scene_duration = get_scene_duration(scene)
|
2023-10-29 19:51:26 +01:00
|
|
|
print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
|
2026-01-06 20:08:31 +01:00
|
|
|
src = [a for a in scene['audio-rear']['A1'] if 'src' in a]
|
|
|
|
|
if src:
|
|
|
|
|
src = src[0]['src']
|
|
|
|
|
stats[src.split('/')[-2]] += 1
|
|
|
|
|
else:
|
2026-01-24 13:26:30 +01:00
|
|
|
print("!! fixme, fragment without VO")
|
2023-11-12 18:30:06 +01:00
|
|
|
|
2023-10-10 16:16:59 +01:00
|
|
|
position += scene_duration
|
|
|
|
|
target_position += fragment_target
|
|
|
|
|
if position > target_position:
|
|
|
|
|
target = fragment_target - (position-target_position)
|
2023-10-29 19:51:26 +01:00
|
|
|
print("adjusting target duration for next fragment: %6.3f -> %6.3f" % (fragment_target, target))
|
2023-10-10 16:16:59 +01:00
|
|
|
elif position < target_position:
|
|
|
|
|
target = target + 0.1 * fragment_target
|
|
|
|
|
|
2024-12-04 09:16:24 +00:00
|
|
|
timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/', options)
|
2023-10-10 16:16:59 +01:00
|
|
|
|
2023-11-16 09:08:03 +01:00
|
|
|
scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
|
2026-02-02 15:33:55 +01:00
|
|
|
scene_json_path = os.path.join(fragment_prefix, 'scene.json')
|
|
|
|
|
segment_path = os.path.join(fragment_prefix, 'segment.mp4')
|
|
|
|
|
is_new = write_if_new(scene_json_path, scene_json)
|
|
|
|
|
if not is_new:
|
|
|
|
|
if not os.path.exists(segment_path) or os.lstat(scene_json_path).st_mtime > os.lstat(segment_path).st_mtime:
|
|
|
|
|
is_new = True
|
|
|
|
|
print("%s needs update" % segment_path)
|
|
|
|
|
if not is_new:
|
|
|
|
|
print("%s did not change" % scene_json_path)
|
|
|
|
|
|
|
|
|
|
if is_new and not options['no_video'] and not options["single_file"]:
|
2023-10-10 16:16:59 +01:00
|
|
|
for timeline in timelines:
|
2023-10-16 23:26:09 +01:00
|
|
|
print(timeline)
|
2023-10-10 16:16:59 +01:00
|
|
|
ext = '.mp4'
|
2023-10-16 23:26:09 +01:00
|
|
|
if '/audio' in timeline:
|
2023-10-10 16:16:59 +01:00
|
|
|
ext = '.wav'
|
2026-02-02 17:20:53 +01:00
|
|
|
out = '%s' % timeline.replace('.kdenlive', ext)
|
2026-02-03 18:15:08 +01:00
|
|
|
tmp_out = '%s' % timeline.replace('.kdenlive', ".tmp" + ext)
|
2024-12-04 09:16:24 +00:00
|
|
|
cmd = get_melt() + [
|
|
|
|
|
timeline,
|
2023-10-22 11:18:03 +01:00
|
|
|
'-quiet',
|
2026-02-03 18:15:08 +01:00
|
|
|
'-consumer', 'avformat:%s' % tmp_out,
|
2023-10-10 16:16:59 +01:00
|
|
|
]
|
2023-10-22 11:18:03 +01:00
|
|
|
if ext == '.wav':
|
|
|
|
|
cmd += ['vn=1']
|
|
|
|
|
else:
|
2023-11-09 01:11:51 +01:00
|
|
|
cmd += ['an=1']
|
2026-02-01 22:12:31 +01:00
|
|
|
if options.get("use_qsv"):
|
|
|
|
|
cmd += ['vcodec=h264_qsv', 'pix_fmt=nv12', 'rc=icq', 'global_quality=17']
|
|
|
|
|
elif options.get("only_keyframes"):
|
|
|
|
|
cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
|
2026-02-02 17:20:53 +01:00
|
|
|
if needs_update(timeline, out):
|
|
|
|
|
subprocess.call(cmd)
|
2026-02-03 18:15:08 +01:00
|
|
|
shutil.move(tmp_out, out)
|
2023-10-16 23:26:09 +01:00
|
|
|
if ext == '.wav' and timeline.endswith('audio.kdenlive'):
|
2023-10-10 16:16:59 +01:00
|
|
|
cmd = [
|
2023-10-16 23:26:09 +01:00
|
|
|
'ffmpeg', '-y',
|
|
|
|
|
'-nostats', '-loglevel', 'error',
|
|
|
|
|
'-i',
|
2023-10-10 16:16:59 +01:00
|
|
|
timeline.replace('.kdenlive', ext),
|
|
|
|
|
timeline.replace('.kdenlive', '.mp4')
|
|
|
|
|
]
|
2026-02-02 17:20:53 +01:00
|
|
|
wav = timeline.replace('.kdenlive', ext)
|
|
|
|
|
mp4 = timeline.replace('.kdenlive', '.mp4')
|
|
|
|
|
if needs_update(wav, mp4):
|
|
|
|
|
subprocess.call(cmd)
|
|
|
|
|
if not options.get("keep_parts"):
|
|
|
|
|
os.unlink(wav)
|
2023-10-10 16:16:59 +01:00
|
|
|
|
2023-10-16 23:26:09 +01:00
|
|
|
cmds = []
|
2024-12-03 20:12:15 +00:00
|
|
|
fragment_prefix = Path(fragment_prefix)
|
2023-10-16 23:26:09 +01:00
|
|
|
for src, out1, out2 in (
|
2023-11-03 22:15:20 +01:00
|
|
|
("audio-front.wav", "fl.wav", "fr.wav"),
|
|
|
|
|
("audio-center.wav", "fc.wav", "lfe.wav"),
|
|
|
|
|
("audio-rear.wav", "bl.wav", "br.wav"),
|
2023-10-16 23:26:09 +01:00
|
|
|
):
|
|
|
|
|
cmds.append([
|
2023-10-19 13:10:23 +01:00
|
|
|
"ffmpeg", "-y",
|
|
|
|
|
"-nostats", "-loglevel", "error",
|
|
|
|
|
"-i", fragment_prefix / src,
|
|
|
|
|
"-filter_complex",
|
2023-10-16 23:26:09 +01:00
|
|
|
"[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
|
|
|
|
|
"-map", "[left]", fragment_prefix / out1,
|
|
|
|
|
"-map", "[right]", fragment_prefix / out2,
|
|
|
|
|
])
|
2023-10-19 13:10:23 +01:00
|
|
|
cmds.append([
|
|
|
|
|
"ffmpeg", "-y",
|
|
|
|
|
"-nostats", "-loglevel", "error",
|
|
|
|
|
"-i", fragment_prefix / "fl.wav",
|
|
|
|
|
"-i", fragment_prefix / "fr.wav",
|
|
|
|
|
"-i", fragment_prefix / "fc.wav",
|
|
|
|
|
"-i", fragment_prefix / "lfe.wav",
|
|
|
|
|
"-i", fragment_prefix / "bl.wav",
|
|
|
|
|
"-i", fragment_prefix / "br.wav",
|
|
|
|
|
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
|
|
|
|
|
"-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
|
|
|
|
|
])
|
2025-05-10 10:04:25 +01:00
|
|
|
audio_front = "audio-5.1.mp4"
|
|
|
|
|
copy = '-c'
|
|
|
|
|
if options["stereo_downmix"]:
|
|
|
|
|
cmds.append([
|
|
|
|
|
"ffmpeg", "-y",
|
|
|
|
|
"-nostats", "-loglevel", "error",
|
|
|
|
|
"-i", fragment_prefix / "audio-front.wav",
|
|
|
|
|
"-i", fragment_prefix / "audio-center.wav",
|
|
|
|
|
"-i", fragment_prefix / "audio-rear.wav",
|
2025-05-20 12:02:36 +01:00
|
|
|
"-filter_complex",
|
|
|
|
|
"amix=inputs=4:duration=longest:dropout_transition=0",
|
|
|
|
|
'-ac', '2', fragment_prefix / "audio-stereo.wav"
|
2025-05-10 10:04:25 +01:00
|
|
|
])
|
|
|
|
|
audio_front = "audio-stereo.wav"
|
|
|
|
|
copy = '-c:v'
|
|
|
|
|
|
2023-10-28 11:24:51 +02:00
|
|
|
cmds.append([
|
|
|
|
|
"ffmpeg", "-y",
|
|
|
|
|
"-nostats", "-loglevel", "error",
|
2023-11-03 22:15:20 +01:00
|
|
|
"-i", fragment_prefix / "front.mp4",
|
2025-05-10 10:04:25 +01:00
|
|
|
"-i", fragment_prefix / audio_front,
|
|
|
|
|
copy, "copy",
|
2025-05-21 14:23:23 +01:00
|
|
|
"-movflags", "+faststart",
|
2025-05-10 10:04:25 +01:00
|
|
|
fragment_prefix / "front-mixed.mp4",
|
2023-10-28 11:24:51 +02:00
|
|
|
])
|
2023-10-16 23:26:09 +01:00
|
|
|
for cmd in cmds:
|
2024-12-03 20:12:15 +00:00
|
|
|
if options["debug"]:
|
|
|
|
|
print(" ".join([str(x) for x in cmd]))
|
2023-10-16 23:26:09 +01:00
|
|
|
subprocess.call(cmd)
|
2023-11-09 01:11:51 +01:00
|
|
|
|
|
|
|
|
for a, b in (
|
2025-05-10 10:04:25 +01:00
|
|
|
("front-mixed.mp4", "front.mp4"),
|
2026-01-26 18:35:29 +01:00
|
|
|
("audio-center.wav", "front.mp4"),
|
|
|
|
|
("audio-rear.wav", "front.mp4"),
|
|
|
|
|
("audio-front.wav", "front.mp4"),
|
|
|
|
|
("audio-5.1.mp4", "front.mp4"),
|
2023-11-09 01:11:51 +01:00
|
|
|
):
|
|
|
|
|
duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
|
|
|
|
|
duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
|
2026-01-26 18:35:29 +01:00
|
|
|
if abs(duration_a - duration_b) > 1/48:
|
2023-11-09 01:11:51 +01:00
|
|
|
print('!!', duration_a, fragment_prefix / a)
|
|
|
|
|
print('!!', duration_b, fragment_prefix / b)
|
2026-01-27 12:34:22 +01:00
|
|
|
#sys.exit(-1)
|
2026-01-27 14:21:29 +01:00
|
|
|
shutil.move(fragment_prefix / "front-mixed.mp4", fragment_prefix / "segment.mp4")
|
2026-01-27 12:34:22 +01:00
|
|
|
cleanup = [
|
2026-01-27 14:21:29 +01:00
|
|
|
"front.mp4",
|
2023-11-09 01:11:51 +01:00
|
|
|
"audio-5.1.mp4",
|
2026-01-27 14:21:29 +01:00
|
|
|
"audio-center.wav",
|
|
|
|
|
"audio-rear.wav",
|
2026-01-06 15:04:26 +01:00
|
|
|
"audio-front.wav",
|
2023-11-09 01:11:51 +01:00
|
|
|
"fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
|
2025-05-10 10:04:25 +01:00
|
|
|
"audio-stereo.wav",
|
2026-01-27 12:34:22 +01:00
|
|
|
]
|
|
|
|
|
if options["keep_audio"]:
|
|
|
|
|
shutil.move(fragment_prefix / "audio-center.wav", fragment_prefix / "vocals.wav")
|
|
|
|
|
shutil.move(fragment_prefix / "audio-front.wav", fragment_prefix / "foley.wav")
|
|
|
|
|
else:
|
|
|
|
|
cleanup += [
|
|
|
|
|
"vocals.wav",
|
|
|
|
|
"foley.wav"
|
|
|
|
|
]
|
2026-02-02 17:20:53 +01:00
|
|
|
if not options.get("keep_parts"):
|
|
|
|
|
for fn in cleanup:
|
|
|
|
|
fn = fragment_prefix / fn
|
|
|
|
|
if os.path.exists(fn):
|
|
|
|
|
os.unlink(fn)
|
2023-11-08 23:55:08 +01:00
|
|
|
|
2026-02-02 15:33:55 +01:00
|
|
|
if is_new and options["single_file"]:
|
2024-12-03 20:12:15 +00:00
|
|
|
cmds = []
|
|
|
|
|
base_prefix = Path(base_prefix)
|
|
|
|
|
for timeline in (
|
|
|
|
|
"front",
|
|
|
|
|
"audio-center",
|
|
|
|
|
"audio-front",
|
|
|
|
|
"audio-rear",
|
|
|
|
|
):
|
|
|
|
|
timelines = list(sorted(glob('%s/*/%s.kdenlive' % (base_prefix, timeline))))
|
|
|
|
|
ext = '.mp4'
|
|
|
|
|
if '/audio' in timelines[0]:
|
|
|
|
|
ext = '.wav'
|
|
|
|
|
out = base_prefix / (timeline + ext)
|
2026-02-03 18:15:08 +01:00
|
|
|
tmp_out = base_prefix / (timeline + ".tmp" +ext)
|
2024-12-04 09:16:24 +00:00
|
|
|
cmd = get_melt() + timelines + [
|
2024-12-03 20:12:15 +00:00
|
|
|
'-quiet',
|
2026-02-03 18:15:08 +01:00
|
|
|
'-consumer', 'avformat:%s' % tmp_out,
|
2024-12-03 20:12:15 +00:00
|
|
|
]
|
|
|
|
|
if ext == '.wav':
|
|
|
|
|
cmd += ['vn=1']
|
|
|
|
|
else:
|
|
|
|
|
cmd += ['an=1']
|
2026-02-01 22:12:31 +01:00
|
|
|
if options.get("use_qsv"):
|
|
|
|
|
cmd += ['vcodec=h264_qsv', 'pix_fmt=nv12', 'rc=icq', 'global_quality=17']
|
|
|
|
|
elif options.get("only_keyframes"):
|
|
|
|
|
cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
|
2026-02-02 17:20:53 +01:00
|
|
|
if needs_update(timelines[0], out):
|
|
|
|
|
cmds.append(cmd)
|
2026-02-03 18:15:08 +01:00
|
|
|
shutil.move(tmp_out, out)
|
2026-02-02 17:20:53 +01:00
|
|
|
|
2024-12-03 20:12:15 +00:00
|
|
|
for src, out1, out2 in (
|
|
|
|
|
("audio-front.wav", "fl.wav", "fr.wav"),
|
|
|
|
|
("audio-center.wav", "fc.wav", "lfe.wav"),
|
|
|
|
|
("audio-rear.wav", "bl.wav", "br.wav"),
|
|
|
|
|
):
|
2026-02-02 17:20:53 +01:00
|
|
|
if needs_update(src, out1):
|
|
|
|
|
cmds.append([
|
|
|
|
|
"ffmpeg", "-y",
|
|
|
|
|
"-nostats", "-loglevel", "error",
|
|
|
|
|
"-i", base_prefix / src,
|
|
|
|
|
"-filter_complex",
|
|
|
|
|
"[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
|
|
|
|
|
"-map", "[left]", base_prefix / out1,
|
|
|
|
|
"-map", "[right]", base_prefix / out2,
|
|
|
|
|
])
|
|
|
|
|
|
2024-12-03 20:12:15 +00:00
|
|
|
cmds.append([
|
|
|
|
|
"ffmpeg", "-y",
|
|
|
|
|
"-nostats", "-loglevel", "error",
|
|
|
|
|
"-i", base_prefix / "fl.wav",
|
|
|
|
|
"-i", base_prefix / "fr.wav",
|
|
|
|
|
"-i", base_prefix / "fc.wav",
|
|
|
|
|
"-i", base_prefix / "lfe.wav",
|
|
|
|
|
"-i", base_prefix / "bl.wav",
|
|
|
|
|
"-i", base_prefix / "br.wav",
|
|
|
|
|
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
|
|
|
|
|
"-map", "[a]", "-c:a", "aac", base_prefix / "audio-5.1.mp4"
|
|
|
|
|
])
|
|
|
|
|
cmds.append([
|
|
|
|
|
"ffmpeg", "-y",
|
|
|
|
|
"-nostats", "-loglevel", "error",
|
|
|
|
|
"-i", base_prefix / "front.mp4",
|
|
|
|
|
"-i", base_prefix / "audio-5.1.mp4",
|
|
|
|
|
"-c", "copy",
|
2025-05-21 14:23:23 +01:00
|
|
|
"-movflags", "+faststart",
|
2025-05-10 10:04:25 +01:00
|
|
|
base_prefix / "front-mixed.mp4",
|
2024-12-03 20:12:15 +00:00
|
|
|
])
|
|
|
|
|
for cmd in cmds:
|
|
|
|
|
if options["debug"]:
|
|
|
|
|
print(" ".join([str(x) for x in cmd]))
|
|
|
|
|
subprocess.call(cmd)
|
|
|
|
|
|
2025-05-10 10:04:25 +01:00
|
|
|
shutil.move(base_prefix / "front-mixed.mp4", base_prefix / "front.mp4")
|
2024-12-03 20:12:15 +00:00
|
|
|
if options["keep_audio"]:
|
|
|
|
|
shutil.move(base_prefix / "audio-center.wav", base_prefix / "vocals.wav")
|
|
|
|
|
shutil.move(base_prefix / "audio-front.wav", base_prefix / "foley.wav")
|
|
|
|
|
for fn in (
|
|
|
|
|
"audio-5.1.mp4",
|
|
|
|
|
"audio-center.wav", "audio-rear.wav",
|
2026-01-06 15:04:26 +01:00
|
|
|
"audio-front.wav",
|
2024-12-03 20:12:15 +00:00
|
|
|
"fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
|
|
|
|
|
):
|
|
|
|
|
fn = base_prefix / fn
|
|
|
|
|
if os.path.exists(fn):
|
|
|
|
|
os.unlink(fn)
|
2025-11-14 12:17:31 +01:00
|
|
|
join_subtitles(base_prefix, options)
|
2024-12-03 20:12:15 +00:00
|
|
|
|
2023-10-16 23:26:09 +01:00
|
|
|
print("Duration - Target: %s Actual: %s" % (target_position, position))
|
2026-01-30 08:38:05 +01:00
|
|
|
#print(json.dumps(dict(stats), sort_keys=True, indent=2))
|
2023-10-16 23:26:09 +01:00
|
|
|
with open(_cache, "w") as fd:
|
|
|
|
|
json.dump(_CACHE, fd)
|
2023-11-16 09:08:03 +01:00
|
|
|
|
|
|
|
|
|
2024-03-22 11:33:39 +01:00
|
|
|
def add_translations(sub, lang):
    """Return the cleaned text of *sub*, with matching translations appended.

    For every language code in *lang* ("en" maps to the untagged default),
    annotations on the same item with the same in/out points are appended,
    each on its own line. HTML is stripped from language-tagged values.
    """
    def _clean(annotation):
        # Normalize <br> variants to newlines and trim surrounding whitespace.
        text = annotation.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
        if annotation.languages:
            text = ox.strip_tags(text)
        return text

    value = _clean(sub)
    for slang in (lang or []):
        # "en" subtitles are stored without a language tag.
        query_lang = None if slang == "en" else slang
        translations = sub.item.annotations.filter(
            layer="subtitles", start=sub.start, end=sub.end, languages=query_lang
        )
        for tsub in translations:
            value += '\n' + _clean(tsub)
    return value
|
|
|
|
|
|
2025-11-14 12:17:31 +01:00
|
|
|
def add_translations_dict(sub, langs):
    """Return {language: cleaned subtitle text} for *sub* and its translations.

    The subtitle's own language (untagged values count as "en") seeds the
    dict; for every other code in *langs*, matching annotations on the same
    item and time span contribute their cleaned value.
    """
    normalized = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if sub.languages:
        values = {sub.languages: ox.strip_tags(normalized)}
    else:
        values = {"en": normalized}
    for slang in langs:
        # "en" subtitles are stored without a language tag.
        wanted = None if slang == "en" else slang
        if sub.languages == wanted:
            # already covered by the subtitle itself
            continue
        matches = sub.item.annotations.filter(
            layer="subtitles", start=sub.start, end=sub.end,
            languages=wanted
        )
        for tsub in matches:
            text = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
            values[slang] = ox.strip_tags(text) if tsub.languages else text
    return values
|
|
|
|
|
|
|
|
|
|
def get_srt(sub, offset, lang, tlang):
    """Build one subtitle record (in/out/value/values) from annotation *sub*.

    *offset* shifts the in/out timestamps; *tlang* (optional list of extra
    languages) switches 'value' to the multi-language concatenation and is
    included in the per-language 'values' dict alongside *lang*.
    """
    sdata = sub.json(keys=['in', 'out', 'value'])
    cleaned = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    sdata['value'] = add_translations(sub, tlang) if tlang else cleaned
    langs = [lang] + (list(tlang) if tlang else [])
    sdata['values'] = add_translations_dict(sub, langs)
    if offset:
        sdata["in"] += offset
        sdata["out"] += offset
    return sdata
|
|
|
|
|
|
2025-11-14 12:17:31 +01:00
|
|
|
def scene_subtitles(scene, options):
    """Collect subtitle records for every voice-over clip on track A1.

    Each non-blank clip's source path encodes batch and fragment id, which
    are used to look up the matching voice-over item; its subtitles are
    emitted shifted by the clip's position in the scene.
    """
    import item.models

    lang, tlang = parse_lang(options["lang"])
    subs = []
    position = 0
    for clip in scene['audio-center']['A1']:
        if not clip.get("blank"):
            # src path: .../<batch>/<fragment_id>.wav
            batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
            vo = item.models.Item.objects.filter(
                data__batch__icontains=batch,
                data__title__startswith=fragment_id + '_'
            ).first()
            if not vo:
                print("could not find vo for %s" % clip['src'])
            else:
                matching = vo.annotations.filter(
                    layer="subtitles"
                ).filter(
                    languages=None if lang == "en" else lang
                ).exclude(value="").order_by("start")
                for sub in matching:
                    subs.append(get_srt(sub, position, lang, tlang))
        # blank clips still advance the timeline position
        position += clip['duration']
    return subs
|
|
|
|
|
|
2025-11-21 11:00:40 +01:00
|
|
|
|
2026-02-02 15:33:55 +01:00
|
|
|
# Hard-coded fallbacks applied after the per-project options.json.
DEFAULTS = {
    "max-items": 10,
    "no_video": False
}


def load_defaults(options):
    """Fill missing keys of *options* (in place) and return it.

    Values from <prefix>/options.json are applied first, then the
    module-level DEFAULTS; keys already present in *options* always win.
    """
    config_path = os.path.join(options["prefix"], "options.json")
    if os.path.exists(config_path):
        with open(config_path) as fd:
            file_defaults = json.load(fd)
        for key, value in file_defaults.items():
            options.setdefault(key, value)
    for key, value in DEFAULTS.items():
        options.setdefault(key, value)
    return options
|
|
|
|
|
|
2026-02-02 15:33:55 +01:00
|
|
|
def reload_options(options):
    """Re-read <prefix>/options.json and overwrite *options* (in place).

    Unlike load_defaults(), values from the file take precedence over
    whatever is already in *options*. Returns the same dict.
    """
    config_path = os.path.join(options["prefix"], "options.json")
    if os.path.exists(config_path):
        with open(config_path) as fd:
            current = json.load(fd)
        options.update(current)
    return options
|
2025-11-21 11:00:40 +01:00
|
|
|
|
2023-11-16 09:08:03 +01:00
|
|
|
def update_subtitles(options):
    """Regenerate subtitle files for one rendered edit.

    When options['offset'] is None, recurses once per numeric folder in
    <prefix>/render; otherwise processes render/<offset> only, writing
    subtitles next to each scene folder that contains a scene.json.
    """
    import item.models  # NOTE(review): not referenced directly here; scene_subtitles re-imports it — confirm before removing
    options = load_defaults(options)
    prefix = Path(options['prefix'])

    # No explicit offset: fan out over every numeric render folder.
    if options['offset'] is None:
        offsets = [int(folder) for folder in os.listdir(prefix / 'render') if folder.isdigit()]
        for offset in offsets:
            options['offset'] = offset
            update_subtitles(options)
        return

    base = int(options['offset'])
    lang, tlang = parse_lang(options["lang"])  # NOTE(review): lang/tlang look unused below — verify parse_lang has no required side effects

    # Warm the shared render cache from disk if present.
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))

    base_prefix = prefix / 'render' / str(base)
    for folder in os.listdir(base_prefix):
        folder = base_prefix / folder
        scene_json = folder / "scene.json"
        if not os.path.exists(scene_json):
            # not a scene folder (e.g. stray file); skip
            continue
        with open(scene_json) as fd:
            scene = json.load(fd)
        subs = scene_subtitles(scene, options)
        write_subtitles(subs, folder, options)
|
2023-11-16 09:08:03 +01:00
|
|
|
|
2024-01-22 15:06:40 +01:00
|
|
|
def update_m3u(render_prefix, exclude=()):
    """Rebuild <render_prefix>/front.m3u from all */*/segment.mp4 files.

    Paths in the playlist are relative to *render_prefix*.

    Args:
        render_prefix: render root, including trailing slash.
        exclude: iterable of folder paths whose segments are dropped from
            the playlist (compared with a "/" suffix, so only whole-folder
            prefixes match). Defaults to an immutable empty tuple — the
            previous mutable-list default was a shared-state hazard.
    """
    files = ox.sorted_strings(glob(render_prefix + "*/*/segment.mp4"))
    for ex in exclude:
        files = [f for f in files if not f.startswith(ex + "/")]
    front_m3u = "\n".join(files)
    front_m3u = front_m3u.replace(render_prefix, "")

    front_m3u_f = render_prefix + "front.m3u"

    # Write to a temp file and move into place so readers never see a
    # partially written playlist.
    with open(front_m3u_f + "_", "w") as fd:
        fd.write(front_m3u)
    shutil.move(front_m3u_f + "_", front_m3u_f)
|
|
|
|
|
|
|
|
|
|
def render_infinity(options):
    """Endlessly render new segments, pruning old render folders.

    Persists its loop state (render offset plus a copy of the relevant
    options) in <prefix>/infinity.json so the process can resume after a
    restart. Never returns.
    """
    options = load_defaults(options)
    prefix = options['prefix']
    duration = int(options['duration'])  # NOTE(review): computed but not used below — confirm before removing

    defaults = {
        "offset": 100,  # render folders below 100 are never touched by this loop
    }
    state_f = os.path.join(prefix, "infinity.json")
    if os.path.exists(state_f):
        with open(state_f) as fd:
            state = json.load(fd)
    else:
        state = {}

    # Options mirrored into the persisted state on every pass.
    option_keys = (
        "prefix",
        "duration",
        "debug",
        "single_file",
        "keep_audio",
        "stereo_downmix"
    )
    for key in option_keys:
        state[key] = options[key]

    for key in defaults:
        if key not in state:
            state[key] = defaults[key]

    while True:
        render_prefix = state["prefix"] + "/render/"
        # Numeric render folders in the managed range [100, current offset).
        current = [
            f for f in os.listdir(render_prefix)
            if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
        ]
        if len(current) > options["max-items"]:
            # Drop the oldest folders, removing them from the playlist first.
            current = ox.sorted_strings(current)
            remove = current[:-options["max-items"]]
            update_m3u(render_prefix, exclude=remove)
            for folder in remove:
                folder = render_prefix + folder
                print("remove", folder)
                shutil.rmtree(folder)
        render_all(state)
        update_m3u(render_prefix)
        state["offset"] += 1
        # Atomic state save: write to a temp file, then move into place.
        with open(state_f + "~", "w") as fd:
            json.dump(state, fd, indent=2)
        shutil.move(state_f + "~", state_f)
        # Pick up option edits made while the previous render was running.
        options = reload_options(options)
        for key in option_keys:
            state[key] = options[key]
|
2024-12-03 20:12:15 +00:00
|
|
|
|
|
|
|
|
|
2025-11-14 12:17:31 +01:00
|
|
|
def join_subtitles(base_prefix, options):
    """Merge the subtitles of every scene under *base_prefix* into one track.

    Scenes are processed in sorted path order; each scene's subtitles are
    shifted by the accumulated duration of the scenes before it, then the
    combined list is written out for *base_prefix*.
    """
    combined = []
    position = 0
    for scene_json in sorted(glob('%s/*/scene.json' % base_prefix)):
        with open(scene_json) as fd:
            scene = json.load(fd)
        scene_subs = scene_subtitles(scene, options)
        combined.extend(shift_clips(scene_subs, position))
        position += get_scene_duration(scene)
    write_subtitles(combined, base_prefix, options)
|
|
|
|
|
|
|
|
|
|
def generate_clips(options):
    """Scan the database and build clips.json / voice_over.json.

    Symlinks every selected source/AI file into <prefix>/video/<type>/ and
    every voice-over file into <prefix>/voice_over/<fragment>/, then writes
    the two JSON indexes that the renderer consumes.
    """
    import item.models
    import itemlist.models

    fps = 24  # output frame rate used to quantize clip durations
    options = load_defaults(options)
    prefix = options['prefix']
    lang, tlang = parse_lang(options["lang"])
    clips = []
    skip = []
    # Items on these two curated lists are excluded from the edit entirely.
    remove_from_edit = itemlist.models.List.objects.filter(name='Remove from Edit').first()
    if remove_from_edit:
        skip = [i.public_id for i in remove_from_edit.get_items(remove_from_edit.user).all()]
    not_yet = itemlist.models.List.objects.filter(name='Not yet').first()
    if not_yet:
        skip += [i.public_id for i in not_yet.get_items(not_yet.user).all()]
    if skip:
        skip = list(set(skip))
        print("skipping %s clips" % len(skip))
    for i in item.models.Item.objects.filter(sort__type='source'):
        if i.public_id in skip:
            continue
        source_target = ""  # NOTE(review): assigned below but apparently never read — confirm before removing
        qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)  # NOTE(review): only used by the disabled condition below
        #if qs.count() >= 1:
        if True:
            clip = {}
            durations = []
            # Gather all items sharing this title (the source + AI variants).
            for e in item.models.Item.objects.filter(data__title=i.data['title']):
                if 'skip' in e.data.get('tags', []):
                    continue
                if e.public_id in skip:
                    continue
                if 'type' not in e.data:
                    print("ignoring invalid video %s (no type)" % e)
                    continue
                if not e.files.filter(selected=True).exists():
                    continue
                selected = e.files.filter(selected=True)[0]
                source = selected.data.path
                ext = os.path.splitext(source)[1]
                type_ = e.data['type'][0].lower()
                ai_type = None
                if type_.startswith('ai:'):
                    if 'ai' not in clip:
                        clip['ai'] = {}
                    # De-duplicate AI type names by appending -1, -2, ...
                    ai_type = type_[3:]
                    n = 1
                    while ai_type in clip['ai']:
                        ai_type = '%s-%s' % (type_[3:], n)
                        n += 1
                    type_ = 'ai:' + ai_type
                target = os.path.join(prefix, 'video', type_, "%s-%s%s" % (i.data['title'], i.public_id, ext))
                if ai_type:
                    clip['ai'][ai_type] = target
                if type_ == "source":
                    source_target = target
                    # Loudness analysis for the primary source file (cached).
                    clip['loudnorm'] = get_loudnorm(e.files.filter(selected=True)[0])
                if type_.startswith('ai:'):
                    clip['ai'][ai_type] = target  # NOTE(review): repeats the assignment made above when ai_type is set
                else:
                    clip[type_] = target
                os.makedirs(os.path.dirname(target), exist_ok=True)
                # Refresh the symlink so it always points at the selected file.
                if os.path.islink(target):
                    os.unlink(target)
                os.symlink(source, target)
                durations.append(selected.duration)
            if not durations:
                print(i.public_id, 'no duration!', clip)
                continue
            # Use the shortest variant, minus one frame of head-room.
            clip["duration"] = min(durations) - 1/24
            # trim to a multiple of the output fps
            d1 = format_duration(clip["duration"], fps)
            if d1 != clip["duration"]:
                clip["duration"] = d1
            if not clip["duration"]:
                print('!!', durations, clip)
                continue
            cd = format_duration(clip["duration"], fps)
            clip["duration"] = cd
            clip['tags'] = [t.lower().strip() for t in i.data.get('tags', [])]
            adjust_volume = i.data.get('adjustvolume', '')
            if adjust_volume:
                clip['volume'] = float(adjust_volume)
            clip['id'] = i.public_id
            clips.append(clip)

    with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
        json.dump(clips, fd, indent=2, ensure_ascii=False)

    print("using", len(clips), "clips")

    voice_over = {}
    for vo in item.models.Item.objects.filter(
        data__type__icontains="voice over",
    ):
        title = vo.get('title')
        # Title pattern: ch<fragment>-<type>-<variant>[-ElevenLabs...]
        parts = title.split('-')

        fragment = '%02d' % int(parts[0].replace('ch', ''))
        type = parts[1]  # NOTE: shadows the builtin `type` for the rest of this loop body
        variant = '-'.join(parts[2:]).split('-ElevenLabs')[0]
        source = vo.files.filter(selected=True)[0]
        src = source.data.path
        ext = src.split('.')[-1]
        target = os.path.join(prefix, 'voice_over', fragment, '%s-%s-%s.%s' % (type, variant, vo.public_id, ext))
        os.makedirs(os.path.dirname(target), exist_ok=True)
        if os.path.islink(target):
            os.unlink(target)
        os.symlink(src, target)
        subs = []
        for sub in vo.annotations.filter(
            layer="subtitles", languages=lang
        ).exclude(value="").order_by("start"):
            sdata = get_srt(sub, 0, lang, tlang)
            subs.append(sdata)
        if fragment not in voice_over:
            voice_over[fragment] = {}
        if type not in voice_over[fragment]:
            voice_over[fragment][type] = []
        vo_variant = {
            "variant": variant,
            "id": vo.public_id,
            "src": target,
            #"duration": format_duration(source.duration, fps, True),
            "duration": source.duration,
            "subs": subs
        }
        # Attach a matching AI-generated video for this voice-over, if any.
        ai = item.models.Item.objects.filter(
            data__title=vo.data['title'],
            data__type__contains='ai:audio-to-video'
        ).first()
        if ai:
            ai_source = ai.files.filter(selected=True)[0]
            ai_src = ai_source.data.path
            ai_target = os.path.join(prefix, 'voice_video', fragment, '%s-%s-%s.%s' % (type, variant, ai.public_id, 'mp4'))
            os.makedirs(os.path.dirname(ai_target), exist_ok=True)
            if os.path.islink(ai_target):
                os.unlink(ai_target)
            os.symlink(ai_src, ai_target)
            vo_variant['ai'] = ai_target
        # Pair a/b takes of quotes into one [a, b] group instead of two entries.
        done = False
        if type == 'quote':
            if '-a-t' in variant:
                b_variant = variant.replace('-a-t', '-b-t').split('-t')[0]
                for old in voice_over[fragment][type]:
                    if isinstance(old, list) and old[0]['variant'].startswith(b_variant):
                        old.insert(0, vo_variant)
                        done = True
            elif '-b-t' in variant:
                a_variant = variant.replace('-b-t', '-a-t').split('-t')[0]
                for old in voice_over[fragment][type]:
                    if isinstance(old, list) and old[0]['variant'].startswith(a_variant):
                        old.append(vo_variant)
                        done = True
            # NOTE(review): parses as (not done and '-a-t' in variant) or ('-b-t' in variant);
            # seems harmless when done since the list is then discarded — confirm intent.
            if not done and '-a-t' in variant or '-b-t' in variant:
                vo_variant = [vo_variant]
        if not done:
            voice_over[fragment][type].append(vo_variant)
    with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
        json.dump(voice_over, fd, indent=2, ensure_ascii=False)
|
2026-01-29 14:15:31 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def render_stats(offset):
    """Tally clip ids used by the rendered edit at render/<offset>.

    Reads every scene.json below the offset folder and returns
    {"source": [...ids...], "ai": [...ids...]}, bucketing each clip by
    whether its src path contains 'ai:'. Ids may repeat.
    """
    stats = {
        "source": [],
        "ai": [],
    }
    base_prefix = Path(default_prefix) / 'render' / str(offset)
    for entry in os.listdir(base_prefix):
        scene_json = base_prefix / entry / "scene.json"
        if not os.path.exists(scene_json):
            continue
        with open(scene_json) as fd:
            scene = json.load(fd)
        for timeline, tdata in scene.items():
            if isinstance(tdata, list):
                # list-valued keys are scene metadata, not timelines
                continue
            for track, track_clips in tdata.items():
                for clip in track_clips:
                    if 'src' not in clip:
                        continue
                    if 'id' not in clip:
                        print(clip)
                        continue
                    bucket = 'ai' if 'ai:' in clip['src'] else 'source'
                    stats[bucket].append(clip['id'])
    return stats
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def update_unused():
    """Sync the 'Unused Material' list with the rendered edits.

    Collects every clip id referenced by any numeric render folder, adds
    unreferenced source items to the list, and removes items that have
    since been used.
    """
    import itemlist.models
    import item.models

    unused_list = itemlist.models.List.objects.get(name='Unused Material')
    used = []
    render_root = Path(default_prefix) / 'render'
    for folder in os.listdir(render_root):
        if not folder.isdigit():
            continue
        stats = render_stats(folder)
        used.extend(stats['source'])
        used.extend(stats['ai'])
    used_ids = set(used)
    candidates = item.models.Item.objects.all().exclude(
        public_id__in=used_ids
    ).filter(data__type__icontains='source')
    for unused_item in candidates:
        unused_list.add(unused_item)
    for used_item in unused_list.items.filter(public_id__in=used_ids):
        unused_list.remove(used_item)
|
2026-01-29 16:59:45 +01:00
|
|
|
|
|
|
|
|
def unused_tags():
    """Write unused-tags.txt listing tags no fragment definition queries.

    Compares the tag/anti-tag sets used by the fragment definitions against
    all tags in the database and, for each unused tag, counts how many
    unused vs. total source items carry it.
    """
    import itemlist.models
    import item.models
    prefix = default_prefix

    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    fragments = get_fragments(clips, voice_over, prefix)
    tags = []
    anti_tags = []

    for fragment in fragments:
        tags += fragment['tags']
        anti_tags += fragment['anti-tags']

    used_tags = set(tags)
    used_anti_tags = set(anti_tags)
    skip_tags = {'ai-failed', 'ai-fail', 'skip'}  # workflow tags, never reported
    all_tags = {t.value.strip().lower() for t in item.models.Facet.objects.filter(key='tags').distinct() if t.value.strip().lower() not in skip_tags}
    unused_tags = all_tags - used_tags - used_anti_tags  # NOTE: local name shadows this function; harmless here
    unused_items = itemlist.models.List.objects.get(name='Unused Material').items.all()

    unused = []
    for tag in sorted(unused_tags):
        total = item.models.Item.objects.filter(data__type__contains='source').filter(data__tags__icontains=tag).count()
        count = unused_items.filter(data__tags__icontains=tag).count()
        unused.append([count, tag, total])
    with open("/srv/pandora/static/power/unused-tags.txt", "w") as fd:
        # highest unused-count first
        for count, tag, total in reversed(sorted(unused)):
            fd.write("%s (%d unused video clips of %s)\n" % (tag, count, total))
|
2026-01-30 18:45:26 +01:00
|
|
|
|
|
|
|
|
def fragment_statistics():
    """Write fragments.txt with per-fragment tag counts and durations.

    For every featured list whose name starts with a number, extracts the
    tag/anti-tag conditions from its saved query, counts matching items per
    tag, and sums source and AI material durations. Returns the per-fragment
    tag-count dict.
    """
    import itemlist.models
    from item.models import Item
    stats = {}
    duration = {}
    ai_duration = {}
    prefix = default_prefix

    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)  # NOTE(review): loaded but apparently unused below — confirm before removing
    # Items on these curated lists are excluded from all counts.
    skip = []
    remove_from_edit = itemlist.models.List.objects.filter(name='Remove from Edit').first()
    if remove_from_edit:
        skip += [i.public_id for i in remove_from_edit.get_items(remove_from_edit.user).all()]
    not_yet = itemlist.models.List.objects.filter(name='Not yet').first()
    if not_yet:
        skip += [i.public_id for i in not_yet.get_items(not_yet.user).all()]
    if skip:
        skip = list(set(skip))

    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if l.name.split(' ')[0].isdigit():
            fragment_id = l.name.split(' ')[0]
            fragment = {
                'id': fragment_id,
                'name': l.name,
                'tags': [],
                'anti-tags': [],
                'description': l.description
            }
            # Extract tag conditions from the list's saved query (one level
            # of nesting supported).
            for con in l.query['conditions']:
                if "conditions" in con:
                    for sub in con["conditions"]:
                        if sub['key'] == "tags" and sub['operator'] == '==':
                            fragment['tags'].append(sub['value'].lower().strip())
                        elif sub['key'] == "tags" and sub['operator'] == '!==':
                            fragment['anti-tags'].append(sub['value'].lower().strip())
                        elif sub['key'] == 'type' and sub['value'] in ('source', ''):
                            pass
                        else:
                            print(l.name, 'unknown sub condition', sub)
                elif con.get('key') == "tags" and con['operator'] == '==':
                    fragment['tags'].append(con['value'].lower().strip())
                elif con.get('key') == "tags" and con['operator'] == '!==':
                    fragment['anti-tags'].append(con['value'].lower().strip())

            if fragment_id not in stats:
                stats[fragment_id] = {}
            for tag in fragment['tags']:
                stats[fragment_id][tag] = 0

            duration[fragment_id] = ai_duration[fragment_id] = 0

            for item in l.get_items(l.user).all():
                if item.public_id in skip:
                    continue
                item_tags = [t.lower().strip() for t in item.get('tags')]
                # Any anti-tag disqualifies the item entirely.
                if set(item_tags) & set(fragment['anti-tags']):
                    continue
                for tag in set(fragment['tags']):
                    if tag in item_tags:
                        stats[fragment_id][tag] += 1
                duration[fragment_id] += item.sort.duration
                # Add durations of this title's AI variants (not themselves skipped).
                for ai in Item.objects.filter(
                    data__title=item.data['title']
                ).filter(data__type__icontains='ai:').exclude(
                    public_id__in=skip
                ):
                    ai_duration[fragment_id] += ai.sort.duration
    with open("/srv/pandora/static/power/fragments.txt", "w") as fd:
        for fragment, data in stats.items():
            fd.write("%s (%s source material, %s ai material)\n" % (
                fragment,
                ox.format_duration(1000*duration[fragment], 1, milliseconds=False),
                ox.format_duration(1000*ai_duration[fragment], 1, milliseconds=False))
            )
            for tag in sorted(data):
                fd.write(" %s: %s\n" % (tag, data[tag]))
    return stats
|