#!/usr/bin/python3
from collections import defaultdict
from glob import glob
import json
import os
import re
import shutil
import subprocess
import sys
import time
from pathlib import Path

import ox

from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE
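
# Overview (added note, inferred from the code below rather than from any docs):
# this module composes randomized "scenes" out of pre-cut clips and voice-over
# files, writes them out as kdenlive project files plus an srt subtitle track,
# renders them with melt/ffmpeg into front/back videos with 5.1 audio, and keeps
# a rolling set of rendered fragments synced to a "front" playback host.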


def random_int(seq, length):
    n = n_ = length - 1
    #print('len', n)
    if n == 0:
        return n
    r = seq() / 9 * 10
    base = 10
    while n > 10:
        n /= 10
        r += seq() / 9 * 10
        base += 10
    r = int(round(n_ * r / base))
    return r
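
# Worked example (assuming seq() yields single digits 0..9, as .pi.random and the
# digit comments below suggest): random_int(seq, 5) maps one digit onto an index
# 0..4, e.g. seq() == 9 -> 4, seq() == 0 -> 0; for lengths above 11 additional
# digits are mixed in via the while loop.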


def random_choice(seq, items, pop=False):
    n = random_int(seq, len(items))
    if pop:
        return items.pop(n)
    return items[n]


def chance(seq, chance):
    return (seq() / 10) < chance
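
# Example: with digit sequences, chance(seq, 0.5) is true for digits 0..4,
# i.e. a 50% coin flip; chance(seq, 0.8) is true for digits 0..7.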


def get_clip_by_seqid(clips, seqid):
    selected = None
    for i, clip in enumerate(clips):
        if clip['seqid'] == seqid:
            selected = i
            break
    if selected is not None:
        return clips.pop(selected)
    return None


def write_if_new(path, data, mode=''):
    read_mode = 'r' + mode
    write_mode = 'w' + mode
    if os.path.exists(path):
        with open(path, read_mode) as fd:
            old = fd.read()
    else:
        old = ""
    is_new = data != old
    if path.endswith(".kdenlive"):
        # compare .kdenlive files with their 36-character brace-ids (uuid-style)
        # stripped, so newly generated ids alone don't force a rewrite
        is_new = re.sub(r'\{.{36}\}', '', data) != re.sub(r'\{.{36}\}', '', old)
    if is_new:
        with open(path, write_mode) as fd:
            fd.write(data)
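
# Usage sketch (hypothetical paths, mirroring how render() calls this below):
#   write_if_new('/tmp/out/front.srt', srt_bytes, 'b')    # binary compare + write
#   write_if_new('/tmp/out/front.kdenlive', project_xml)  # id-insensitive compare
# The file is only touched when the content actually differs, which keeps
# mtimes stable for unchanged outputs.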


def compose(clips, target=150, base=1024, voice_over=None):
    fps = 24
    length = 0
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'back': {
            'V1': [],
            'V2': [],
        },
        'audio-back': {
            'A1': [],
        },
        'audio-center': {
            'A1': [],
        },
        'audio-front': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
        'audio-rear': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
    }
    all_clips = clips.copy()
    seq = random(10000 + base * 1000)
    used = []

    voice_overs = []
    sub_offset = 0
    if voice_over:
        vo_keys = list(sorted(voice_over))
        if chance(seq, 0.5):
            vo_key = vo_keys[random_int(seq, len(vo_keys))]
            voice_overs.append(voice_over[vo_key])
        elif len(vo_keys) >= 2:
            vo1 = vo_keys.pop(random_int(seq, len(vo_keys)))
            vo2 = vo_keys.pop(random_int(seq, len(vo_keys)))
            voice_overs.append(voice_over[vo1])
            if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
                print("adding second vo")
                voice_overs.append(voice_over[vo2])
        print("vo:", [x['src'] for x in voice_overs], list(sorted(voice_over)))
    vo_min = sum([vo['duration'] for vo in voice_overs])
    sub_offset = 0
    if vo_min > target:
        target = vo_min
    elif vo_min < target:
        offset = int(((target - vo_min) / 2) * fps) / fps
        scene['audio-center']['A1'].append({
            'blank': True,
            'duration': offset
        })
        scene['audio-rear']['A1'].append({
            'blank': True,
            'duration': offset
        })
        vo_min += offset
        sub_offset = offset
    subs = []
    for vo in voice_overs:
        voc = vo.copy()
        a, b = '-11', '-3'
        if 'Whispered' in voc['src']:
            a, b = '-8', '0'
        elif 'Read' in voc['src']:
            a, b = '-7.75', '0.25'
        elif 'Free' in voc['src']:
            a, b = '-8.8', '-0.8'
        elif 'Ashley' in voc['src']:
            a, b = '-9.5', '-1.50'
        elif 'Melody' in voc['src']:
            a, b = '-5.25', '-0.25'
        voc['filter'] = {'volume': a}
        scene['audio-center']['A1'].append(voc)
        vo_low = vo.copy()
        vo_low['filter'] = {'volume': b}
        scene['audio-rear']['A1'].append(vo_low)
        for sub in voc.get("subs", []):
            sub = sub.copy()
            sub["in"] += sub_offset
            sub["out"] += sub_offset
            subs.append(sub)
        sub_offset += voc["duration"]
    if subs:
        scene["subtitles"] = subs

    clip = None
    while target - length > 0 and clips:
        # coin flip which site is visible (50% chance)
        if length:
            remaining = target - length
            remaining = remaining * 1.05  # allow clips up to 5% longer than the remaining time
            clips_ = [c for c in clips if c['duration'] <= remaining]
            if clips_:
                clips = clips_
        if clip:
            if chance(seq, 0.5):
                next_seqid = clip['seqid'] + 1
                clip = get_clip_by_seqid(clips, next_seqid)
            else:
                clip = None
        if not clip:
            clip = random_choice(seq, clips, True)
        if not clips:
            print("not enough clips, need to reset")
            clips = [c for c in all_clips if c != clip and c not in used]
            if not clips:
                print("not enough clips, also consider used")
                clips = [c for c in all_clips if c != clip]
            if not clips:
                print("not enough clips, also consider last clip")
                clips = all_clips.copy()
        if length + clip['duration'] > target and length >= vo_min:
            break
        print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
        length += clip['duration']

        if "foreground" not in clip and "animation" in clip:
            fg = clip['animation']
            transparancy = 1
        else:
            fg = clip['foreground']
            if 'animation' in clip and chance(seq, 0.15):
                fg = clip['animation']
                transparancy = 1
            else:
                if 'foreground2' in clip:
                    if 'foreground3' in clip:
                        # pick one of the three foreground variants
                        n = seq()
                        if n <= 3:    # 0,1,2,3
                            fg = clip['foreground']
                        elif n <= 6:  # 4,5,6
                            fg = clip['foreground2']
                        else:         # 7,8,9
                            fg = clip['foreground3']
                    elif chance(seq, 0.5):
                        fg = clip['foreground2']
                transparancy = seq() / 9
                transparancy = 1
        if 'foley' in clip:
            foley = clip['foley']
        else:
            foley = fg
        scene['front']['V2'].append({
            'duration': clip['duration'],
            'src': fg,
            "filter": {
                'transparency': transparancy,
            }
        })

        transparency = seq() / 9
        # 50% of time no transparency of the foreground layer
        # 50% some transparency, 25%, 50%, 75% levels of transparency
        transparancy = 1
        # coin flip which site is visible (50% chance)
        #if chance(seq, 0.5):
        if chance(seq, 0.8):
            transparency_front = transparency
            transparency_back = 0
        else:
            transparency_back = random_choice(seq, [0.25, 0.5, 0.75, 1])
            transparency_front = 0
        transparency_original = seq() / 9
        transparency_original = 1
        if "background" in clip:
            scene['front']['V1'].append({
                'duration': clip['duration'],
                'src': clip['background'],
                "filter": {
                    'transparency': transparency_front
                }
            })
            scene['back']['V2'].append({
                'duration': clip['duration'],
                'src': clip['background'],
                "filter": {
                    'transparency': transparency_back
                }
            })
        else:
            scene['front']['V1'].append({
                'duration': clip['duration'],
                'src': clip['animation'],
                "filter": {
                    'transparency': 0,
                }
            })
            scene['back']['V2'].append({
                'duration': clip['duration'],
                'src': clip['original'],
                "filter": {
                    'transparency': 0,
                }
            })

        scene['back']['V1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
            "filter": {
                'transparency': transparency_original,
            }
        })
        # 50 % chance to blur original from 0 to 30
        if chance(seq, 0.5):
            blur = seq() * 3
            if blur:
                scene['back']['V1'][-1]['filter']['blur'] = blur
        scene['audio-back']['A1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
            'filter': {'volume': '-8.2'},
        })
        # TBD: Foley
        cf_volume = '-2.5'
        scene['audio-front']['A2'].append({
            'duration': clip['duration'],
            'src': foley,
            'filter': {'volume': cf_volume},
        })
        scene['audio-rear']['A2'].append({
            'duration': clip['duration'],
            'src': foley,
            'filter': {'volume': cf_volume},
        })
        used.append(clip)
    print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
    if sub_offset < length:
        delta = length - sub_offset
        scene['audio-center']['A1'].append({
            'blank': True,
            'duration': delta
        })
        scene['audio-rear']['A1'].append({
            'blank': True,
            'duration': delta
        })
    return scene, used
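
# Shape of the value returned by compose() (summarized from the code above;
# 'front' and 'back' are the two video outputs rendered further below):
#   scene = {
#       'front':        {'V1': [...], 'V2': [...]},
#       'back':         {'V1': [...], 'V2': [...]},
#       'audio-back':   {'A1': [...]},
#       'audio-center': {'A1': [...]},                   # voice-over + padding
#       'audio-front':  {'A1': [...], ..., 'A4': [...]},
#       'audio-rear':   {'A1': [...], ..., 'A4': [...]},
#       'subtitles':    [...],                           # only if subs were found
#   }
# where each clip entry is a dict with 'duration', 'src' and an optional
# 'filter' ({'transparency': ..., 'blur': ..., 'volume': ...}) or 'blank': True.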


def get_scene_duration(scene):
    duration = 0
    for key, value in scene.items():
        if key == "subtitles":
            # the subtitles entry is a plain list of subs, not a dict of tracks
            continue
        for name, clips in value.items():
            for clip in clips:
                duration += clip['duration']
    return duration


def get_offset_duration(prefix):
    duration = 0
    for root, folders, files in os.walk(prefix):
        for f in files:
            if f == 'scene.json':
                path = os.path.join(root, f)
                with open(path) as fd:
                    scene = json.load(fd)
                duration += get_scene_duration(scene)
    return duration


def render(root, scene, prefix=''):
    fps = 24
    files = []
    scene_duration = int(get_scene_duration(scene) * fps)
    for timeline, data in scene.items():
        if timeline == "subtitles":
            path = os.path.join(root, prefix + "front.srt")
            data = fix_overlaps(data)
            srt = ox.srt.encode(data)
            write_if_new(path, srt, 'b')
            continue
        #print(timeline)
        project = KDEnliveProject(root)
        tracks = []
        track_durations = {}
        for track, clips in data.items():
            #print(track)
            for clip in clips:
                project.append_clip(track, clip)
            track_durations[track] = int(sum([c['duration'] for c in clips]) * fps)
        if timeline.startswith('audio-'):
            track_duration = project.get_duration()
            delta = scene_duration - track_duration
            if delta > 0:
                for track in track_durations:
                    if track_durations[track] == track_duration:
                        project.append_clip(track, {'blank': True, "duration": delta/fps})
                        break
        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        project_xml = project.to_xml()
        write_if_new(path, project_xml)
        files.append(path)
    return files
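
# render() only writes project files (one .kdenlive per timeline plus front.srt);
# the actual encoding happens in render_all() below, which runs melt on each
# project and then remixes the resulting audio with ffmpeg.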


def get_fragments(clips, voice_over, prefix):
    import itemlist.models
    import item.models

    fragments = []

    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if l.name.split(' ')[0].isdigit():
            fragment = {
                'name': l.name,
                'tags': [],
                'anti-tags': [],
                'description': l.description
            }
            for con in l.query['conditions']:
                if "conditions" in con:
                    for sub in con["conditions"]:
                        if sub['key'] == "tags" and sub['operator'] == '==':
                            fragment['tags'].append(sub['value'])
                        elif sub['key'] == "tags" and sub['operator'] == '!=':
                            fragment['anti-tags'].append(sub['value'])
                        else:
                            print(l.name, 'unknown sub condition', sub)
                elif con.get('key') == "tags" and con['operator'] == '==':
                    fragment['tags'].append(con['value'])
                elif con.get('key') == "tags" and con['operator'] == '!=':
                    fragment['anti-tags'].append(con['value'])

            fragment["id"] = int(fragment['name'].split(' ')[0])
            originals = []
            for i in l.get_items(l.user):
                orig = i.files.filter(selected=True).first()
                if orig:
                    ext = os.path.splitext(orig.data.path)[1]
                    type_ = i.data['type'][0].lower()
                    target = os.path.join(prefix, type_, i.data['title'] + ext)
                    originals.append(target)
            fragment['clips'] = []
            for clip in clips:
                #if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']):
                if clip['original'] in originals:
                    fragment['clips'].append(clip)
            fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
            fragments.append(fragment)
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments
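
# Assumptions visible in the code above: featured lists whose names start with a
# number ("12 Some Title" style) become fragments; tag conditions from the list
# query are collected as tags/anti-tags (currently only used by the commented-out
# matching line), and a clip belongs to a fragment when its 'original' path is
# one of the list items' selected files under prefix.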


def render_all(options):
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])

    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))

    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    fragments = get_fragments(clips, voice_over, prefix)
    with open(os.path.join(prefix, "fragments.json"), "w") as fd:
        json.dump(fragments, fd, indent=2, ensure_ascii=False)
    position = target_position = 0
    target = fragment_target = duration / len(fragments)
    base_prefix = os.path.join(prefix, 'render', str(base))
    clips_used = []

    stats = defaultdict(lambda: 0)
    fragment_base = base
    for fragment in fragments:
        fragment_base += 1
        fragment_id = int(fragment['name'].split(' ')[0])
        name = fragment['name'].replace(' ', '_')
        if fragment_id < 10:
            name = '0' + name
        if not fragment['clips']:
            print("skipping empty fragment", name)
            continue
        fragment_prefix = os.path.join(base_prefix, name)
        os.makedirs(fragment_prefix, exist_ok=True)
        fragment_clips = fragment['clips']
        unused_fragment_clips = [c for c in fragment_clips if c not in clips_used]
        print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
        scene, used = compose(unused_fragment_clips, target=target, base=fragment_base, voice_over=fragment['voice_over'])
        clips_used += used
        scene_duration = get_scene_duration(scene)
        print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
        src = [a for a in scene['audio-rear']['A1'] if 'src' in a][0]['src']
        stats[src.split('/')[-2]] += 1

        position += scene_duration
        target_position += fragment_target
        if position > target_position:
            target = fragment_target - (position-target_position)
            print("adjusting target duration for next fragment: %6.3f -> %6.3f" % (fragment_target, target))
        elif position < target_position:
            target = target + 0.1 * fragment_target

        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')

        scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
        write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)

        if not options['no_video']:
            for timeline in timelines:
                print(timeline)
                ext = '.mp4'
                if '/audio' in timeline:
                    ext = '.wav'
                cmd = [
                    'xvfb-run', '-a',
                    'melt', timeline,
                    '-quiet',
                    '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
                ]
                if ext == '.wav':
                    cmd += ['vn=1']
                else:
                    #if not timeline.endswith("back.kdenlive"):
                    cmd += ['an=1']
                    cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
                subprocess.call(cmd)
                if ext == '.wav' and timeline.endswith('audio.kdenlive'):
                    cmd = [
                        'ffmpeg', '-y',
                        '-nostats', '-loglevel', 'error',
                        '-i',
                        timeline.replace('.kdenlive', ext),
                        timeline.replace('.kdenlive', '.mp4')
                    ]
                    subprocess.call(cmd)
                    os.unlink(timeline.replace('.kdenlive', ext))

            fragment_prefix = Path(fragment_prefix)
            cmds = []
            for src, out1, out2 in (
                ("audio-front.wav", "fl.wav", "fr.wav"),
                ("audio-center.wav", "fc.wav", "lfe.wav"),
                ("audio-rear.wav", "bl.wav", "br.wav"),
            ):
                cmds.append([
                    "ffmpeg", "-y",
                    "-nostats", "-loglevel", "error",
                    "-i", fragment_prefix / src,
                    "-filter_complex",
                    "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                    "-map", "[left]", fragment_prefix / out1,
                    "-map", "[right]", fragment_prefix / out2,
                ])
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "fl.wav",
                "-i", fragment_prefix / "fr.wav",
                "-i", fragment_prefix / "fc.wav",
                "-i", fragment_prefix / "lfe.wav",
                "-i", fragment_prefix / "bl.wav",
                "-i", fragment_prefix / "br.wav",
                "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
                "-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
            ])
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "front.mp4",
                "-i", fragment_prefix / "audio-5.1.mp4",
                "-c", "copy",
                fragment_prefix / "front-5.1.mp4",
            ])
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "back.mp4",
                "-i", fragment_prefix / "audio-back.wav",
                "-c:v", "copy",
                fragment_prefix / "back-audio.mp4",
            ])
            for cmd in cmds:
                #print(" ".join([str(x) for x in cmd]))
                subprocess.call(cmd)

            for a, b in (
                ("back-audio.mp4", "back.mp4"),
                ("front-5.1.mp4", "back.mp4"),
            ):
                duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
                duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
                if duration_a != duration_b:
                    print('!!', duration_a, fragment_prefix / a)
                    print('!!', duration_b, fragment_prefix / b)
                    sys.exit(-1)
            shutil.move(fragment_prefix / "back-audio.mp4", fragment_prefix / "back.mp4")
            shutil.move(fragment_prefix / "front-5.1.mp4", fragment_prefix / "front.mp4")
            for fn in (
                "audio-5.1.mp4",
                "audio-center.wav", "audio-rear.wav",
                "audio-front.wav", "audio-back.wav", "back-audio.mp4",
                "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
            ):
                fn = fragment_prefix / fn
                if os.path.exists(fn):
                    os.unlink(fn)

    print("Duration - Target: %s Actual: %s" % (target_position, position))
    print(json.dumps(dict(stats), sort_keys=True, indent=2))
    with open(_cache, "w") as fd:
        json.dump(_CACHE, fd)


def add_translations(sub, lang):
    value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if sub.languages:
        value = ox.strip_tags(value)
    if lang:
        for slang in lang:
            if slang == "en":
                slang = None
            for tsub in sub.item.annotations.filter(layer="subtitles", start=sub.start, end=sub.end, languages=slang):
                tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
                if tsub.languages:
                    tvalue = ox.strip_tags(tvalue)
                value += '\n' + tvalue
    return value


def get_srt(sub, offset=0, lang=None):
    sdata = sub.json(keys=['in', 'out', 'value'])
    sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if lang:
        sdata['value'] = add_translations(sub, lang)
    if offset:
        sdata["in"] += offset
        sdata["out"] += offset
    return sdata


def fix_overlaps(data):
    previous = None
    for sub in data:
        if previous is None:
            previous = sub
        else:
            if sub['in'] < previous['out']:
                previous['out'] = sub['in'] - 0.001
            previous = sub
    return data
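
# Example (hypothetical timings): given subs with (in, out) = (0.0, 4.2) and
# (4.0, 6.0), the first sub's out is pulled back to 3.999 so the entries no
# longer overlap; the list is modified in place and also returned.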


def update_subtitles(options):
    import item.models

    prefix = Path(options['prefix'])
    base = int(options['offset'])
    lang = options["lang"]
    if lang and "," in lang:
        lang = lang.split(',')
    if isinstance(lang, list):
        tlang = lang[1:]
        lang = lang[0]
    else:
        tlang = None
    if lang == "en":
        lang = None

    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))

    base_prefix = prefix / 'render' / str(base)
    for folder in os.listdir(base_prefix):
        folder = base_prefix / folder
        scene_json = folder / "scene.json"
        if not os.path.exists(scene_json):
            continue
        with open(scene_json) as fd:
            scene = json.load(fd)
        offset = 0
        subs = []
        for clip in scene['audio-center']['A1']:
            if not clip.get("blank"):
                batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
                vo = item.models.Item.objects.filter(data__batch__icontains=batch, data__title__startswith=fragment_id + '_').first()
                if vo:
                    #print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
                    for sub in vo.annotations.filter(layer="subtitles").filter(languages=lang).exclude(value="").order_by("start"):
                        sdata = get_srt(sub, offset, tlang)
                        subs.append(sdata)
                else:
                    print("could not find vo for %s" % clip['src'])
            offset += clip['duration']
        path = folder / "front.srt"
        subs = fix_overlaps(subs)
        srt = ox.srt.encode(subs)
        write_if_new(str(path), srt, 'b')


def update_m3u(render_prefix, exclude=[]):
    files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4"))
    for ex in exclude:
        # exclude entries from folders (named relative to render_prefix) that are
        # about to be removed by render_infinity()
        files = [f for f in files if not f[len(render_prefix):].startswith(ex + "/")]
    back_m3u = "\n".join(files)
    back_m3u = back_m3u.replace(render_prefix, "")
    front_m3u = back_m3u.replace("back.mp4", "front.mp4")

    back_m3u_f = render_prefix + "back.m3u"
    front_m3u_f = render_prefix + "front.m3u"

    with open(back_m3u_f + "_", "w") as fd:
        fd.write(back_m3u)
    with open(front_m3u_f + "_", "w") as fd:
        fd.write(front_m3u)
    shutil.move(front_m3u_f + "_", front_m3u_f)
    cmd = ["scp", front_m3u_f, "front:" + front_m3u_f]
    subprocess.check_call(cmd)
    shutil.move(back_m3u_f + "_", back_m3u_f)
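
# Both playlists are written to "<name>.m3u_" first and then moved into place,
# so players never see a half-written file; the front playlist is additionally
# copied to the "front" host via scp before the local back.m3u is swapped in.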


def render_infinity(options):
    prefix = options['prefix']
    duration = int(options['duration'])

    state_f = os.path.join(prefix, "infinity.json")
    if os.path.exists(state_f):
        with open(state_f) as fd:
            state = json.load(fd)
    else:
        state = {
            "offset": 100,
            "max-items": 30,
            "no_video": False,
        }
    for key in ("prefix", "duration"):
        state[key] = options[key]

    while True:
        render_prefix = state["prefix"] + "/render/"
        current = [
            f for f in os.listdir(render_prefix)
            if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
        ]
        if len(current) > state["max-items"]:
            current = ox.sorted_strings(current)
            remove = current[:-state["max-items"]]
            update_m3u(render_prefix, exclude=remove)
            for folder in remove:
                folder = render_prefix + folder
                print("remove", folder)
                shutil.rmtree(folder)
                cmd = ["ssh", "front", "rm", "-rf", folder]
                #print(cmd)
                subprocess.check_call(cmd)
        render_all(state)
        path = "%s%s/" % (render_prefix, state["offset"])
        cmd = ['rsync', '-a', path, "front:" + path]
        subprocess.check_call(cmd)
        update_m3u(render_prefix)
        state["offset"] += 1
        with open(state_f + "~", "w") as fd:
            json.dump(state, fd, indent=2)
        shutil.move(state_f + "~", state_f)
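
# render_infinity() appears to be the long-running entry point: each pass prunes
# old render folders beyond "max-items" (locally and on the "front" host),
# renders a new batch at the current "offset" via render_all(), rsyncs it to the
# front host, refreshes both m3u playlists, and persists the incremented offset
# to infinity.json via a temporary "~" file so the state survives restarts.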