pandora_t_for_time/render.py

892 lines
32 KiB
Python
Raw Normal View History

2023-10-08 11:19:05 +00:00
#!/usr/bin/python3
2023-11-12 17:30:06 +00:00
from collections import defaultdict
2024-01-22 14:06:40 +00:00
from glob import glob
2023-10-08 11:19:05 +00:00
import json
import os
2023-11-16 08:08:03 +00:00
import re
import shutil
2023-10-08 11:19:05 +00:00
import subprocess
import sys
import time
from pathlib import Path
2023-10-08 11:19:05 +00:00
import ox
import lxml.etree
2023-10-09 13:10:34 +00:00
from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE, melt_xml, get_melt
2023-10-08 11:19:05 +00:00
2023-11-12 17:30:06 +00:00
def random_int(seq, length):
    """Return a deterministic pseudo-random int in [0, length - 1].

    `seq` is a callable yielding digits 0-9; one extra digit is consumed
    for every additional order of magnitude of `length`.
    """
    limit = length - 1
    if limit == 0:
        return limit
    total = seq() / 9 * 10
    scale = 10
    magnitude = limit
    while magnitude > 10:
        magnitude /= 10
        total += seq() / 9 * 10
        scale += 10
    return int(round(limit * total / scale))
def random_choice(seq, items, pop=False):
    """Pick one entry of `items` using the digit sequence `seq`.

    With pop=True the chosen entry is also removed from `items`.
    """
    index = random_int(seq, len(items))
    return items.pop(index) if pop else items[index]
2023-10-08 11:19:05 +00:00
def chance(seq, chance):
    """Return True with probability `chance` (one digit of `seq` is consumed).

    seq() yields 0-9, so the roll is one of 0.0, 0.1, ..., 0.9.
    """
    roll = seq() / 10
    return roll < chance
2023-10-08 11:19:05 +00:00
def get_clip_by_seqid(clips, seqid):
    """Remove and return the first clip whose 'seqid' matches, else None."""
    for index, candidate in enumerate(clips):
        if candidate['seqid'] == seqid:
            return clips.pop(index)
    return None
2023-11-16 08:08:03 +00:00
def write_if_new(path, data, mode=''):
    """Write `data` to `path` only when it differs from the current content.

    mode: '' for text, 'b' for bytes (data must match the mode).
    For .kdenlive files, volatile 36-character brace-wrapped ids (uuids)
    are stripped before comparing, so a project that only differs in
    regenerated uuids is not rewritten.

    Fixes: regex patterns are now raw strings (non-raw '\\{' escapes raise
    SyntaxWarning on Python 3.12+), and the "no previous file" sentinel
    matches the requested mode (b'' for binary) so the comparison is
    never str-vs-bytes.
    """
    read_mode = 'r' + mode
    write_mode = 'w' + mode
    if os.path.exists(path):
        with open(path, read_mode) as fd:
            old = fd.read()
    else:
        old = b"" if 'b' in mode else ""
    is_new = data != old
    if path.endswith(".kdenlive"):
        # ignore uuid churn: kdenlive regenerates {xxxxxxxx-...} ids on save
        is_new = re.sub(r'\{.{36}\}', '', data) != re.sub(r'\{.{36}\}', '', old)
    if is_new:
        with open(path, write_mode) as fd:
            fd.write(data)
def format_duration(duration, fps):
    """Quantize `duration` (seconds) to whole frames at `fps`, rounded to 5 decimals."""
    frames = round(duration * fps)
    return float('%0.5f' % (frames / fps))
2023-11-16 08:08:03 +00:00
def compose(clips, target=150, base=1024, voice_over=None):
    """Compose one scene of roughly `target` seconds out of `clips`.

    A scene maps timeline names ('front', 'back', 'audio-*') to tracks
    ('V1'/'V2', 'A1'..'A4'), each track being a list of clip dicts; a
    'subtitles' key holding a flat list of subtitle dicts may be added.
    All randomness is drawn from the seeded generator
    random(10000 + base * 1000), so identical arguments reproduce the
    identical scene. Returns (scene, used) where `used` lists the clips
    that were placed.

    NOTE(review): the source view had its indentation stripped; nesting
    below is reconstructed — verify against the original file.
    """
    fps = 24
    length = 0  # seconds of video placed so far (frame-quantized)
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'back': {
            'V1': [],
            'V2': [],
        },
        'audio-back': {
            'A1': [],
        },
        'audio-center': {
            'A1': [],
        },
        'audio-front': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
        'audio-rear': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
    }
    all_clips = clips.copy()
    # deterministic digit sequence (0-9), seeded per scene
    seq = random(10000 + base * 1000)
    used = []
    voice_overs = []
    sub_offset = 0
    if voice_over:
        vo_keys = list(sorted(voice_over))
        if chance(seq, 0.5):
            # single voice over
            vo_key = vo_keys[random_int(seq, len(vo_keys))]
            voice_overs.append(voice_over[vo_key])
        elif len(vo_keys) >= 2:
            # try to fit two voice overs
            vo1 = vo_keys.pop(random_int(seq, len(vo_keys)))
            vo2 = vo_keys.pop(random_int(seq, len(vo_keys)))
            voice_overs.append(voice_over[vo1])
            if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
                print("adding second vo")
                voice_overs.append(voice_over[vo2])
    print("vo:", [x['src'] for x in voice_overs], list(sorted(voice_over)))
    vo_min = sum([vo['duration'] for vo in voice_overs])
    sub_offset = 0
    if vo_min > target:
        # voice over is longer than the planned scene: extend the scene
        target = vo_min
    elif vo_min < target:
        # roughly center the voice over by padding silence in front of it
        offset = format_duration((target - vo_min) / 2, fps)
        scene['audio-center']['A1'].append({
            'blank': True,
            'duration': offset
        })
        scene['audio-rear']['A1'].append({
            'blank': True,
            'duration': offset
        })
        vo_min += offset
        sub_offset = offset
    subs = []
    for vo in voice_overs:
        voc = vo.copy()
        # (center-channel gain, rear-channel gain) in dB, per voice
        a, b = '-11', '-3'
        if 'Whispered' in voc['src']:
            a, b = '-8', '0'
        elif 'Read' in voc['src']:
            a, b = '-7.75', '0.25'
        elif 'Free' in voc['src']:
            a, b = '-8.8', '-0.8'
        elif 'Ashley' in voc['src']:
            a, b = '-9.5', '-1.50'
        elif 'Melody' in voc['src']:
            a, b = '-5.25', '-0.25'
        voc['filter'] = {'volume': a}
        scene['audio-center']['A1'].append(voc)
        vo_low = vo.copy()
        vo_low['filter'] = {'volume': b}
        scene['audio-rear']['A1'].append(vo_low)
        # shift this voice over's subtitles into scene time
        for sub in voc.get("subs", []):
            sub = sub.copy()
            sub["in"] += sub_offset
            sub["out"] += sub_offset
            subs.append(sub)
        sub_offset += voc["duration"]
    if subs:
        scene["subtitles"] = subs
    clip = None
    while target - length > 0 and clips:
        # once started, prefer clips that still fit into the remaining time
        if length:
            remaining = target - length
            remaining = remaining * 1.05  # allow for max of 10% over time
            clips_ = [c for c in clips if c['duration'] <= remaining]
            if clips_:
                clips = clips_
        if clip:
            # 50% chance to continue with the next clip of the same sequence
            if chance(seq, 0.5):
                next_seqid = clip['seqid'] + 1
                clip = get_clip_by_seqid(clips, next_seqid)
            else:
                clip = None
        if not clip:
            clip = random_choice(seq, clips, True)
        if not clips:
            print("not enough clips, need to reset")
            clips = [c for c in all_clips if c != clip and c not in used]
            if not clips:
                print("not enough clips, also consider used")
                clips = [c for c in all_clips if c != clip]
            if not clips:
                print("not enough clips, also consider last clip")
                clips = all_clips.copy()
        if length + clip['duration'] > target and length >= vo_min:
            break
        print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
        length += int(clip['duration'] * fps) / fps
        if "foreground" not in clip and "animation" in clip:
            fg = clip['animation']
            transparancy = 1
        else:
            fg = clip['foreground']
            if 'animation' in clip and chance(seq, 0.15):
                fg = clip['animation']
                transparancy = 1
            else:
                if 'foreground2' in clip:
                    if 'foreground3' in clip:
                        n = seq()
                        # NOTE(review): the three branches below are bare
                        # expressions — `fg` is never reassigned, so this
                        # 3-way foreground choice has no effect; probably
                        # `fg = clip[...]` was intended.
                        if n <= 3:  # 0,1,2,3
                            clip['foreground']
                        elif n <= 6:  # 4,5,6
                            clip['foreground2']
                        else:  # 7,8,9
                            clip['foreground3']
                    elif chance(seq, 0.5):
                        fg = clip['foreground2']
                transparancy = seq() / 9
                # NOTE(review): immediately overrides the drawn value
                # (the seq() call still advances the sequence)
                transparancy = 1
        if 'foley' in clip:
            foley = clip['foley']
        else:
            foley = fg
        scene['front']['V2'].append({
            'duration': clip['duration'],
            'src': fg,
            "filter": {
                'transparency': transparancy,
            }
        })
        transparency = seq() / 9
        # 50% of time no transparency of foreground layer
        # 50% some transparency: 25%, 50%, 75% levels of transparency
        # NOTE(review): `transparancy` (misspelled) differs from
        # `transparency` above — only `transparency` is used below.
        transparancy = 1
        # coin flip which side is visible (50% chance)
        #if chance(seq, 0.5):
        if chance(seq, 0.8):
            transparency_front = transparency
            transparency_back = 0
        else:
            transparency_back = random_choice(seq, [0.25, 0.5, 0.75, 1])
            transparency_front = 0
        transparency_original = seq() / 9
        # NOTE(review): overrides the drawn value, seq still advances
        transparency_original = 1
        if "background" in clip:
            scene['front']['V1'].append({
                'duration': clip['duration'],
                'src': clip['background'],
                "filter": {
                    'transparency': transparency_front
                }
            })
            scene['back']['V2'].append({
                'duration': clip['duration'],
                'src': clip['background'],
                "filter": {
                    'transparency': transparency_back
                }
            })
        else:
            scene['front']['V1'].append({
                'duration': clip['duration'],
                'src': clip['animation'],
                "filter": {
                    'transparency': 0,
                }
            })
            scene['back']['V2'].append({
                'duration': clip['duration'],
                'src': clip['original'],
                "filter": {
                    'transparency': 0,
                }
            })
        scene['back']['V1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
            "filter": {
                'transparency': transparency_original,
            }
        })
        # 50 % chance to blur original from 0 to 30
        if chance(seq, 0.5):
            blur = seq() * 3
            if blur:
                scene['back']['V1'][-1]['filter']['blur'] = blur
        scene['audio-back']['A1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
            'filter': {'volume': '-8.2'},
        })
        # TBD: Foley
        cf_volume = '-2.5'
        scene['audio-front']['A2'].append({
            'duration': clip['duration'],
            'src': foley,
            'filter': {'volume': cf_volume},
        })
        scene['audio-rear']['A2'].append({
            'duration': clip['duration'],
            'src': foley,
            'filter': {'volume': cf_volume},
        })
        used.append(clip)
    print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
    # pad (or trim) the voice-over tracks to match the video length exactly;
    # both quantities are compared in frames
    scene_duration = int(get_scene_duration(scene) * fps)
    sub_offset = int(sub_offset * fps)
    if sub_offset < scene_duration:
        delta = format_duration((scene_duration - sub_offset) / fps, fps)
        print(">> add %0.3f of silence.. %0.3f (scene_duration)" % (delta, scene_duration / fps))
        scene['audio-center']['A1'].append({
            'blank': True,
            'duration': delta
        })
        scene['audio-rear']['A1'].append({
            'blank': True,
            'duration': delta
        })
    elif sub_offset > scene_duration:
        # delta is negative here: shorten the last voice-over entry
        delta = format_duration((scene_duration - sub_offset) / fps, fps)
        scene['audio-center']['A1'][-1]["duration"] += delta
        scene['audio-rear']['A1'][-1]["duration"] += delta
        print("WTF, needed to cut %s new duration: %s" % (delta, scene['audio-center']['A1'][-1]["duration"]))
        print(scene['audio-center']['A1'][-1])
    return scene, used
2023-10-08 11:19:05 +00:00
def get_track_duration(scene, k, n, fps=24):
    """Return the duration in seconds of track `n` inside timeline `k`.

    Each clip is quantized to whole frames (int truncation) before summing,
    matching get_scene_duration. The frame rate, previously hard-coded to
    24, is now the `fps` keyword (default unchanged, so callers are
    unaffected).
    """
    duration = 0
    for key, value in scene.items():
        if key == k:
            for name, clips in value.items():
                if name == n:
                    for clip in clips:
                        duration += int(clip['duration'] * fps)
    return duration / fps
2023-10-10 15:16:59 +00:00
def get_scene_duration(scene):
    """Total duration in seconds of a scene, quantized to 24 fps frames.

    `scene` is either a scene dict or the path of a scene.json file.
    """
    if isinstance(scene, str):
        with open(scene) as fd:
            scene = json.load(fd)
    frames = 0
    for tracks in scene.values():
        for track_clips in tracks.values():
            frames += sum(int(c['duration'] * 24) for c in track_clips)
    return frames / 24
2023-10-08 11:19:05 +00:00
2024-03-19 10:48:36 +00:00
def get_offset_duration(prefix):
    """Sum the durations of every scene.json found under `prefix` (seconds).

    Fixes a NameError in the original, which referenced an undefined
    `scene` variable instead of the path of the scene.json it had found.
    """
    duration = 0
    for root, folders, files in os.walk(prefix):
        for f in files:
            if f == 'scene.json':
                duration += get_scene_duration(os.path.join(root, f))
    return duration
def render(root, scene, prefix='', options=None):
    """Write one .kdenlive project file per timeline of `scene` under `root`
    (and a front.srt for the 'subtitles' timeline); return the project paths.

    Files are only rewritten when their content changed (write_if_new).
    NOTE(review): options['debug'] is read unconditionally below, so callers
    must supply a 'debug' key — the `options = {}` default would raise.
    """
    if options is None: options = {}
    fps = 24
    files = []
    # scene length in frames; audio timelines get padded up to this
    scene_duration = int(get_scene_duration(scene) * fps)
    for timeline, data in scene.items():
        if timeline == "subtitles":
            # 'subtitles' holds a flat list of sub dicts, not tracks
            path = os.path.join(root, prefix + "front.srt")
            data = fix_overlaps(data)
            srt = ox.srt.encode(data)
            write_if_new(path, srt, 'b')
            continue
        project = KDEnliveProject(root)
        tracks = []  # NOTE(review): unused
        track_durations = {}
        for track, clips in data.items():
            for clip in clips:
                project.append_clip(track, clip)
            track_durations[track] = sum([int(c['duration'] * fps) for c in clips])
        if timeline.startswith('audio-'):
            # pad the longest audio track(s) with silence to match the video
            track_duration = project.get_duration()
            delta = scene_duration - track_duration
            if delta > 0:
                for track in track_durations:
                    if track_durations[track] == track_duration:
                        project.append_clip(track, {'blank': True, "duration": delta/fps})
        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        project_xml = project.to_xml()
        write_if_new(path, project_xml)
        if options["debug"]:
            # check duration: written file vs in-memory project vs scene
            out_duration = get_project_duration(path)
            p_duration = project.get_duration()
            print(path, 'out: %s, project: %s, scene: %s' %(out_duration, p_duration, scene_duration))
            if p_duration != scene_duration:
                # persist the clip-duration cache before aborting
                print(path, 'FAIL project: %s, scene: %s' %(p_duration, scene_duration))
                _cache = os.path.join(root, "cache.json")
                with open(_cache, "w") as fd:
                    json.dump(_CACHE, fd)
                sys.exit(1)
            if out_duration != p_duration:
                print(path, 'fail got: %s expected: %s' %(out_duration, p_duration))
                sys.exit(1)
        files.append(path)
    return files
2023-10-10 15:16:59 +00:00
def get_project_duration(file):
    """Duration in frames of a rendered .kdenlive project, as reported by melt.

    Reads the first <producer> of melt's XML output; its 'out' attribute is
    the last frame index, hence the +1.
    """
    xml = melt_xml(file)
    producer = lxml.etree.fromstring(xml).xpath('producer')[0]
    return int(producer.attrib['out']) + 1
2024-03-22 09:56:50 +00:00
2023-11-08 10:01:20 +00:00
def get_fragments(clips, voice_over, prefix):
    """Build the ordered list of fragment descriptors from featured lists.

    Every featured itemlist whose name starts with a number becomes one
    fragment: {'name', 'id', 'tags', 'anti-tags', 'description', 'clips',
    'voice_over'}. Clips are matched by their 'original' path against the
    list's selected item files.

    Fix: '!=' sub-conditions were appended to 'tags' instead of
    'anti-tags' (copy-paste bug — the top-level '!=' branch below shows
    the intended behavior).
    """
    import itemlist.models
    import item.models
    fragments = []
    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if l.name.split(' ')[0].isdigit():
            fragment = {
                'name': l.name,
                'tags': [],
                'anti-tags': [],
                'description': l.description
            }
            # collect tag conditions, flattening one level of nesting
            for con in l.query['conditions']:
                if "conditions" in con:
                    for sub in con["conditions"]:
                        if sub['key'] == "tags" and sub['operator'] == '==':
                            fragment['tags'].append(sub['value'])
                        elif sub['key'] == "tags" and sub['operator'] == '!=':
                            fragment['anti-tags'].append(sub['value'])
                        else:
                            print(l.name, 'unknown sub condition', sub)
                elif con.get('key') == "tags" and con['operator'] == '==':
                    fragment['tags'].append(con['value'])
                elif con.get('key') == "tags" and con['operator'] == '!=':
                    fragment['anti-tags'].append(con['value'])
            fragment["id"] = int(fragment['name'].split(' ')[0])
            # paths of the selected source files belonging to this list
            originals = []
            for i in l.get_items(l.user):
                orig = i.files.filter(selected=True).first()
                if orig:
                    ext = os.path.splitext(orig.data.path)[1]
                    type_ = i.data['type'][0].lower()
                    target = os.path.join(prefix, type_, i.data['title'] + ext)
                    originals.append(target)
            fragment['clips'] = []
            for clip in clips:
                #if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']):
                if clip['original'] in originals:
                    fragment['clips'].append(clip)
            fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
            fragments.append(fragment)
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments
def render_all(options):
    """Render one complete edition: compose and render every fragment under
    <prefix>/render/<offset>/, then encode video/audio via melt and ffmpeg.

    options keys used: prefix, duration, offset, no_video, single_file,
    keep_audio, debug.

    NOTE(review): the source view had its indentation stripped; block
    nesting below is reconstructed — verify against the original file.
    """
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])
    # warm the shared clip-duration cache from the previous run
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))
    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    fragments = get_fragments(clips, voice_over, prefix)
    with open(os.path.join(prefix, "fragments.json"), "w") as fd:
        json.dump(fragments, fd, indent=2, ensure_ascii=False)
    # each fragment gets an equal share of the total duration; `target`
    # is adjusted per fragment to correct accumulated drift
    position = target_position = 0
    target = fragment_target = duration / len(fragments)
    base_prefix = os.path.join(prefix, 'render', str(base))
    clips_used = []
    stats = defaultdict(lambda: 0)
    # per-fragment random seed base (distinct seed per fragment)
    fragment_base = base
    for fragment in fragments:
        fragment_base += 1
        fragment_id = int(fragment['name'].split(' ')[0])
        name = fragment['name'].replace(' ', '_')
        if fragment_id < 10:
            name = '0' + name
        if not fragment['clips']:
            print("skipping empty fragment", name)
            continue
        fragment_prefix = os.path.join(base_prefix, name)
        os.makedirs(fragment_prefix, exist_ok=True)
        # prefer clips not yet used in earlier fragments
        fragment_clips = fragment['clips']
        unused_fragment_clips = [c for c in fragment_clips if c not in clips_used]
        print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
        scene, used = compose(unused_fragment_clips, target=target, base=fragment_base, voice_over=fragment['voice_over'])
        clips_used += used
        scene_duration = get_scene_duration(scene)
        print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
        # count which voice-over batch was used (first non-blank A1 entry)
        src = [a for a in scene['audio-rear']['A1'] if 'src' in a][0]['src']
        stats[src.split('/')[-2]] += 1
        position += scene_duration
        target_position += fragment_target
        if position > target_position:
            # running long: shrink the next fragment's target
            target = fragment_target - (position-target_position)
            print("adjusting target duration for next fragment: %6.3f -> %6.3f" % (fragment_target, target))
        elif position < target_position:
            # running short: stretch the next target by 10%
            target = target + 0.1 * fragment_target
        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/', options)
        scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
        write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)
        if not options['no_video'] and not options["single_file"]:
            # encode each timeline: audio-* -> wav, video -> mp4
            for timeline in timelines:
                print(timeline)
                ext = '.mp4'
                if '/audio' in timeline:
                    ext = '.wav'
                cmd = get_melt() + [
                    timeline,
                    '-quiet',
                    '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
                ]
                if ext == '.wav':
                    cmd += ['vn=1']
                else:
                    #if not timeline.endswith("back.kdenlive"):
                    cmd += ['an=1']
                    cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
                subprocess.call(cmd)
                if ext == '.wav' and timeline.endswith('audio.kdenlive'):
                    cmd = [
                        'ffmpeg', '-y',
                        '-nostats', '-loglevel', 'error',
                        '-i',
                        timeline.replace('.kdenlive', ext),
                        timeline.replace('.kdenlive', '.mp4')
                    ]
                    subprocess.call(cmd)
                    os.unlink(timeline.replace('.kdenlive', ext))
            cmds = []
            fragment_prefix = Path(fragment_prefix)
            # split each stereo wav into its two 5.1 channel files
            for src, out1, out2 in (
                ("audio-front.wav", "fl.wav", "fr.wav"),
                ("audio-center.wav", "fc.wav", "lfe.wav"),
                ("audio-rear.wav", "bl.wav", "br.wav"),
            ):
                cmds.append([
                    "ffmpeg", "-y",
                    "-nostats", "-loglevel", "error",
                    "-i", fragment_prefix / src,
                    "-filter_complex",
                    "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                    "-map", "[left]", fragment_prefix / out1,
                    "-map", "[right]", fragment_prefix / out2,
                ])
            # merge the six mono channels into one 5.1 aac track
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "fl.wav",
                "-i", fragment_prefix / "fr.wav",
                "-i", fragment_prefix / "fc.wav",
                "-i", fragment_prefix / "lfe.wav",
                "-i", fragment_prefix / "bl.wav",
                "-i", fragment_prefix / "br.wav",
                "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
                "-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
            ])
            # mux 5.1 audio onto the front video, stereo onto the back video
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "front.mp4",
                "-i", fragment_prefix / "audio-5.1.mp4",
                "-c", "copy",
                fragment_prefix / "front-5.1.mp4",
            ])
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "back.mp4",
                "-i", fragment_prefix / "audio-back.wav",
                "-c:v", "copy",
                fragment_prefix / "back-audio.mp4",
            ])
            for cmd in cmds:
                if options["debug"]:
                    print(" ".join([str(x) for x in cmd]))
                subprocess.call(cmd)
            # sanity check: muxed files must match back.mp4's duration
            for a, b in (
                ("back-audio.mp4", "back.mp4"),
                ("front-5.1.mp4", "back.mp4"),
            ):
                duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
                duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
                if duration_a != duration_b:
                    print('!!', duration_a, fragment_prefix / a)
                    print('!!', duration_b, fragment_prefix / b)
                    sys.exit(-1)
            shutil.move(fragment_prefix / "back-audio.mp4", fragment_prefix / "back.mp4")
            shutil.move(fragment_prefix / "front-5.1.mp4", fragment_prefix / "front.mp4")
            if options["keep_audio"]:
                shutil.move(fragment_prefix / "audio-center.wav", fragment_prefix / "vocals.wav")
                shutil.move(fragment_prefix / "audio-front.wav", fragment_prefix / "foley.wav")
                shutil.move(fragment_prefix / "audio-back.wav", fragment_prefix / "original.wav")
            # clean up intermediate files
            for fn in (
                "audio-5.1.mp4",
                "audio-center.wav", "audio-rear.wav",
                "audio-front.wav", "audio-back.wav", "back-audio.mp4",
                "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
            ):
                fn = fragment_prefix / fn
                if os.path.exists(fn):
                    os.unlink(fn)
    if options["single_file"]:
        # render all fragments of each timeline into one long file instead
        # of per-fragment outputs, then do the same channel-split/mux dance
        cmds = []
        base_prefix = Path(base_prefix)
        for timeline in (
            "front",
            "back",
            "audio-back",
            "audio-center",
            "audio-front",
            "audio-rear",
        ):
            # NOTE(review): timelines[0] raises IndexError if nothing was
            # rendered for this timeline
            timelines = list(sorted(glob('%s/*/%s.kdenlive' % (base_prefix, timeline))))
            ext = '.mp4'
            if '/audio' in timelines[0]:
                ext = '.wav'
            out = base_prefix / (timeline + ext)
            cmd = get_melt() + timelines + [
                '-quiet',
                '-consumer', 'avformat:%s' % out,
            ]
            if ext == '.wav':
                cmd += ['vn=1']
            else:
                cmd += ['an=1']
                cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
            cmds.append(cmd)
        for src, out1, out2 in (
            ("audio-front.wav", "fl.wav", "fr.wav"),
            ("audio-center.wav", "fc.wav", "lfe.wav"),
            ("audio-rear.wav", "bl.wav", "br.wav"),
        ):
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", base_prefix / src,
                "-filter_complex",
                "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                "-map", "[left]", base_prefix / out1,
                "-map", "[right]", base_prefix / out2,
            ])
        cmds.append([
            "ffmpeg", "-y",
            "-nostats", "-loglevel", "error",
            "-i", base_prefix / "fl.wav",
            "-i", base_prefix / "fr.wav",
            "-i", base_prefix / "fc.wav",
            "-i", base_prefix / "lfe.wav",
            "-i", base_prefix / "bl.wav",
            "-i", base_prefix / "br.wav",
            "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
            "-map", "[a]", "-c:a", "aac", base_prefix / "audio-5.1.mp4"
        ])
        cmds.append([
            "ffmpeg", "-y",
            "-nostats", "-loglevel", "error",
            "-i", base_prefix / "front.mp4",
            "-i", base_prefix / "audio-5.1.mp4",
            "-c", "copy",
            base_prefix / "front-5.1.mp4",
        ])
        cmds.append([
            "ffmpeg", "-y",
            "-nostats", "-loglevel", "error",
            "-i", base_prefix / "back.mp4",
            "-i", base_prefix / "audio-back.wav",
            "-c:v", "copy",
            base_prefix / "back-audio.mp4",
        ])
        for cmd in cmds:
            if options["debug"]:
                print(" ".join([str(x) for x in cmd]))
            subprocess.call(cmd)
        for a, b in (
            ("back-audio.mp4", "back.mp4"),
            ("front-5.1.mp4", "back.mp4"),
        ):
            duration_a = ox.avinfo(str(base_prefix / a))['duration']
            duration_b = ox.avinfo(str(base_prefix / b))['duration']
            if duration_a != duration_b:
                print('!!', duration_a, base_prefix / a)
                print('!!', duration_b, base_prefix / b)
                sys.exit(-1)
        shutil.move(base_prefix / "back-audio.mp4", base_prefix / "back.mp4")
        shutil.move(base_prefix / "front-5.1.mp4", base_prefix / "front.mp4")
        if options["keep_audio"]:
            shutil.move(base_prefix / "audio-center.wav", base_prefix / "vocals.wav")
            shutil.move(base_prefix / "audio-front.wav", base_prefix / "foley.wav")
            shutil.move(base_prefix / "audio-back.wav", base_prefix / "original.wav")
        for fn in (
            "audio-5.1.mp4",
            "audio-center.wav", "audio-rear.wav",
            "audio-front.wav", "audio-back.wav", "back-audio.mp4",
            "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
        ):
            fn = base_prefix / fn
            if os.path.exists(fn):
                os.unlink(fn)
        join_subtitles(base_prefix)
    print("Duration - Target: %s Actual: %s" % (target_position, position))
    print(json.dumps(dict(stats), sort_keys=True, indent=2))
    # persist the clip-duration cache for the next run
    with open(_cache, "w") as fd:
        json.dump(_CACHE, fd)
2023-11-16 08:08:03 +00:00
2024-03-22 10:33:39 +00:00
def add_translations(sub, lang):
    """Return the subtitle's text with translations appended, one per line.

    For each language in `lang`, matching annotations on the same item with
    identical start/end are looked up and their cleaned text is appended.
    'en' is mapped to languages=None for the lookup.
    """
    def _clean(annotation):
        # normalize <br> variants to newlines; strip markup for
        # language-tagged values
        text = annotation.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
        if annotation.languages:
            text = ox.strip_tags(text)
        return text

    value = _clean(sub)
    if lang:
        for slang in lang:
            if slang == "en":
                slang = None
            matches = sub.item.annotations.filter(
                layer="subtitles", start=sub.start, end=sub.end, languages=slang)
            for tsub in matches:
                value += '\n' + _clean(tsub)
    return value
def get_srt(sub, offset=0, lang=None):
    """Convert a subtitle annotation to an srt-style dict {in, out, value}.

    `offset` shifts the timestamps into scene time; `lang` requests
    translated text via add_translations.
    """
    sdata = sub.json(keys=['in', 'out', 'value'])
    cleaned = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n')
    sdata['value'] = cleaned.strip()
    if lang:
        sdata['value'] = add_translations(sub, lang)
    if offset:
        sdata["in"] += offset
        sdata["out"] += offset
    return sdata
2023-12-08 12:13:00 +00:00
def fix_overlaps(data):
    """Trim overlapping subtitles in place: each entry's 'out' is cut to
    1 ms before the next entry's 'in' when they overlap. Returns `data`.
    """
    prev = None
    for entry in data:
        if prev is not None and entry['in'] < prev['out']:
            prev['out'] = entry['in'] - 0.001
        prev = entry
    return data
2023-11-16 08:08:03 +00:00
def update_subtitles(options):
    """Regenerate front.srt for every fragment under <prefix>/render/<offset>/.

    For each scene.json, walks the voice-over clips on audio-center/A1,
    finds the corresponding database item (by batch name + fragment id
    prefix) and collects its 'subtitles' annotations in the requested
    language(s), offset to scene time.
    """
    import item.models
    prefix = Path(options['prefix'])
    base = int(options['offset'])
    # lang may be "en", one language code, or "primary,translation[,...]"
    lang = options["lang"]
    if lang and "," in lang:
        lang = lang.split(',')
    if isinstance(lang, list):
        tlang = lang[1:]  # extra languages get appended as translations
        lang = lang[0]
    else:
        tlang = None
    if lang == "en":
        # 'en' maps to None (same convention as add_translations) —
        # presumably English values are stored without a language tag
        lang = None
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))
    base_prefix = prefix / 'render' / str(base)
    for folder in os.listdir(base_prefix):
        folder = base_prefix / folder
        scene_json = folder / "scene.json"
        if not os.path.exists(scene_json):
            continue
        with open(scene_json) as fd:
            scene = json.load(fd)
        offset = 0
        subs = []
        for clip in scene['audio-center']['A1']:
            if not clip.get("blank"):
                # src path ends in .../<batch>/<fragment_id>_...wav
                batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
                vo = item.models.Item.objects.filter(data__batch__icontains=batch, data__title__startswith=fragment_id + '_').first()
                if vo:
                    #print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
                    for sub in vo.annotations.filter(layer="subtitles").filter(languages=lang).exclude(value="").order_by("start"):
                        sdata = get_srt(sub, offset, tlang)
                        subs.append(sdata)
                else:
                    print("could not find vo for %s" % clip['src'])
            offset += clip['duration']
        path = folder / "front.srt"
        # fix_overlaps mutates in place, so `data` and `subs` alias the
        # same list — encoding `subs` below is intentional-equivalent
        data = fix_overlaps(subs)
        srt = ox.srt.encode(subs)
        write_if_new(str(path), srt, 'b')
2024-01-22 14:06:40 +00:00
def update_m3u(render_prefix, exclude=None):
    """Rebuild front.m3u/back.m3u playlists under `render_prefix` and push
    the front playlist to the 'front' host via scp.

    `exclude` lists render folders to drop from the playlists, either as
    bare folder names (as passed by render_infinity) or full paths.

    Fixes: the mutable default argument, and the exclusion test — glob
    returns full paths, so comparing them against bare folder names via
    startswith could never match.
    """
    if exclude is None:
        exclude = []
    files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4"))
    for ex in exclude:
        # accept both bare folder names and already-prefixed paths
        ex_path = ex if ex.startswith(render_prefix) else render_prefix + ex
        files = [f for f in files if not f.startswith(ex_path + "/")]
    back_m3u = "\n".join(files)
    back_m3u = back_m3u.replace(render_prefix, "")
    front_m3u = back_m3u.replace("back.mp4", "front.mp4")
    back_m3u_f = render_prefix + "back.m3u"
    front_m3u_f = render_prefix + "front.m3u"
    # write to temp names first so readers never see a partial playlist
    with open(back_m3u_f + "_", "w") as fd:
        fd.write(back_m3u)
    with open(front_m3u_f + "_", "w") as fd:
        fd.write(front_m3u)
    shutil.move(front_m3u_f + "_", front_m3u_f)
    cmd = ["scp", front_m3u_f, "front:" + front_m3u_f]
    subprocess.check_call(cmd)
    shutil.move(back_m3u_f + "_", back_m3u_f)
def render_infinity(options):
    """Endless render loop: keep producing new editions under
    <prefix>/render/<offset>/, sync them to the 'front' host, and prune
    old editions beyond the max-items window.

    State (offset counter, limits) persists across restarts in
    <prefix>/infinity.json. This function never returns.
    """
    prefix = options['prefix']
    duration = int(options['duration'])
    state_f = os.path.join(prefix, "infinity.json")
    if os.path.exists(state_f):
        with open(state_f) as fd:
            state = json.load(fd)
    else:
        # first run: editions start at offset 100
        state = {
            "offset": 100,
            "max-items": 30,
            "no_video": False,
        }
    # CLI options always win over persisted state
    for key in ("prefix", "duration"):
        state[key] = options[key]
    while True:
        render_prefix = state["prefix"] + "/render/"
        # numeric edition folders below the current offset
        current = [
            f for f in os.listdir(render_prefix)
            if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
        ]
        if len(current) > state["max-items"]:
            # drop the oldest editions locally and on the front host
            current = ox.sorted_strings(current)
            remove = current[:-state["max-items"]]
            # NOTE(review): `remove` holds bare folder names — check that
            # update_m3u's exclude matching expects the same form
            update_m3u(render_prefix, exclude=remove)
            for folder in remove:
                folder = render_prefix + folder
                print("remove", folder)
                shutil.rmtree(folder)
                cmd = ["ssh", "front", "rm", "-rf", folder]
                #print(cmd)
                subprocess.check_call(cmd)
        render_all(state)
        # push the freshly rendered edition and publish the playlists
        path = "%s%s/" % (render_prefix, state["offset"])
        cmd = ['rsync', '-a', path, "front:" + path]
        subprocess.check_call(cmd)
        update_m3u(render_prefix)
        state["offset"] += 1
        # atomic state update: write aside, then move into place
        with open(state_f + "~", "w") as fd:
            json.dump(state, fd, indent=2)
        shutil.move(state_f + "~", state_f)
2024-12-03 20:12:15 +00:00
def join_subtitles(base_prefix):
    """Concatenate every per-fragment front.srt under `base_prefix` (a Path)
    into one front.srt, shifting each file by the accumulated scene duration.
    """
    merged = []
    offset = 0
    for srt_path in sorted(glob('%s/*/front.srt' % base_prefix)):
        merged += ox.srt.load(srt_path, offset=offset)
        # advance by the sibling scene.json's duration, not the srt's span
        offset += get_scene_duration(srt_path.replace('front.srt', 'scene.json'))
    with open(base_prefix / 'front.srt', 'wb') as fd:
        fd.write(ox.srt.encode(merged))