pandora_t_for_time/render.py

#!/usr/bin/python3
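"""
Compose and render randomized scene fragments from annotated clips.

Scenes are written as Kdenlive projects (front, back and audio timelines),
rendered with melt, and the audio tracks are mixed into a 5.1 track with
ffmpeg.
"""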
import json
import os
import subprocess
import sys
import time
import shutil
from pathlib import Path
import ox
from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE
def random_choice(seq, items, pop=False):
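    """
    Pick an item from items using the digit sequence seq.

    For longer lists additional digits are drawn and the accumulated value is
    scaled back to an index. With pop=True the chosen item is also removed
    from items.
    """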
n = n_ = len(items) - 1
#print('len', n)
if n == 0:
if pop:
return items.pop(n)
return items[n]
r = seq()
base = 10
while n > 10:
n /= 10
#print(r)
r += seq()
base += 10
r = int(n_ * r / base)
#print('result', r, items)
if pop:
return items.pop(r)
return items[r]
def chance(seq, chance):
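    """
    Return True if the next digit of seq, scaled to 0.0-0.9, is at least
    chance; e.g. chance(seq, 0.5) is True for digits 5-9, i.e. half of the time.
    """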
return (seq() / 10) >= chance
def get_clip_by_seqid(clips, seqid):
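    """Remove and return the clip with the given seqid, or None if there is no match."""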
selected = None
for i, clip in enumerate(clips):
if clip['seqid'] == seqid:
selected = i
break
    if selected is not None:
        return clips.pop(selected)
return None
def compose(clips, target=150, base=1024, voice_over=None):
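    """
    Compose a scene of roughly `target` seconds from the given clips.

    Returns a dict with a 'front' and a 'back' timeline (video tracks V1/V2)
    and an 'audio' timeline (tracks A1-A4): A1 carries the original clip
    audio, A2 the foley, A3 the voice over. `base` seeds the pseudo random
    sequence.
    """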
length = 0
scene = {
'front': {
'V1': [],
'V2': [],
},
'back': {
'V1': [],
'V2': [],
},
'audio': {
'A1': [],
'A2': [],
'A3': [],
'A4': [],
}
}
all_clips = clips.copy()
seq = random(base)
voice_overs = []
if voice_over:
vo_keys = list(voice_over)
if chance(seq, 0.5):
            # 50% chance: use a single voice over
            voice_overs.append(voice_over[random_choice(seq, vo_keys)])
        elif len(vo_keys) >= 2:
            # otherwise combine two voice overs (the second one only if both fit the target)
            vo1 = random_choice(seq, vo_keys, True)
            vo2 = random_choice(seq, vo_keys, True)
voice_overs.append(voice_over[vo1])
if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
voice_overs.append(voice_over[vo2])
vo_min = sum([vo['duration'] for vo in voice_overs])
if vo_min > target:
target = vo_min
if vo_min < target:
offset = (target - vo_min) / 2
scene['audio']['A3'].append({
'blank': True,
'duration': offset
})
for vo in voice_overs:
scene['audio']['A3'].append(vo)
clip = None
while target - length > 0 and clips:
        # 50% chance to continue with the next clip of the same sequence
if clip:
if chance(seq, 0.5):
next_seqid = clip['seqid'] + 1
clip = get_clip_by_seqid(clips, next_seqid)
else:
clip = None
if not clip:
clip = random_choice(seq, clips, True)
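        # once every clip has been used, refill the pool (avoiding the current clip)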
if not clips:
clips = [c for c in all_clips if c != clip]
if not clips:
clips = all_clips.copy()
if length + clip['duration'] > target:
break
length += clip['duration']
        # pick the foreground layer: fall back to the animation if the clip
        # has no foreground, and use a dedicated foley track if present
        if "foreground" not in clip and "animation" in clip:
            fg = clip['animation']
        else:
            fg = clip['foreground']
        if 'foley' in clip:
            foley = clip['foley']
        else:
            foley = fg
        if 'foreground2' in clip:
            if 'foreground3' in clip:
                # three foreground variants: pick one (40/30/30 split)
                n = seq()
                if n <= 3: # 0,1,2,3
                    fg = clip['foreground']
                elif n <= 6: # 4,5,6
                    fg = clip['foreground2']
                else: # 7,8,9
                    fg = clip['foreground3']
            elif chance(seq, 0.5):
                # two foreground variants: coin flip
                fg = clip['foreground2']
scene['front']['V1'].append({
'duration': clip['duration'],
'src': fg,
"filter": {
'transparency': seq() / 10,
}
})
transparency = seq() / 10
        # coin flip which side is visible (50% chance)
if chance(seq, 0.5):
transparency_front = transparency
transparency_back = 0
else:
transparency_back = transparency
transparency_front = 0
if "background" in clip:
scene['front']['V2'].append({
'duration': clip['duration'],
'src': clip['background'],
"filter": {
'transparency': transparency_front
}
})
scene['back']['V1'].append({
'duration': clip['duration'],
'src': clip['background'],
"filter": {
'transparency': transparency_back
}
})
else:
scene['front']['V2'].append({
'blank': True,
'duration': clip['duration'],
})
scene['back']['V1'].append({
'blank': True,
'duration': clip['duration'],
})
scene['back']['V2'].append({
'duration': clip['duration'],
'src': clip['original'],
"filter": {
'transparency': seq() / 10,
}
})
        # 50% chance to blur the original layer (blur values 0-27 in steps of 3)
if chance(seq, 0.5):
blur = seq() * 3
if blur:
scene['back']['V2'][-1]['filter']['blur'] = blur
scene['audio']['A1'].append({
'duration': clip['duration'],
'src': clip['original'],
})
# TBD: Foley
scene['audio']['A2'].append({
'duration': clip['duration'],
'src': foley,
})
return scene
def get_scene_duration(scene):
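    """Return the sum of all clip durations across every timeline and track of a scene."""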
duration = 0
for key, value in scene.items():
for name, clips in value.items():
for clip in clips:
duration += clip['duration']
return duration
2023-10-08 11:19:05 +00:00
def render(root, scene, prefix=''):
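    """
    Write one Kdenlive project per timeline (front, back, audio) and, for the
    audio timeline, one additional project per track padded to the full
    duration. Returns the list of .kdenlive files written to `root`.
    """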
fps = 24
files = []
for timeline, data in scene.items():
#print(timeline)
project = KDEnliveProject(root)
tracks = []
for track, clips in data.items():
#print(track)
for clip in clips:
project.append_clip(track, clip)
path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
with open(path, 'w') as fd:
fd.write(project.to_xml())
files.append(path)
if timeline == "audio":
duration = project.get_duration()
for track, clips in data.items():
project = KDEnliveProject(root)
for clip in clips:
project.append_clip(track, clip)
track_duration = project.get_duration()
delta = duration - track_duration
if delta > 0:
                    project.append_clip(track, {'blank': True, "duration": delta / fps})
path = os.path.join(root, prefix + "%s-%s.kdenlive" % (timeline, track))
with open(path, 'w') as fd:
fd.write(project.to_xml())
files.append(path)
return files
def get_fragments(clips, voice_over):
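    """
    Build the list of fragments from featured lists whose names start with a
    number; clips are matched by tag and voice over files by fragment id.
    """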
import itemlist.models
import item.models
from collections import defaultdict
fragments = []
for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
if l.name.split(' ')[0].isdigit():
fragment = {
'name': l.name,
'tags': [t['value'] for t in l.query['conditions'][1]['conditions']],
'description': l.description
}
fragment["id"] = int(fragment['name'].split(' ')[0])
fragment['clips'] = []
for clip in clips:
if set(clip['tags']) & set(fragment['tags']):
fragment['clips'].append(clip)
fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
fragments.append(fragment)
fragments.sort(key=lambda f: ox.sort_string(f['name']))
return fragments
def render_all(options):
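    """
    Render all fragments into <prefix>/render/<offset>/<fragment>/.

    Expects clips.json and voice_over.json in the prefix folder and uses
    options['prefix'], options['duration'], options['offset'] and
    options['no_video'].
    """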
prefix = options['prefix']
duration = int(options['duration'])
base = int(options['offset'])
_cache = os.path.join(prefix, "cache.json")
if os.path.exists(_cache):
with open(_cache) as fd:
_CACHE.update(json.load(fd))
with open(os.path.join(prefix, "clips.json")) as fd:
clips = json.load(fd)
with open(os.path.join(prefix, "voice_over.json")) as fd:
voice_over = json.load(fd)
fragments = get_fragments(clips, voice_over)
with open(os.path.join(prefix, "fragments.json"), "w") as fd:
json.dump(fragments, fd, indent=2, ensure_ascii=False)
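    # start with an even share of the total duration per fragment; the
    # per-fragment target is adjusted below as the rendered durations drift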
position = target_position = 0
target = fragment_target = duration / len(fragments)
base_prefix = os.path.join(prefix, 'render', str(base))
for fragment in fragments:
fragment_id = int(fragment['name'].split(' ')[0])
name = fragment['name'].replace(' ', '_')
if fragment_id < 10:
name = '0' + name
if not fragment['clips']:
print("skipping empty fragment", name)
continue
fragment_prefix = os.path.join(base_prefix, name)
os.makedirs(fragment_prefix, exist_ok=True)
scene = compose(fragment['clips'], target=target, base=base, voice_over=fragment['voice_over'])
scene_duration = get_scene_duration(scene)
print("%s %s -> %s (%s)" % (name, target, scene_duration, fragment_target))
position += scene_duration
target_position += fragment_target
if position > target_position:
            target = fragment_target - (position - target_position)
            print("adjusting target from", fragment_target, "to", target)
elif position < target_position:
target = target + 0.1 * fragment_target
timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
with open(os.path.join(fragment_prefix, 'scene.json'), 'w') as fd:
json.dump(scene, fd, indent=2, ensure_ascii=False)
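        # render each timeline with melt: audio timelines to wav, video timelines to mp4 (x264)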
if not options['no_video']:
for timeline in timelines:
print(timeline)
ext = '.mp4'
if '/audio' in timeline:
ext = '.wav'
cmd = [
'xvfb-run', '-a',
'melt', timeline,
'-quiet',
'-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
]
if ext == '.wav':
cmd += ['vn=1']
else:
cmd += ['an=1', 'vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
subprocess.call(cmd)
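                # convert the combined audio timeline to mp4 and drop the wav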
if ext == '.wav' and timeline.endswith('audio.kdenlive'):
cmd = [
'ffmpeg', '-y',
'-nostats', '-loglevel', 'error',
'-i',
timeline.replace('.kdenlive', ext),
timeline.replace('.kdenlive', '.mp4')
]
subprocess.call(cmd)
os.unlink(timeline.replace('.kdenlive', ext))
fragment_prefix = Path(fragment_prefix)
cmds = []
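            # split each rendered audio track into two mono wav files (one per 5.1 channel)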
for src, out1, out2 in (
("audio-A1.wav", "fl.wav", "fr.wav"),
("audio-A2.wav", "fc.wav", "lfe.wav"),
("audio-A3.wav", "bl.wav", "br.wav"),
):
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / src,
"-filter_complex",
"[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
"-map", "[left]", fragment_prefix / out1,
"-map", "[right]", fragment_prefix / out2,
])
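            # merge the six mono channels into a single 5.1 aac track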
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "fl.wav",
"-i", fragment_prefix / "fr.wav",
"-i", fragment_prefix / "fc.wav",
"-i", fragment_prefix / "lfe.wav",
"-i", fragment_prefix / "bl.wav",
"-i", fragment_prefix / "br.wav",
"-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
"-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
])
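            # mux the 5.1 audio with the back video (streams are copied, not re-encoded)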
cmds.append([
"ffmpeg", "-y",
"-nostats", "-loglevel", "error",
"-i", fragment_prefix / "back.mp4",
"-i", fragment_prefix / "audio-5.1.mp4",
"-c", "copy",
fragment_prefix / "back-5.1.mp4",
])
for cmd in cmds:
#print(" ".join([str(x) for x in cmd]))
subprocess.call(cmd)
shutil.move(fragment_prefix / "back-5.1.mp4", fragment_prefix / "back.mp4")
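            # clean up intermediate audio files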
for fn in (
"audio-5.1.mp4", "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
):
fn = fragment_prefix / fn
if os.path.exists(fn):
os.unlink(fn)
print("Duration - Target: %s Actual: %s" % (target_position, position))
with open(_cache, "w") as fd:
json.dump(_CACHE, fd)