# pandora_t_for_time/render.py — timeline composition and rendering
#!/usr/bin/python3
import json
import os
import subprocess
import sys
import time
import shutil
from pathlib import Path
import ox
from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE
def random_choice(seq, items, pop=False):
    """Pick a pseudo-random element of ``items`` driven by the digit stream ``seq``.

    ``seq`` is a zero-argument callable yielding digits 0-9 (see ``.pi.random``).
    Enough digits are accumulated to cover the index range of ``items``.
    When ``pop`` is true the chosen element is also removed from ``items``.
    """
    last = len(items) - 1
    if last == 0:
        # single element: nothing to choose
        return items.pop(0) if pop else items[0]
    # draw one digit, then one extra digit per additional order of magnitude
    digits = seq()
    scale = 10
    span = last
    while span > 10:
        span /= 10
        digits += seq()
        scale += 10
    index = int(last * digits / scale)
    return items.pop(index) if pop else items[index]
def chance(seq, chance):
    """Draw one digit from ``seq`` and test it against a threshold.

    Returns True when digit/10 >= ``chance`` — roughly a ``(1 - chance)``
    probability for a uniform digit stream.  The second parameter keeps its
    historic name (shadowing the function) so keyword callers stay valid.
    """
    draw = seq() / 10
    return draw >= chance
def get_clip_by_seqid(clips, seqid):
    """Remove and return the first clip whose 'seqid' equals ``seqid``.

    Returns None (and leaves ``clips`` untouched) when no clip matches.
    """
    for index, candidate in enumerate(clips):
        if candidate['seqid'] == seqid:
            return clips.pop(index)
    return None
def compose(clips, target=150, base=1024, voice_over=None):
    """Compose a scene of roughly ``target`` seconds from ``clips``.

    Selection is driven by the deterministic digit stream ``random(base)``,
    so the same ``base`` always yields the same scene.  Returns a dict with
    three timelines ('front', 'back', 'audio'), each mapping track names to
    lists of clip dicts ({'duration', 'src', 'filter': ...} or blanks).
    ``voice_over`` maps keys to clip dicts with a 'duration'; selected voice
    overs go on audio track A3.
    """
    length = 0
    # front/back each have two video tracks; audio has four tracks
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'back': {
            'V1': [],
            'V2': [],
        },
        'audio': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        }
    }
    all_clips = clips.copy()
    seq = random(base)  # deterministic digit stream seeded by `base`
    voice_overs = []
    if voice_over:
        vo_keys = list(voice_over)
        if chance(seq, 0.5):
            # NOTE(review): chance() returns a bool that is used as an index
            # here, so only vo_keys[0] or vo_keys[1] can ever be picked —
            # presumably random_choice() was intended; confirm upstream.
            voice_overs.append(voice_over[vo_keys[chance(seq, len(vo_keys))]])
        elif len(vo_keys) >= 2:
            # pick two voice overs; same bool-as-index caveat as above
            vo1 = vo_keys.pop(chance(seq, len(vo_keys)))
            vo2 = vo_keys.pop(chance(seq, len(vo_keys)))
            voice_overs.append(voice_over[vo1])
            if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
                print("adding second vo")
                voice_overs.append(voice_over[vo2])
    vo_min = sum([vo['duration'] for vo in voice_overs])
    if vo_min > target:
        # voice over is longer than requested: extend the scene to fit it
        target = vo_min
    elif vo_min < target:
        # center the voice over by padding the start of A3 with silence
        offset = (target - vo_min) / 2
        scene['audio']['A3'].append({
            'blank': True,
            'duration': offset
        })
        vo_min += offset
    for vo in voice_overs:
        scene['audio']['A3'].append(vo)
    clip = None
    while target - length > 0 and clips:
        # coin flip which site is visible (50% chance)
        if length:
            remaining = target - length
            remaining = remaining * 1.05 # allow for max of 10% over time
            # drop clips that no longer fit in the remaining time
            clips = [c for c in clips if c['duration'] <= remaining]
            if not clips:
                break
        if clip:
            # 50% chance to continue with the next clip of the same sequence
            if chance(seq, 0.5):
                next_seqid = clip['seqid'] + 1
                clip = get_clip_by_seqid(clips, next_seqid)
            else:
                clip = None
        if not clip:
            clip = random_choice(seq, clips, True)
        if not clips:
            # pool exhausted: refill with everything except the current clip
            clips = [c for c in all_clips if c != clip]
        if not clips:
            clips = all_clips.copy()
        if length + clip['duration'] > target and length >= vo_min:
            break
        print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
        length += clip['duration']
        if "foreground" not in clip and "animation" in clip:
            # animation-only clip: use it fully opaque on the front
            fg = clip['animation']
            transparancy = 1
        else:
            fg = clip['foreground']
            if 'foreground2' in clip:
                if 'foreground3' in clip:
                    n = seq()
                    # BUG(review): these three expressions have no effect —
                    # presumably each branch should assign to fg; fixing it
                    # would change existing deterministic renders.
                    if n <= 3: # 0,1,2,3
                        clip['foreground']
                    elif n <= 6: # 4,5,6
                        clip['foreground2']
                    else: # 7,8,9
                        clip['foreground3']
                elif chance(seq, 0.5):
                    fg = clip['foreground2']
            # sic: 'transparancy' (misspelled) is a separate variable from
            # 'transparency' below
            transparancy = seq() / 10
        if 'foley' in clip:
            foley = clip['foley']
        else:
            # no dedicated foley: fall back to the foreground source
            foley = fg
        scene['front']['V2'].append({
            'duration': clip['duration'],
            'src': fg,
            "filter": {
                'transparency': transparancy,
            }
        })
        transparency = seq() / 10
        # coin flip which site is visible (50% chance)
        if chance(seq, 0.5):
            transparency_front = transparency
            transparency_back = 0
        else:
            transparency_back = transparency
            transparency_front = 0
        #transparency_original = seq() / 10
        transparency_original = 1
        if "background" in clip:
            scene['front']['V1'].append({
                'duration': clip['duration'],
                'src': clip['background'],
                "filter": {
                    'transparency': transparency_front
                }
            })
            scene['back']['V2'].append({
                'duration': clip['duration'],
                'src': clip['background'],
                "filter": {
                    'transparency': transparency_back
                }
            })
        else:
            # no background: animation on front V1, original on back V2
            scene['front']['V1'].append({
                'duration': clip['duration'],
                'src': clip['animation'],
                "filter": {
                    'transparency': 0,
                }
            })
            scene['back']['V2'].append({
                'duration': clip['duration'],
                'src': clip['original'],
                "filter": {
                    'transparency': 0,
                }
            })
        scene['back']['V1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
            "filter": {
                'transparency': transparency_original,
            }
        })
        # 50 % chance to blur original from 0 to 30
        if chance(seq, 0.5):
            blur = seq() * 3
            if blur:
                scene['back']['V1'][-1]['filter']['blur'] = blur
        scene['audio']['A1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
        })
        # TBD: Foley
        scene['audio']['A2'].append({
            'duration': clip['duration'],
            'src': foley,
        })
    print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
    return scene
def get_scene_duration(scene):
    """Return the total duration summed over every clip on every track.

    Note: tracks play in parallel, so this is the sum of all track
    durations, not the wall-clock length of the scene.
    """
    return sum(
        clip['duration']
        for tracks in scene.values()
        for clips in tracks.values()
        for clip in clips
    )
def render(root, scene, prefix=''):
    """Write kdenlive project files for every timeline in *scene*.

    For each timeline (e.g. front/back/audio) one combined project file is
    written.  For the audio timeline an additional project per track is
    written, padded with a trailing blank so every track reaches the
    combined duration.

    Fixes vs. previous revision: the unused local ``tracks`` is gone and the
    frame-to-second conversion uses the named ``fps`` constant instead of a
    magic ``24`` (same value, same behavior).

    Returns the list of file paths written.
    """
    fps = 24  # project frame rate; get_duration() is in frames
    files = []
    for timeline, data in scene.items():
        project = KDEnliveProject(root)
        for track, clips in data.items():
            for clip in clips:
                project.append_clip(track, clip)
        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        with open(path, 'w') as fd:
            fd.write(project.to_xml())
        files.append(path)
        if timeline == "audio":
            # additionally write one project per audio track, padded with
            # silence to the combined timeline duration
            duration = project.get_duration()
            for track, clips in data.items():
                project = KDEnliveProject(root)
                for clip in clips:
                    project.append_clip(track, clip)
                track_duration = project.get_duration()
                delta = duration - track_duration
                if delta > 0:
                    # delta is in frames; blank clips take seconds
                    project.append_clip(track, {'blank': True, "duration": delta / fps})
                path = os.path.join(root, prefix + "%s-%s.kdenlive" % (timeline, track))
                with open(path, 'w') as fd:
                    fd.write(project.to_xml())
                files.append(path)
    return files
def get_fragments(clips, voice_over):
    """Build fragment descriptions from featured item lists.

    Every featured list whose name starts with a number becomes a fragment
    carrying its tags, description, the matching clips (tag intersection)
    and the voice-over entry keyed by the fragment's numeric id.  The result
    is sorted by name.
    """
    import itemlist.models
    import item.models
    from collections import defaultdict
    fragments = []
    featured = itemlist.models.List.objects.filter(status='featured').order_by('name')
    for entry in featured:
        if not entry.name.split(' ')[0].isdigit():
            continue
        tags = [t['value'] for t in entry.query['conditions'][1]['conditions']]
        tag_set = set(tags)
        fragment = {
            'name': entry.name,
            'tags': tags,
            'description': entry.description
        }
        fragment["id"] = int(fragment['name'].split(' ')[0])
        fragment['clips'] = [c for c in clips if set(c['tags']) & tag_set]
        fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
        fragments.append(fragment)
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments
def render_all(options):
    """Compose and render every featured fragment into ``prefix/render/<offset>/``.

    ``options`` keys:
      prefix   -- working directory containing clips.json / voice_over.json
      duration -- total target duration (seconds) shared across fragments
      offset   -- integer seed ("base") for the deterministic random stream
      no_video -- if true, only write project/scene files, skip encoding
    """
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])
    # persistent duration cache shared with render_kdenlive via _CACHE
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))
    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    fragments = get_fragments(clips, voice_over)
    with open(os.path.join(prefix, "fragments.json"), "w") as fd:
        json.dump(fragments, fd, indent=2, ensure_ascii=False)
    position = target_position = 0
    # every fragment initially gets an equal share of the total duration
    target = fragment_target = duration / len(fragments)
    base_prefix = os.path.join(prefix, 'render', str(base))
    for fragment in fragments:
        fragment_id = int(fragment['name'].split(' ')[0])
        name = fragment['name'].replace(' ', '_')
        if fragment_id < 10:
            # zero-pad single-digit ids so folder names sort correctly
            name = '0' + name
        if not fragment['clips']:
            print("skipping empty fragment", name)
            continue
        fragment_prefix = os.path.join(base_prefix, name)
        os.makedirs(fragment_prefix, exist_ok=True)
        scene = compose(fragment['clips'], target=target, base=base, voice_over=fragment['voice_over'])
        scene_duration = get_scene_duration(scene)
        print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
        position += scene_duration
        target_position += fragment_target
        # steer the next fragment's target to correct accumulated drift
        if position > target_position:
            target = fragment_target - (position-target_position)
            print("adjusting target duration for next fragment: %6.3f -> %6.3f" % (fragment_target, target))
        elif position < target_position:
            # NOTE(review): grows the previous `target` rather than starting
            # from fragment_target — confirm the asymmetry is intended
            target = target + 0.1 * fragment_target
        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
        with open(os.path.join(fragment_prefix, 'scene.json'), 'w') as fd:
            json.dump(scene, fd, indent=2, ensure_ascii=False)
        if not options['no_video']:
            for timeline in timelines:
                print(timeline)
                ext = '.mp4'
                if '/audio' in timeline:
                    ext = '.wav'
                # render the kdenlive project via melt (headless, xvfb)
                cmd = [
                    'xvfb-run', '-a',
                    'melt', timeline,
                    '-quiet',
                    '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
                ]
                if ext == '.wav':
                    cmd += ['vn=1']
                else:
                    # intra-only near-lossless H.264 so output can be cut later
                    cmd += ['an=1', 'vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
                subprocess.call(cmd)
                if ext == '.wav' and timeline.endswith('audio.kdenlive'):
                    # keep an mp4 copy of the combined audio mix, drop the wav
                    cmd = [
                        'ffmpeg', '-y',
                        '-nostats', '-loglevel', 'error',
                        '-i',
                        timeline.replace('.kdenlive', ext),
                        timeline.replace('.kdenlive', '.mp4')
                    ]
                    subprocess.call(cmd)
                    os.unlink(timeline.replace('.kdenlive', ext))
            fragment_prefix = Path(fragment_prefix)
            cmds = []
            # split channels 0/1 of each track wav into two mono files,
            # one per 5.1 speaker position
            for src, out1, out2 in (
                ("audio-A1.wav", "fl.wav", "fr.wav"),
                ("audio-A2.wav", "fc.wav", "lfe.wav"),
                ("audio-A3.wav", "bl.wav", "br.wav"),
            ):
                cmds.append([
                    "ffmpeg", "-y",
                    "-nostats", "-loglevel", "error",
                    "-i", fragment_prefix / src,
                    "-filter_complex",
                    "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                    "-map", "[left]", fragment_prefix / out1,
                    "-map", "[right]", fragment_prefix / out2,
                ])
            # merge the six mono files into one aac track
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "fl.wav",
                "-i", fragment_prefix / "fr.wav",
                "-i", fragment_prefix / "fc.wav",
                "-i", fragment_prefix / "lfe.wav",
                "-i", fragment_prefix / "bl.wav",
                "-i", fragment_prefix / "br.wav",
                "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
                "-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
            ])
            # mux the merged audio with the back video (streams copied)
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "back.mp4",
                "-i", fragment_prefix / "audio-5.1.mp4",
                "-c", "copy",
                fragment_prefix / "back-5.1.mp4",
            ])
            for cmd in cmds:
                #print(" ".join([str(x) for x in cmd]))
                subprocess.call(cmd)
            shutil.move(fragment_prefix / "back-5.1.mp4", fragment_prefix / "back.mp4")
            # remove intermediate audio files
            for fn in (
                "audio-5.1.mp4", "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
            ):
                fn = fragment_prefix / fn
                if os.path.exists(fn):
                    os.unlink(fn)
    print("Duration - Target: %s Actual: %s" % (target_position, position))
    with open(_cache, "w") as fd:
        json.dump(_CACHE, fd)