#!/usr/bin/python3
import json
import os
import subprocess
import sys
import time
from pathlib import Path

import ox
from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE


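# Pick an entry from items using values drawn from seq(); seq is assumed to
# yield single digits (0-9), so several draws are combined for longer lists.
# With pop=True the chosen entry is also removed from items.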
def random_choice(seq, items, pop=False):
    n = n_ = len(items) - 1
    #print('len', n)
    if n == 0:
        if pop:
            return items.pop(n)
        return items[n]
    r = seq()
    base = 10
    while n > 10:
        n /= 10
        #print(r)
        r += seq()
        base += 10
    r = int(n_ * r / base)
    #print('result', r, items)
    if pop:
        return items.pop(r)
    return items[r]


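# Returns True with a probability of roughly (1 - chance), assuming seq()
# yields uniform digits 0-9; chance(seq, 0.5) acts as a coin flip.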
def chance(seq, chance):
    return (seq() / 10) >= chance


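# Assemble a scene description (front/back video tracks V1/V2 and audio
# tracks A1-A4) from the given clips, aiming for a total length close to
# `target`. `base` seeds the pseudo-random sequence, `voice_over` optionally
# provides voice-over clips placed on A3.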
def compose(clips, target=150, base=1024, voice_over=None):
    length = 0
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'back': {
            'V1': [],
            'V2': [],
        },
        'audio': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        }
    }
    all_clips = clips.copy()
    seq = random(base)

    voice_overs = []
    if voice_over:
        vo_keys = list(voice_over)
        if chance(seq, 0.5):
            voice_overs.append(voice_over[vo_keys[chance(seq, len(vo_keys))]])
        elif len(vo_keys) >= 2:
            vo1 = vo_keys.pop(chance(seq, len(vo_keys)))
            vo2 = vo_keys.pop(chance(seq, len(vo_keys)))
            voice_overs.append(voice_over[vo1])
            if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
                voice_overs.append(voice_over[vo2])
    vo_min = sum([vo['duration'] for vo in voice_overs])
    if vo_min > target:
        target = vo_min
    if vo_min < target:
        offset = (target - vo_min) / 2
        scene['audio']['A3'].append({
            'blank': True,
            'duration': offset
        })
    for vo in voice_overs:
        scene['audio']['A3'].append(vo)

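    # fill the timeline clip by clip until the target length is reached;
    # when the pool runs out it is refilled from all_clips, so clips can repeat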
    while target - length > 0 and clips:
        clip = random_choice(seq, clips, True)
        if not clips:
            clips = [c for c in all_clips if c != clip]
        if not clips:
            clips = all_clips.copy()
        if length + clip['duration'] > target:
            break
        length += clip['duration']

        fg = clip['foreground']
        if 'foreground2' in clip:
            if chance(seq, 0.5):
                fg = clip['foreground2']

        scene['front']['V1'].append({
            'duration': clip['duration'],
            'src': fg,
            "filter": {
                'transparency': seq() / 10,
            }
        })

        transparency = seq() / 10
        # coin flip which side is visible (50% chance)
        if chance(seq, 0.5):
            transparency_front = transparency
            transparency_back = 0
        else:
            transparency_back = transparency
            transparency_front = 0
        scene['front']['V2'].append({
            'duration': clip['duration'],
            'src': clip['background'],
            "filter": {
                'transparency': transparency_front
            }
        })
        scene['back']['V1'].append({
            'duration': clip['duration'],
            'src': clip['background'],
            "filter": {
                'transparency': transparency_back
            }
        })
        scene['back']['V2'].append({
            'duration': clip['duration'],
            'src': clip['original'],
            "filter": {
                'transparency': seq() / 10,
            }
        })
        # 50% chance to blur original from 0 to 30
        if chance(seq, 0.5):
            blur = seq() * 3
            if blur:
                scene['back']['V2'][-1]['filter']['blur'] = blur
        scene['audio']['A1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
        })
        scene['audio']['A2'].append({
            'duration': clip['duration'],
            'src': fg,
        })

    return scene


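# Total duration of a scene, summed over all clips on all of its tracks.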
def get_scene_duration(scene):
    duration = 0
    for key, value in scene.items():
        for name, clips in value.items():
            for clip in clips:
                duration += clip['duration']
    return duration


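# Write one .kdenlive project per timeline (front, back, audio) into root and,
# for the audio timeline, an extra per-track project padded with a blank clip
# to the full duration. Returns the list of project files written.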
def render(root, scene, prefix=''):
    fps = 24
    files = []
    for timeline, data in scene.items():
        #print(timeline)
        project = KDEnliveProject(root)

        tracks = []
        for track, clips in data.items():
            #print(track)
            for clip in clips:
                project.append_clip(track, clip)
        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        with open(path, 'w') as fd:
            fd.write(project.to_xml())
        files.append(path)
        if timeline == "audio":
            duration = project.get_duration()
            for track, clips in data.items():
                project = KDEnliveProject(root)
                for clip in clips:
                    project.append_clip(track, clip)
                track_duration = project.get_duration()
                delta = duration - track_duration
                if delta > 0:
                    project.append_clip(track, {'blank': True, "duration": delta / fps})
                path = os.path.join(root, prefix + "%s-%s.kdenlive" % (timeline, track))
                with open(path, 'w') as fd:
                    fd.write(project.to_xml())
                files.append(path)
    return files


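# Build the list of fragments from featured lists (itemlist Django models)
# whose names start with a number; each fragment collects the clips that
# share one of its tags plus its voice-over entries.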
def get_fragments(clips, voice_over):
    import itemlist.models
    import item.models
    from collections import defaultdict

    fragments = []

    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if l.name.split(' ')[0].isdigit():
            fragment = {
                'name': l.name,
                'tags': [t['value'] for t in l.query['conditions'][1]['conditions']],
                'description': l.description
            }
            fragment["id"] = int(fragment['name'].split(' ')[0])
            fragment['clips'] = []
            for clip in clips:
                if set(clip['tags']) & set(fragment['tags']):
                    fragment['clips'].append(clip)
            fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
            fragments.append(fragment)
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments


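# Render everything: read clips and voice-overs from `prefix`, compose one
# scene per fragment, write kdenlive timelines and, unless no_video is set,
# render them with melt and mix the audio tracks down to 5.1.
# `options` is expected to provide 'prefix', 'duration', 'offset' and 'no_video'.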
def render_all(options):
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])

    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))

    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    fragments = get_fragments(clips, voice_over)
    with open(os.path.join(prefix, "fragments.json"), "w") as fd:
        json.dump(fragments, fd, indent=2, ensure_ascii=False)
    position = target_position = 0
    target = fragment_target = duration / len(fragments)
    base_prefix = os.path.join(prefix, 'render', str(base))
    for fragment in fragments:
        fragment_id = int(fragment['name'].split(' ')[0])
        name = fragment['name'].replace(' ', '_')
        if fragment_id < 10:
            name = '0' + name
        if not fragment['clips']:
            print("skipping empty fragment", name)
            continue
        fragment_prefix = os.path.join(base_prefix, name)
        os.makedirs(fragment_prefix, exist_ok=True)

        scene = compose(fragment['clips'], target=target, base=base, voice_over=fragment['voice_over'])
        scene_duration = get_scene_duration(scene)
        print("%s %s -> %s (%s)" % (name, target, scene_duration, fragment_target))
        position += scene_duration
        target_position += fragment_target
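        # keep the total on schedule: shrink the next fragment's target when we
        # are running long, and nudge it up when we are running short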
        if position > target_position:
            target = fragment_target - (position - target_position)
            print("adjusting target from", fragment_target, target)
        elif position < target_position:
            target = target + 0.1 * fragment_target

        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')

        with open(os.path.join(fragment_prefix, 'scene.json'), 'w') as fd:
            json.dump(scene, fd, indent=2, ensure_ascii=False)

        if not options['no_video']:
            for timeline in timelines:
                print(timeline)
                ext = '.mp4'
                if '/audio' in timeline:
                    ext = '.wav'
                cmd = [
                    'xvfb-run', '-a',
                    'melt', timeline,
                    '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
                    '-quiet'
                ]
                subprocess.call(cmd)
                if ext == '.wav' and timeline.endswith('audio.kdenlive'):
                    cmd = [
                        'ffmpeg', '-y',
                        '-nostats', '-loglevel', 'error',
                        '-i',
                        timeline.replace('.kdenlive', ext),
                        timeline.replace('.kdenlive', '.mp4')
                    ]
                    subprocess.call(cmd)
                    os.unlink(timeline.replace('.kdenlive', ext))

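            # split the rendered stereo stems into mono channels (fl/fr, fc/lfe,
            # bl/br) and merge them into a single 5.1 AAC downmix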
            fragment_prefix = Path(fragment_prefix)
            cmds = []
            for src, out1, out2 in (
                ('audio-A1.wav', 'fl.wav', 'fr.wav'),
                ('audio-A2.wav', 'fc.wav', 'lfe.wav'),
                ('audio-A3.wav', 'bl.wav', 'br.wav'),
            ):
                cmds.append([
                    'ffmpeg', '-y',
                    '-nostats', '-loglevel', 'error',
                    '-i', fragment_prefix / src,
                    '-filter_complex',
                    "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                    "-map", "[left]", fragment_prefix / out1,
                    "-map", "[right]", fragment_prefix / out2,
                ])
            cmds.append([
                'ffmpeg', '-y',
                '-nostats', '-loglevel', 'error',
                '-i', fragment_prefix / "fl.wav",
                '-i', fragment_prefix / "fr.wav",
                '-i', fragment_prefix / "fc.wav",
                '-i', fragment_prefix / "lfe.wav",
                '-i', fragment_prefix / "bl.wav",
                '-i', fragment_prefix / "br.wav",
                '-filter_complex', "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
                "-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
            ])
            for cmd in cmds:
                subprocess.call(cmd)
    print("Duration - Target: %s Actual: %s" % (target_position, position))
    with open(_cache, "w") as fd:
        json.dump(_CACHE, fd)