#!/usr/bin/python3
# pandora_t_for_time/render.py
from collections import defaultdict
from glob import glob
import json
import os
import re
import shutil
import subprocess
import sys
import time
from pathlib import Path

import ox

from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE


def random_int(seq, length):
    n = n_ = length - 1
    #print('len', n)
    if n == 0:
        return n
    r = seq() / 9 * 10
    base = 10
    while n > 10:
        n /= 10
        r += seq() / 9 * 10
        base += 10
    r = int(round(n_ * r / base))
    return r
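

# A worked example of random_int (an illustration; it assumes seq() yields
# single decimal digits 0-9, as the .pi module and the /9 scaling suggest):
# with length=25 (so n_ = 24) and the next two digits being 1 and 4:
#     r = 1/9*10 + 4/9*10 = 50/9, base = 20
#     int(round(24 * (50/9) / 20)) == 7
# i.e. an index into range(25), driven entirely by the digit stream.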


def random_choice(seq, items, pop=False):
    n = random_int(seq, len(items))
    if pop:
        return items.pop(n)
    return items[n]


def chance(seq, chance):
    # seq() yields digits 0-9, so seq() / 10 is 0.0-0.9 and
    # e.g. chance(seq, 0.5) is an even coin flip (digits 0-4)
    return (seq() / 10) < chance


def get_clip_by_seqid(clips, seqid):
    selected = None
    for i, clip in enumerate(clips):
        if clip['seqid'] == seqid:
            selected = i
            break
    if selected is not None:
        return clips.pop(selected)
    return None


def write_if_new(path, data, mode=''):
    read_mode = 'r' + mode
    write_mode = 'w' + mode
    if os.path.exists(path):
        with open(path, read_mode) as fd:
            old = fd.read()
    else:
        old = ""
    is_new = data != old
    if path.endswith(".kdenlive"):
        is_new = re.sub(r'\{.{36}\}', '', data) != re.sub(r'\{.{36}\}', '', old)
    if is_new:
        with open(path, write_mode) as fd:
            fd.write(data)
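

# Note on the .kdenlive special case above: kdenlive project XML embeds
# {uuid}-style ids (36 characters in braces) that change on every export,
# so the comparison strips them first and the project file is only
# rewritten when the actual edit changed.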


def compose(clips, target=150, base=1024, voice_over=None):
    length = 0
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'back': {
            'V1': [],
            'V2': [],
        },
        'audio-back': {
            'A1': [],
        },
        'audio-center': {
            'A1': [],
        },
        'audio-front': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
        'audio-rear': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        },
    }
    all_clips = clips.copy()
    seq = random(10000 + base * 1000)
    used = []
    voice_overs = []
    if voice_over:
        vo_keys = list(sorted(voice_over))
        if chance(seq, 0.5):
            vo_key = vo_keys[random_int(seq, len(vo_keys))]
            voice_overs.append(voice_over[vo_key])
        elif len(vo_keys) >= 2:
            vo1 = vo_keys.pop(random_int(seq, len(vo_keys)))
            vo2 = vo_keys.pop(random_int(seq, len(vo_keys)))
            voice_overs.append(voice_over[vo1])
            if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
                print("adding second vo")
                voice_overs.append(voice_over[vo2])
        print("vo:", [x['src'] for x in voice_overs], list(sorted(voice_over)))
    vo_min = sum([vo['duration'] for vo in voice_overs])
    sub_offset = 0
    if vo_min > target:
        target = vo_min
    elif vo_min < target:
        # center the voice over: pad the start with half of the leftover time
        offset = (target - vo_min) / 2
        scene['audio-center']['A1'].append({
            'blank': True,
            'duration': offset
        })
        scene['audio-rear']['A1'].append({
            'blank': True,
            'duration': offset
        })
        vo_min += offset
        sub_offset = offset
    subs = []
    for vo in voice_overs:
        voc = vo.copy()
        # per-voice volume for the center channel (a) and the rear bed (b)
        a, b = '-11', '-3'
        if 'Whispered' in voc['src']:
            a, b = '-8', '0'
        elif 'Read' in voc['src']:
            a, b = '-7.75', '0.25'
        elif 'Free' in voc['src']:
            a, b = '-8.8', '-0.8'
        elif 'Ashley' in voc['src']:
            a, b = '-9.5', '-1.50'
        elif 'Melody' in voc['src']:
            a, b = '-5.75', '-0.75'
        voc['filter'] = {'volume': a}
        scene['audio-center']['A1'].append(voc)
        vo_low = vo.copy()
        vo_low['filter'] = {'volume': b}
        scene['audio-rear']['A1'].append(vo_low)
        for sub in voc.get("subs", []):
            sub = sub.copy()
            sub["in"] += sub_offset
            sub["out"] += sub_offset
            subs.append(sub)
        sub_offset += voc["duration"]
    if subs:
        scene["subtitles"] = subs
    clip = None
    while target - length > 0 and clips:
        if length:
            # prefer clips that still fit into the remaining time
            remaining = target - length
            remaining = remaining * 1.05  # allow up to 5% overshoot
            clips_ = [c for c in clips if c['duration'] <= remaining]
            if clips_:
                clips = clips_
        if clip:
            # 50% chance to continue with the next clip of the same sequence
            if chance(seq, 0.5):
                next_seqid = clip['seqid'] + 1
                clip = get_clip_by_seqid(clips, next_seqid)
            else:
                clip = None
        if not clip:
            clip = random_choice(seq, clips, True)
        if not clips:
            print("not enough clips, need to reset")
            clips = [c for c in all_clips if c != clip and c not in used]
        if not clips:
            print("not enough clips, also consider used")
            clips = [c for c in all_clips if c != clip]
        if not clips:
            print("not enough clips, also consider last clip")
            clips = all_clips.copy()
        if length + clip['duration'] > target and length >= vo_min:
            break
        print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
        length += clip['duration']
if "foreground" not in clip and "animation" in clip:
fg = clip['animation']
transparancy = 1
else:
fg = clip['foreground']
if 'animation' in clip and chance(seq, 0.15):
fg = clip['animation']
transparancy = 1
else:
if 'foreground2' in clip:
if 'foreground3' in clip:
n = seq()
if n <= 3: # 0,1,2,3
clip['foreground']
elif n <= 6: # 4,5,6
clip['foreground2']
else: # 7,8,9
clip['foreground3']
elif chance(seq, 0.5):
fg = clip['foreground2']
transparancy = seq() / 9
transparancy = 1
if 'foley' in clip:
foley = clip['foley']
else:
foley = fg
        scene['front']['V2'].append({
            'duration': clip['duration'],
            'src': fg,
            "filter": {
                'transparency': transparency,
            }
        })
        # 50% of the time no transparency on the foreground layer,
        # 50% some transparency at 25%, 50%, 75% levels
        transparency = seq() / 9
        # which side is visible: originally a coin flip, now 80% front
        #if chance(seq, 0.5):
        if chance(seq, 0.8):
            transparency_front = transparency
            transparency_back = 0
        else:
            transparency_back = random_choice(seq, [0.25, 0.5, 0.75, 1])
            transparency_front = 0
        transparency_original = seq() / 9
        transparency_original = 1
if "background" in clip:
scene['front']['V1'].append({
'duration': clip['duration'],
'src': clip['background'],
"filter": {
'transparency': transparency_front
}
})
scene['back']['V2'].append({
'duration': clip['duration'],
'src': clip['background'],
"filter": {
'transparency': transparency_back
}
})
else:
scene['front']['V1'].append({
'duration': clip['duration'],
'src': clip['animation'],
"filter": {
'transparency': 0,
}
})
scene['back']['V2'].append({
'duration': clip['duration'],
'src': clip['original'],
"filter": {
'transparency': 0,
}
})
scene['back']['V1'].append({
'duration': clip['duration'],
'src': clip['original'],
"filter": {
'transparency': transparency_original,
}
})
# 50 % chance to blur original from 0 to 30
if chance(seq, 0.5):
blur = seq() * 3
if blur:
scene['back']['V1'][-1]['filter']['blur'] = blur
scene['audio-back']['A1'].append({
'duration': clip['duration'],
'src': clip['original'],
'filter': {'volume': '-8.2'},
})
# TBD: Foley
cf_volume = '-2.5'
scene['audio-front']['A2'].append({
'duration': clip['duration'],
'src': foley,
'filter': {'volume': cf_volume},
})
scene['audio-rear']['A2'].append({
'duration': clip['duration'],
'src': foley,
'filter': {'volume': cf_volume},
})
used.append(clip)
print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
return scene, used
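

# Usage sketch (illustrative values): compose() fills parallel front/back
# video timelines and the 5.1 audio beds from one clip pool:
#
#     scene, used = compose(clips, target=150, base=101)
#     scene['front']['V2'][0]
#     # -> {'duration': 12.3, 'src': '.../fg.mp4',
#     #     'filter': {'transparency': 1}}
#
# The same `base` always produces the same scene, since all randomness is
# drawn from the deterministic digit sequence seeded via
# random(10000 + base * 1000).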


def get_scene_duration(scene):
    duration = 0
    for key, value in scene.items():
        if key == "subtitles":
            # subtitles is a list of subs, not a dict of tracks
            continue
        for name, clips in value.items():
            for clip in clips:
                duration += clip['duration']
    return duration


def get_offset_duration(prefix):
    duration = 0
    for root, folders, files in os.walk(prefix):
        for f in files:
            if f == 'scene.json':
                path = os.path.join(root, f)
                with open(path) as fd:
                    scene = json.load(fd)
                duration += get_scene_duration(scene)
    return duration


def render(root, scene, prefix=''):
    fps = 24
    files = []
    scene_duration = int(get_scene_duration(scene) * fps)
    for timeline, data in scene.items():
        if timeline == "subtitles":
            path = os.path.join(root, prefix + "front.srt")
            data = fix_overlaps(data)
            srt = ox.srt.encode(data)
            write_if_new(path, srt, 'b')
            continue
        project = KDEnliveProject(root)
        track_durations = {}
        for track, clips in data.items():
            for clip in clips:
                project.append_clip(track, clip)
            track_durations[track] = int(sum([c['duration'] for c in clips]) * fps)
        if timeline.startswith('audio-'):
            # pad the longest track with silence so every audio timeline
            # matches the video timelines' duration
            track_duration = project.get_duration()
            delta = scene_duration - track_duration
            if delta > 0:
                for track in track_durations:
                    if track_durations[track] == track_duration:
                        project.append_clip(track, {'blank': True, "duration": delta / fps})
                        break
        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        project_xml = project.to_xml()
        write_if_new(path, project_xml)
        files.append(path)
    return files
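

# render() only writes the .kdenlive project files (plus front.srt); the
# actual encode happens later via `melt` in render_all() below, e.g.
# (hypothetical paths):
#
#     files = render('/srv/t_for_time', scene, 'render/100/01_Fragment/')
#     # -> [... 'front.kdenlive', 'back.kdenlive', 'audio-*.kdenlive']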


def get_fragments(clips, voice_over, prefix):
    import itemlist.models
    import item.models
    fragments = []
    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if not l.name.split(' ')[0].isdigit():
            continue
        fragment = {
            'name': l.name,
            'tags': [],
            'anti-tags': [],
            'description': l.description
        }
        for con in l.query['conditions']:
            if "conditions" in con:
                for sub in con["conditions"]:
                    if sub['key'] == "tags" and sub['operator'] == '==':
                        fragment['tags'].append(sub['value'])
                    elif sub['key'] == "tags" and sub['operator'] == '!=':
                        fragment['anti-tags'].append(sub['value'])
                    else:
                        print(l.name, 'unknown sub condition', sub)
            elif con.get('key') == "tags" and con['operator'] == '==':
                fragment['tags'].append(con['value'])
            elif con.get('key') == "tags" and con['operator'] == '!=':
                fragment['anti-tags'].append(con['value'])
        fragment["id"] = int(fragment['name'].split(' ')[0])
        originals = []
        for i in l.get_items(l.user):
            orig = i.files.filter(selected=True).first()
            if orig:
                ext = os.path.splitext(orig.data.path)[1]
                type_ = i.data['type'][0].lower()
                target = os.path.join(prefix, type_, i.data['title'] + ext)
                originals.append(target)
        fragment['clips'] = []
        for clip in clips:
            #if set(clip['tags']) & set(fragment['tags']) and not set(clip['tags']) & set(fragment['anti-tags']):
            if clip['original'] in originals:
                fragment['clips'].append(clip)
        fragment["voice_over"] = voice_over.get(str(fragment["id"]), {})
        fragments.append(fragment)
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments
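

# Shape of each fragment returned above (values illustrative):
#
#     {
#         "id": 1,
#         "name": "01 Example",
#         "tags": [...],
#         "anti-tags": [...],
#         "description": "...",
#         "clips": [...],       # clips whose original is in the list
#         "voice_over": {...},  # voice_over entry keyed by str(id)
#     }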


def render_all(options):
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))
    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    with open(os.path.join(prefix, "voice_over.json")) as fd:
        voice_over = json.load(fd)
    fragments = get_fragments(clips, voice_over, prefix)
    with open(os.path.join(prefix, "fragments.json"), "w") as fd:
        json.dump(fragments, fd, indent=2, ensure_ascii=False)
    position = target_position = 0
    target = fragment_target = duration / len(fragments)
    base_prefix = os.path.join(prefix, 'render', str(base))
    clips_used = []
    stats = defaultdict(int)
    fragment_base = base
    for fragment in fragments:
        fragment_base += 1
        fragment_id = int(fragment['name'].split(' ')[0])
        name = fragment['name'].replace(' ', '_')
        if fragment_id < 10:
            name = '0' + name
        if not fragment['clips']:
            print("skipping empty fragment", name)
            continue
        fragment_prefix = os.path.join(base_prefix, name)
        os.makedirs(fragment_prefix, exist_ok=True)
        fragment_clips = fragment['clips']
        unused_fragment_clips = [c for c in fragment_clips if c not in clips_used]
        print('fragment clips', len(fragment_clips), 'unused', len(unused_fragment_clips))
        scene, used = compose(unused_fragment_clips, target=target, base=fragment_base, voice_over=fragment['voice_over'])
        clips_used += used
        scene_duration = get_scene_duration(scene)
        print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
        src = [a for a in scene['audio-rear']['A1'] if 'src' in a][0]['src']
        stats[src.split('/')[-2]] += 1
        position += scene_duration
        target_position += fragment_target
        if position > target_position:
            # running long: shorten the next fragment's target
            target = fragment_target - (position - target_position)
            print("adjusting target duration for next fragment: %6.3f -> %6.3f" % (fragment_target, target))
        elif position < target_position:
            # running short: allow the next fragment to run 10% longer
            target = target + 0.1 * fragment_target
        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
        scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
        write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)
        if not options['no_video']:
            for timeline in timelines:
                print(timeline)
                ext = '.mp4'
                if '/audio' in timeline:
                    ext = '.wav'
                cmd = [
                    'xvfb-run', '-a',
                    'melt', timeline,
                    '-quiet',
                    '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
                ]
                if ext == '.wav':
                    cmd += ['vn=1']
                else:
                    #if not timeline.endswith("back.kdenlive"):
                    cmd += ['an=1']
                    cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
                subprocess.call(cmd)
                if ext == '.wav' and timeline.endswith('audio.kdenlive'):
                    cmd = [
                        'ffmpeg', '-y',
                        '-nostats', '-loglevel', 'error',
                        '-i',
                        timeline.replace('.kdenlive', ext),
                        timeline.replace('.kdenlive', '.mp4')
                    ]
                    subprocess.call(cmd)
                    os.unlink(timeline.replace('.kdenlive', ext))
            fragment_prefix = Path(fragment_prefix)
            cmds = []
            # split each stereo audio render into its two channels
            for src, out1, out2 in (
                ("audio-front.wav", "fl.wav", "fr.wav"),
                ("audio-center.wav", "fc.wav", "lfe.wav"),
                ("audio-rear.wav", "bl.wav", "br.wav"),
            ):
                cmds.append([
                    "ffmpeg", "-y",
                    "-nostats", "-loglevel", "error",
                    "-i", fragment_prefix / src,
                    "-filter_complex",
                    "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
                    "-map", "[left]", fragment_prefix / out1,
                    "-map", "[right]", fragment_prefix / out2,
                ])
            # merge the six mono channels into one 5.1 AAC track
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "fl.wav",
                "-i", fragment_prefix / "fr.wav",
                "-i", fragment_prefix / "fc.wav",
                "-i", fragment_prefix / "lfe.wav",
                "-i", fragment_prefix / "bl.wav",
                "-i", fragment_prefix / "br.wav",
                "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
                "-map", "[a]", "-c:a", "aac", fragment_prefix / "audio-5.1.mp4"
            ])
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "front.mp4",
                "-i", fragment_prefix / "audio-5.1.mp4",
                "-c", "copy",
                fragment_prefix / "front-5.1.mp4",
            ])
            cmds.append([
                "ffmpeg", "-y",
                "-nostats", "-loglevel", "error",
                "-i", fragment_prefix / "back.mp4",
                "-i", fragment_prefix / "audio-back.wav",
                "-c:v", "copy",
                fragment_prefix / "back-audio.mp4",
            ])
            for cmd in cmds:
                #print(" ".join([str(x) for x in cmd]))
                subprocess.call(cmd)
            for a, b in (
                ("back-audio.mp4", "back.mp4"),
                ("front-5.1.mp4", "back.mp4"),
            ):
                duration_a = ox.avinfo(str(fragment_prefix / a))['duration']
                duration_b = ox.avinfo(str(fragment_prefix / b))['duration']
                if duration_a != duration_b:
                    print('!!', duration_a, fragment_prefix / a)
                    print('!!', duration_b, fragment_prefix / b)
                    sys.exit(-1)
            shutil.move(fragment_prefix / "back-audio.mp4", fragment_prefix / "back.mp4")
            shutil.move(fragment_prefix / "front-5.1.mp4", fragment_prefix / "front.mp4")
            # remove intermediate audio files
            for fn in (
                "audio-5.1.mp4",
                "audio-center.wav", "audio-rear.wav",
                "audio-front.wav", "audio-back.wav", "back-audio.mp4",
                "fl.wav", "fr.wav", "fc.wav", "lfe.wav", "bl.wav", "br.wav",
            ):
                fn = fragment_prefix / fn
                if os.path.exists(fn):
                    os.unlink(fn)
    print("Duration - Target: %s Actual: %s" % (target_position, position))
    print(json.dumps(dict(stats), sort_keys=True, indent=2))
    with open(_cache, "w") as fd:
        json.dump(_CACHE, fd)


def add_translations(sub, lang):
    value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if sub.languages:
        value = ox.strip_tags(value)
    if lang:
        for slang in lang:
            if slang == "en":
                slang = None
            for tsub in sub.item.annotations.filter(layer="subtitles", start=sub.start, end=sub.end, languages=slang):
                tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
                if tsub.languages:
                    tvalue = ox.strip_tags(tvalue)
                value += '\n' + tvalue
    return value


def get_srt(sub, offset=0, lang=None):
    sdata = sub.json(keys=['in', 'out', 'value'])
    sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if lang:
        sdata['value'] = add_translations(sub, lang)
    if offset:
        sdata["in"] += offset
        sdata["out"] += offset
    return sdata


def fix_overlaps(data):
    previous = None
    for sub in data:
        if previous is None:
            previous = sub
        else:
            if sub['in'] < previous['out']:
                # trim the previous subtitle to end just before this one
                previous['out'] = sub['in'] - 0.001
            previous = sub
    return data
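

# Example: two subtitles overlapping by 0.5s; the first is trimmed to end
# just before the second starts (values illustrative):
#
#     subs = [{'in': 0.0, 'out': 4.5, 'value': 'a'},
#             {'in': 4.0, 'out': 6.0, 'value': 'b'}]
#     fix_overlaps(subs)[0]['out']  # -> 3.999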


def update_subtitles(options):
    import item.models
    prefix = Path(options['prefix'])
    base = int(options['offset'])
    lang = options["lang"]
    if lang and "," in lang:
        lang = lang.split(',')
    if isinstance(lang, list):
        tlang = lang[1:]
        lang = lang[0]
    else:
        tlang = None
    if lang == "en":
        lang = None
    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
        with open(_cache) as fd:
            _CACHE.update(json.load(fd))
    base_prefix = prefix / 'render' / str(base)
    for folder in os.listdir(base_prefix):
        folder = base_prefix / folder
        with open(folder / "scene.json") as fd:
            scene = json.load(fd)
        offset = 0
        subs = []
        for clip in scene['audio-center']['A1']:
            if not clip.get("blank"):
                batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
                vo = item.models.Item.objects.filter(data__batch__icontains=batch, data__title__startswith=fragment_id + '_').first()
                if vo:
                    #print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
                    for sub in vo.annotations.filter(layer="subtitles").filter(languages=lang).exclude(value="").order_by("start"):
                        sdata = get_srt(sub, offset, tlang)
                        subs.append(sdata)
                else:
                    print("could not find vo for %s" % clip['src'])
            offset += clip['duration']
        path = folder / "front.srt"
        subs = fix_overlaps(subs)
        srt = ox.srt.encode(subs)
        write_if_new(str(path), srt, 'b')


def update_m3u(render_prefix, exclude=()):
    files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4"))
    for ex in exclude:
        files = [f for f in files if not f.startswith(ex + "/")]
    back_m3u = "\n".join(files)
    back_m3u = back_m3u.replace(render_prefix, "")
    front_m3u = back_m3u.replace("back.mp4", "front.mp4")
    back_m3u_f = render_prefix + "back.m3u"
    front_m3u_f = render_prefix + "front.m3u"
    # write to temporary files first, then move them into place
    with open(back_m3u_f + "_", "w") as fd:
        fd.write(back_m3u)
    with open(front_m3u_f + "_", "w") as fd:
        fd.write(front_m3u)
    shutil.move(front_m3u_f + "_", front_m3u_f)
    cmd = ["scp", front_m3u_f, "front:" + front_m3u_f]
    subprocess.check_call(cmd)
    shutil.move(back_m3u_f + "_", back_m3u_f)


def render_infinity(options):
    prefix = options['prefix']
    duration = int(options['duration'])
    state_f = os.path.join(prefix, "infinity.json")
    if os.path.exists(state_f):
        with open(state_f) as fd:
            state = json.load(fd)
    else:
        state = {
            "offset": 100,
            "max-items": 30,
            "no_video": False,
        }
    for key in ("prefix", "duration"):
        state[key] = options[key]
    while True:
        render_prefix = state["prefix"] + "/render/"
        current = [
            f for f in os.listdir(render_prefix)
            if f.isdigit() and os.path.isdir(render_prefix + f) and state["offset"] > int(f) >= 100
        ]
        if len(current) > state["max-items"]:
            # drop the oldest batches, locally and on the front machine
            current = ox.sorted_strings(current)
            remove = current[:-state["max-items"]]
            update_m3u(render_prefix, exclude=remove)
            for folder in remove:
                folder = render_prefix + folder
                print("remove", folder)
                shutil.rmtree(folder)
                cmd = ["ssh", "front", "rm", "-rf", folder]
                #print(cmd)
                subprocess.check_call(cmd)
        render_all(state)
        path = "%s%s/" % (render_prefix, state["offset"])
        cmd = ['rsync', '-a', path, "front:" + path]
        subprocess.check_call(cmd)
        update_m3u(render_prefix)
        state["offset"] += 1
        with open(state_f + "~", "w") as fd:
            json.dump(state, fd, indent=2)
        shutil.move(state_f + "~", state_f)
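

# State file sketch: render_infinity keeps its loop state in
# <prefix>/infinity.json and bumps "offset" once per rendered batch.
# Defaults used when the file does not exist yet ("prefix" and "duration"
# are copied from the CLI options on every run):
#
#     {
#         "offset": 100,
#         "max-items": 30,
#         "no_video": false,
#         "prefix": "...",
#         "duration": "..."
#     }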