diff --git a/management/commands/generate_clips.py b/management/commands/generate_clips.py
index 23bc61c..e2ecca9 100644
--- a/management/commands/generate_clips.py
+++ b/management/commands/generate_clips.py
@@ -23,6 +23,9 @@ def resolve_roman(s):
         return s.replace(extra, new)
     return s
 
+def format_duration(duration, fps):
+    return float('%0.5f' % (round(duration * fps) / fps))
+
 class Command(BaseCommand):
     help = 'generate symlinks to clips and clips.json'
 
@@ -68,6 +71,10 @@ class Command(BaseCommand):
             if not clip["duration"]:
                 print('!!', durations, clip)
                 continue
+            cd = format_duration(clip["duration"], 24)
+            #if cd != clip["duration"]:
+            #    print(clip["duration"], '->', cd, durations, clip)
+            clip["duration"] = cd
             clip['tags'] = i.data.get('tags', [])
             clip['editingtags'] = i.data.get('editingtags', [])
             name = os.path.basename(clip['original'])
@@ -117,7 +124,7 @@ class Command(BaseCommand):
                     subs.append(sdata)
                 voice_over[fragment_id][batch] = {
                     "src": target,
-                    "duration": source.duration,
+                    "duration": format_duration(source.duration, 24),
                     "subs": subs
                 }
         with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
diff --git a/render.py b/render.py
index 32d43a7..36518bf 100644
--- a/render.py
+++ b/render.py
@@ -11,8 +11,10 @@ import time
 from pathlib import Path
 
 import ox
+import lxml.etree
+
 from .pi import random
-from .render_kdenlive import KDEnliveProject, _CACHE
+from .render_kdenlive import KDEnliveProject, _CACHE, melt_xml, get_melt
 
 
 def random_int(seq, length):
@@ -64,6 +66,8 @@ def write_if_new(path, data, mode=''):
     with open(path, write_mode) as fd:
         fd.write(data)
 
+def format_duration(duration, fps):
+    return float('%0.5f' % (round(duration * fps) / fps))
 
 def compose(clips, target=150, base=1024, voice_over=None):
     fps = 24
@@ -120,7 +124,7 @@ def compose(clips, target=150, base=1024, voice_over=None):
         if vo_min > target:
             target = vo_min
         elif vo_min < target:
-            offset = int(((target - vo_min) / 2) * fps) / fps
+            offset = format_duration((target - vo_min) / 2, fps)
             scene['audio-center']['A1'].append({
                 'blank': True,
                 'duration': offset
@@ -188,7 +192,7 @@ def compose(clips, target=150, base=1024, voice_over=None):
         if length + clip['duration'] > target and length >= vo_min:
             break
         print('%06.3f %06.3f' % (length, clip['duration']), os.path.basename(clip['original']))
-        length += clip['duration']
+        length += int(clip['duration'] * fps) / fps
 
         if "foreground" not in clip and "animation" in clip:
             fg = clip['animation']
@@ -300,8 +304,11 @@ def compose(clips, target=150, base=1024, voice_over=None):
             })
             used.append(clip)
     print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
-    if sub_offset < length:
-        delta = length - sub_offset
+    scene_duration = int(get_scene_duration(scene) * fps)
+    sub_offset = int(sub_offset * fps)
+    if sub_offset < scene_duration:
+        delta = format_duration((scene_duration - sub_offset) / fps, fps)
+        print(">> add %0.3f of silence.. %0.3f (scene_duration)" % (delta, scene_duration / fps))
         scene['audio-center']['A1'].append({
             'blank': True,
             'duration': delta
@@ -310,8 +317,24 @@
             'blank': True,
             'duration': delta
         })
+    elif sub_offset > scene_duration:
+        delta = format_duration((scene_duration - sub_offset) / fps, fps)
+        scene['audio-center']['A1'][-1]["duration"] += delta
+        scene['audio-rear']['A1'][-1]["duration"] += delta
+        print("WTF, needed to cut %s new duration: %s" % (delta, scene['audio-center']['A1'][-1]["duration"]))
+        print(scene['audio-center']['A1'][-1])
     return scene, used
 
+def get_track_duration(scene, k, n):
+    duration = 0
+    for key, value in scene.items():
+        if key == k:
+            for name, clips in value.items():
+                if name == n:
+                    for clip in clips:
+                        duration += int(clip['duration'] * 24)
+    return duration / 24
+
 def get_scene_duration(scene):
     if isinstance(scene, str):
         with open(scene) as fd:
@@ -320,8 +343,8 @@
     for key, value in scene.items():
         for name, clips in value.items():
             for clip in clips:
-                duration += clip['duration']
-    return duration
+                duration += int(clip['duration'] * 24)
+    return duration / 24
 
 def get_offset_duration(prefix):
     duration = 0
@@ -331,7 +354,8 @@
         duration += get_scene_duration(scene)
     return duration
 
-def render(root, scene, prefix=''):
+def render(root, scene, prefix='', options=None):
+    if options is None: options = {}
     fps = 24
     files = []
     scene_duration = int(get_scene_duration(scene) * fps)
@@ -351,7 +375,7 @@
             #print(track)
             for clip in clips:
                 project.append_clip(track, clip)
-            track_durations[track] = int(sum([c['duration'] for c in clips]) * fps)
+            track_durations[track] = sum([int(c['duration'] * fps) for c in clips])
         if timeline.startswith('audio-'):
             track_duration = project.get_duration()
             delta = scene_duration - track_duration
@@ -359,13 +383,34 @@
             for track in track_durations:
                 if track_durations[track] == track_duration:
                     project.append_clip(track, {'blank': True, "duration": delta/fps})
-                    break
+
         path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
         project_xml = project.to_xml()
         write_if_new(path, project_xml)
+
+        if options["debug"]:
+            # check duration
+            out_duration = get_project_duration(path)
+            p_duration = project.get_duration()
+            print(path, 'out: %s, project: %s, scene: %s' %(out_duration, p_duration, scene_duration))
+            if p_duration != scene_duration:
+                print(path, 'FAIL project: %s, scene: %s' %(p_duration, scene_duration))
+                _cache = os.path.join(root, "cache.json")
+                with open(_cache, "w") as fd:
+                    json.dump(_CACHE, fd)
+                sys.exit(1)
+            if out_duration != p_duration:
+                print(path, 'fail got: %s expected: %s' %(out_duration, p_duration))
+                sys.exit(1)
+
         files.append(path)
     return files
 
+def get_project_duration(file):
+    out = melt_xml(file)
+    chain = lxml.etree.fromstring(out).xpath('producer')[0]
+    duration = int(chain.attrib['out']) + 1
+    return duration
 
 def get_fragments(clips, voice_over, prefix):
     import itemlist.models
@@ -415,8 +460,6 @@
     return fragments
 
 
-def render_timeline(options):
-
 def render_all(options):
     prefix = options['prefix']
     duration = int(options['duration'])
@@ -470,7 +513,7 @@
         elif position < target_position:
             target = target + 0.1 * fragment_target
 
-        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
+        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/', options)
 
         scene_json = json.dumps(scene, indent=2, ensure_ascii=False)
         write_if_new(os.path.join(fragment_prefix, 'scene.json'), scene_json)
@@ -481,9 +524,8 @@
             ext = '.mp4'
             if '/audio' in timeline:
                 ext = '.wav'
-            cmd = [
-                'xvfb-run', '-a',
-                'melt', timeline,
+            cmd = get_melt() + [
+                timeline,
                 '-quiet',
                 '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext),
             ]
@@ -596,10 +638,7 @@
         if '/audio' in timelines[0]:
             ext = '.wav'
         out = base_prefix / (timeline + ext)
-        cmd = [
-            'xvfb-run', '-a',
-            'melt'
-        ] + timelines + [
+        cmd = get_melt() + timelines + [
             '-quiet',
             '-consumer', 'avformat:%s' % out,
         ]
diff --git a/render_kdenlive.py b/render_kdenlive.py
index 2431500..cdf755b 100644
--- a/render_kdenlive.py
+++ b/render_kdenlive.py
@@ -4,6 +4,7 @@ import subprocess
 import lxml.etree
 import uuid
 import os
+import sys
 
 _CACHE = {}
 _IDS = defaultdict(int)
@@ -12,6 +13,14 @@ def get_propery(element, name):
     return element.xpath('property[@name="%s"]' % name)[0].text
 
 
+def get_melt():
+    cmd = ['melt']
+    if 'XDG_RUNTIME_DIR' not in os.environ:
+        os.environ['XDG_RUNTIME_DIR'] = '/tmp/runtime-pandora'
+    if 'DISPLAY' not in os.environ:
+        cmd = ['xvfb-run', '-a'] + cmd
+    return cmd
+
 def melt_xml(file):
     out = None
     real_path = os.path.realpath(file)
@@ -20,7 +29,8 @@
         if os.stat(real_path).st_mtime != ts:
             out = None
     if not out:
-        out = subprocess.check_output(['melt', file, '-consumer', 'xml']).decode()
+        cmd = get_melt() + [file, '-consumer', 'xml']
+        out = subprocess.check_output(cmd).decode()
         _CACHE[file] = [os.stat(real_path).st_mtime, out]
     return out
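For reference, a minimal standalone sketch of the format_duration() helper this patch adds to both generate_clips.py and render.py: it snaps a duration in seconds to the nearest whole frame at the given fps, so clip and voice-over durations land on frame boundaries before they are summed. The sample durations below are invented and not part of the patch.

# illustrative sketch only, not part of the patch; sample durations are made up
def format_duration(duration, fps):
    # round to the nearest frame, then keep 5 decimals
    return float('%0.5f' % (round(duration * fps) / fps))

if __name__ == '__main__':
    fps = 24
    for seconds in (2.397, 3.1415, 0.71):
        frames = round(seconds * fps)
        print('%-7s -> %0.5f (%d frames)' % (seconds, format_duration(seconds, fps), frames))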