#!/usr/bin/python3
"""Compose and render deterministic video scenes.

Clips are sequenced with a reproducible pseudo-random digit stream
(seeded by ``base`` via ``.pi.random``), written out as kdenlive
project files, and optionally rendered with ``melt``/``ffmpeg``.
"""
import json
import os
import subprocess
import sys
import time

import ox

from .pi import random
from .render_kdenlive import KDEnliveProject


def random_choice(seq, items, pop=False):
    """Pick one element of ``items`` using digits from ``seq``.

    ``seq`` is a zero-argument callable yielding single digits (0-9);
    several digits are consumed for longer lists so the choice stays
    deterministic for a given seed.

    :param seq:   digit-stream callable (see ``.pi.random``)
    :param items: non-empty list to choose from
    :param pop:   when True, remove the chosen element from ``items``
    :return: the chosen (and possibly popped) element

    NOTE(review): because the accumulated digit ratio r/base is < 1,
    the final index ``int(n_ * r / base)`` can never reach the last
    position for most list sizes — a selection bias.  Left unchanged
    on purpose: "fixing" it would alter every seeded composition.
    """
    n = n_ = len(items) - 1
    if n == 0:
        # Single-element list: no randomness needed.
        if pop:
            return items.pop(n)
        return items[n]
    r = seq()
    base = 10
    # Consume one extra digit per order of magnitude of the list size.
    while n > 10:
        n /= 10
        r += seq()
        base += 10
    r = int(n_ * r / base)
    if pop:
        return items.pop(r)
    return items[r]


def chance(seq, chance):
    """Return True with probability ``1 - chance`` (for digit streams).

    With uniform digits 0-9, ``seq() / 10 >= 0.5`` is a fair coin flip.
    """
    return (seq() / 10) >= chance


def compose(clips, target=150, base=1024):
    """Build a scene description of roughly ``target`` seconds.

    Clips are drawn (without immediate repetition) from ``clips`` using
    the deterministic digit stream seeded by ``base``.  The result maps
    timeline names ('front', 'back', 'audio') to track lists of clip
    dicts carrying ``duration``, ``src`` and transparency/blur filters.

    :param clips:  list of clip dicts with 'duration', 'foreground',
                   'background', 'original' (optionally 'foreground2')
    :param target: desired total duration in seconds
    :param base:   seed for the pseudo-random digit stream
    :return: nested scene dict (timeline -> track -> clip list)
    """
    length = 0
    scene = {
        'front': {
            'V1': [],
            'V2': [],
        },
        'back': {
            'V1': [],
            'V2': [],
        },
        'audio': {
            'A1': [],
            'A2': [],
            'A3': [],
            'A4': [],
        }
    }
    all_clips = clips.copy()
    seq = random(base)
    while target - length > 0 and clips:
        clip = random_choice(seq, clips, True)
        if not clips:
            # Pool exhausted: refill, but avoid repeating the clip
            # we just used back-to-back.
            clips = [c for c in all_clips if c != clip]
        if not clips:
            clips = all_clips.copy()
        if length + clip['duration'] > target:
            break
        length += clip['duration']
        fg = clip['foreground']
        if 'foreground2' in clip:
            # 50% chance to use the alternate foreground take.
            if chance(seq, 0.5):
                fg = clip['foreground2']
        scene['front']['V1'].append({
            'duration': clip['duration'],
            'src': fg,
            "filter": {
                'transparency': seq() / 10,
            }
        })
        transparency = seq() / 10
        # coin flip which side is visible (50% chance)
        if chance(seq, 0.5):
            transparency_front = transparency
            transparency_back = 0
        else:
            transparency_back = transparency
            transparency_front = 0
        scene['front']['V2'].append({
            'duration': clip['duration'],
            'src': clip['background'],
            "filter": {
                'transparency': transparency_front
            }
        })
        scene['back']['V1'].append({
            'duration': clip['duration'],
            'src': clip['background'],
            "filter": {
                'transparency': transparency_back
            }
        })
        scene['back']['V2'].append({
            'duration': clip['duration'],
            'src': clip['original'],
            "filter": {
                'transparency': seq() / 10,
            }
        })
        # 50% chance to blur the original, strength 0 to 27 (digit * 3).
        if chance(seq, 0.5):
            blur = seq() * 3
            if blur:
                scene['back']['V2'][-1]['filter']['blur'] = blur
        scene['audio']['A1'].append({
            'duration': clip['duration'],
            'src': clip['original'],
        })
        scene['audio']['A2'].append({
            'duration': clip['duration'],
            'src': fg,
        })
    return scene


def get_scene_duration(scene):
    """Sum the durations of every clip on every track of ``scene``.

    NOTE: tracks play in parallel, so this is the combined clip time,
    not the wall-clock length of the scene.
    """
    duration = 0
    for key, value in scene.items():
        for name, clips in value.items():
            for clip in clips:
                duration += clip['duration']
    return duration


def render(root, scene, prefix=''):
    """Write one ``.kdenlive`` project file per timeline in ``scene``.

    :param root:   project root passed to KDEnliveProject and used as
                   the output directory
    :param scene:  scene dict as produced by :func:`compose`
    :param prefix: path prefix (relative to ``root``) for output files
    :return: list of paths of the written project files
    """
    files = []
    for timeline, data in scene.items():
        project = KDEnliveProject(root)
        for track, clips in data.items():
            for clip in clips:
                project.append_clip(track, clip)
        path = os.path.join(root, prefix + "%s.kdenlive" % timeline)
        with open(path, 'w') as fd:
            fd.write(project.to_xml())
        files.append(path)
    return files


def get_fragments(clips):
    """Load featured itemlists whose names start with a number.

    Each matching list becomes a fragment dict with its name, tag list
    (taken from the list query's second condition group), description,
    and the subset of ``clips`` sharing at least one tag.  Fragments
    are returned sorted by name.

    NOTE(review): assumes ``l.query['conditions'][1]['conditions']``
    holds the tag conditions — verify against the itemlist schema.
    """
    import itemlist.models
    fragments = []
    for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
        if l.name.split(' ')[0].isdigit():
            fragment = {
                'name': l.name,
                'tags': [t['value'] for t in l.query['conditions'][1]['conditions']],
                'description': l.description
            }
            fragment['clips'] = []
            for clip in clips:
                if set(clip['tags']) & set(fragment['tags']):
                    fragment['clips'].append(clip)
            fragments.append(fragment)
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments


def render_all(options):
    """Compose and render every fragment of the full piece.

    Reads ``clips.json`` under ``options['prefix']``, splits the total
    ``options['duration']`` evenly across fragments, and continuously
    nudges each fragment's target length to correct accumulated drift.
    Writes kdenlive projects plus ``scene.json`` per fragment and,
    unless ``options['no_video']`` is set, renders them with melt
    (audio timelines go through a wav -> mp4 ffmpeg pass).

    :param options: dict with keys 'prefix', 'duration', 'offset',
                    'no_video'
    """
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])
    with open(os.path.join(prefix, "clips.json")) as fd:
        clips = json.load(fd)
    # Raises ZeroDivisionError if no featured fragments exist.
    fragments = get_fragments(clips)
    position = target_position = 0
    target = fragment_target = duration / len(fragments)
    base_prefix = os.path.join(prefix, 'render', str(base))
    for fragment in fragments:
        n = int(fragment['name'].split(' ')[0])
        name = fragment['name'].replace(' ', '_')
        if n < 10:
            # Zero-pad single-digit fragment numbers for stable sorting.
            name = '0' + name
        if not fragment['clips']:
            print("skipping empty fragment", name)
            continue
        fragment_prefix = os.path.join(base_prefix, name)
        os.makedirs(fragment_prefix, exist_ok=True)
        scene = compose(fragment['clips'], target=target, base=base)
        scene_duration = get_scene_duration(scene)
        print("%s %s -> %s (%s)" % (name, target, scene_duration, fragment_target))
        position += scene_duration
        target_position += fragment_target
        if position > target_position:
            # Running long: shorten the next fragment by the overshoot.
            target = fragment_target - (position - target_position)
            print("adjusting target from", fragment_target, target)
        elif position < target_position:
            # Running short: stretch the next fragment slightly.
            target = target + 0.1 * fragment_target
        timelines = render(prefix, scene, fragment_prefix[len(prefix) + 1:] + '/')
        with open(os.path.join(fragment_prefix, 'scene.json'), 'w') as fd:
            json.dump(scene, fd, indent=2, ensure_ascii=False)
        if not options['no_video']:
            for timeline in timelines:
                ext = '.mp4'
                if '-audio.kdenlive' in timeline:
                    ext = '.wav'
                cmd = [
                    'xvfb-run', '-a',
                    'melt', timeline,
                    '-consumer', 'avformat:%s' % timeline.replace('.kdenlive', ext)
                ]
                subprocess.call(cmd)
                if ext == '.wav':
                    # Transcode rendered audio to mp4 and drop the wav.
                    cmd = [
                        'ffmpeg', '-i',
                        timeline.replace('.kdenlive', ext),
                        timeline.replace('.kdenlive', '.mp4')
                    ]
                    subprocess.call(cmd)
                    os.unlink(timeline.replace('.kdenlive', ext))
    print("Duration - Target: %s Actual: %s" % (target_position, position))