#!/usr/bin/python
# Build MLT XML timelines (source video, vocals, music, drones, mixed audio
# and the final composited edit) from a JSON edit description.
from argparse import ArgumentParser
import json
import os
import subprocess
import sys
import time

import ox
import mlt
from PyQt5 import QtWidgets

usage = "usage: %(prog)s [options] json"
parser = ArgumentParser(usage=usage)
parser.add_argument('-p', '--prefix', dest='prefix', help='version prefix', default='.')
parser.add_argument('files', metavar='path', type=str, nargs='*', help='json source file')
opts = parser.parse_args()

if 'performance' in opts.prefix:
    version = 'performance'
else:
    version = 'main'

# Avoid segfault in webvfx
app = QtWidgets.QApplication(sys.argv)

#mlt.mlt_log_set_level(40)  # verbose
mlt.Factory.init()


def add_color(playlist, color, duration):
    # solid color producer, used for black frames
    red = mlt.Producer(profile, 'color:' + color)
    red.set_in_and_out(0, duration)
    playlist.append(red)

def add_clip(playlist, clip, in_, duration):
    file_ = clip['path']
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    info = ox.avinfo(file_)
    tractor = mlt.Tractor(profile)
    tracks = tractor.multitrack()
    video = mlt.Playlist()
    c = mlt.Producer(profile, file_)
    c.set_in_and_out(in_, in_ + duration - 1)
    video.append(c)
    tracks.connect(video, 0)
    if not info.get('audio'):
        # clips without an audio stream get a silent track instead
        audio = mlt.Playlist()
        add_silence(audio, duration)
        tracks.connect(audio, 1)
    else:
        volume = mlt.Filter(profile, "volume")
        if clip.get('tag', '') == 'gong':
            volume.set("gain", '0.8')
        # Vocal Cords in Action
        elif clip.get('id', '').split('/')[0] in ('EBB', 'ECE', 'ECK', 'ECJ') and \
                clip.get('tag', '') in ('vagina', 'voice'):
            volume.set("gain", '0.4')
        else:
            volume.set("gain", '0.12')
        tractor.plant_filter(volume)
    playlist.append(tractor)

def add_audio_clip(playlist, file_, duration, in_=0):
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    clip = mlt.Producer(profile, file_)
    clip.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(clip)

def add_silence(playlist, length):
    file_ = 'silence.wav'
    add_audio_clip(playlist, file_, length)

def add_blank(playlist, length):
    playlist.blank(length)

def add_text(playlist, value, length):
    if not isinstance(value, str):
        value = value.encode('utf-8')
    text = mlt.Producer(profile, 'webvfx:text.html')
    text.set('transparent', 1)
    text.set('title', value)
    text.set('length', length)
    playlist.append(text)

def mix_audio_tracks(a, b, ratio, combine=False):
    tractor = mlt.Tractor(profile)
    audio = tractor.multitrack()
    audio.connect(a, 0)
    audio.connect(b, 1)
    mix = mlt.Transition(profile, "mix")
    mix.set("start", ratio)
    mix.set("end", ratio)
    #mix.set("always_active", 1)
    if combine:
        mix.set("combine", 1)
    tractor.plant_transition(mix)
    return tractor

def save_xml(track, filename):
    consumer = mlt.Consumer(profile, 'xml', filename)
    consumer.connect(track)
    consumer.start()

# main
fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
#profile.set_explicit(1)

source = opts.files[0]
target = source.replace('.json', '.xml')
target_audio = source.replace('.json', '.audio.xml')
target_audio_wav = target_audio + '.wav'
target_vocals = source.replace('.json', '.vocals.xml')
target_music = source.replace('.json', '.music.xml')
target_drones = source.replace('.json', '.drones.xml')
target_source = source.replace('.json', '.source.xml')
gongs_wav = source.replace('.json', '.gongs.wav')

with open(source) as fd:
    data = json.load(fd)

video = mlt.Playlist()
overlay = mlt.Playlist()
music = mlt.Playlist()
vocals = mlt.Playlist()
drones0 = mlt.Playlist()
drones1 = mlt.Playlist()
gongs = mlt.Playlist()

# hide
#   Set to 1 to hide the video (make it an audio-only track),
#   2 to hide the audio (make it a video-only track),
#   or 3 to hide audio and video (hidden track).
drones0.set("hide", 1)
drones1.set("hide", 1)
gongs.set("hide", 1)
vocals.set("hide", 1)
music.set("hide", 1)

for clip in data['clips']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('black'):
        add_color(video, 'black', frames)
    else:
        #print(clip['duration'], clip['path'])
        if not os.path.exists(clip['path']):
            print(clip['path'], 'is missing')
            sys.exit(1)
        # fixme seconds to fps!
        in_ = int(clip['in'] * fps)
        add_clip(video, clip, in_, frames)
add_color(video, 'black', 60)

for clip in data['text']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_blank(overlay, frames)
    else:
        add_text(overlay, clip['text'], frames)

for clip in data['music']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_silence(music, frames)
    else:
        add_audio_clip(music, clip['path'], frames)

for clip in data['vocals']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_silence(vocals, frames)
    else:
        add_audio_clip(vocals, clip['path'], frames)

for name, plist in (
    ('drones0', drones0),
    ('drones1', drones1),
):
    for clip in data[name]:
        frames = int(clip['duration'] * fps)
        if clip.get('blank'):
            add_silence(plist, frames)
        else:
            add_audio_clip(plist, clip['path'], frames)

duration = sum(clip['duration'] for clip in data['clips'])

save_xml(vocals, target_vocals)

video.set("hide", 1)
save_xml(video, target_source)
video.set("hide", 0)

# mix drones
drones = mix_audio_tracks(drones0, drones1, 0.5)
save_xml(drones, target_drones)

save_xml(music, target_music)

if version == 'performance':
    # render gongs
    render_gongs = not os.path.exists(gongs_wav)
    #if not render_gongs:
    #    render_gongs = os.path.getmtime(source) > os.path.getmtime(gongs_wav)
    if render_gongs:
        subprocess.call([
            # offset in pi, duration, number of tracks, target
            './render_gongs.py',
            str(data['gongs']['offset']),
            str(duration),
            str(data['gongs']['tracks']),
            gongs_wav
        ])
    # load gongs
    add_audio_clip(gongs, gongs_wav, int(duration * fps), int(5 * fps))
    # mix gongs + music
    mtractor = mix_audio_tracks(gongs, music, 0.15)
else:
    # mix drones + music
    mtractor = mix_audio_tracks(drones, music, 0.3)

norm = mlt.Filter(profile, "volume")
# lower volume
if version == 'performance':
    norm.set("gain", "-6dB")
else:
    norm.set("gain", "-12dB")
mtractor.plant_filter(norm)

# background and vocals
# vocals are on extra track now
#atractor = mix_audio_tracks(vocals, mtractor, 0.4)
atractor = mtractor

save_xml(atractor, target_audio)

'''
'''
subprocess.call([
    'qmelt', target_audio, '-consumer',
    'avformat:' + target_audio_wav,
])

audiomix = mlt.Playlist()
add_audio_clip(audiomix, target_audio_wav, int(duration * fps))

# mix video + audio
#tractor = mix_audio_tracks(atractor, video, 0.29)
# with vocals to background 0.4 -> 0.29
# with vocals as extra track 0.725
#tractor = mix_audio_tracks(audiomix, video, 0.5)
tractor = mix_audio_tracks(audiomix, video, 0.6)

output = mlt.Tractor(profile)
output_tracks = output.multitrack()
output_tracks.connect(tractor, 0)
output_tracks.connect(overlay, 1)

norm = mlt.Filter(profile, "volume")
#norm.set("gain", "-6dB")
norm.set("gain", "6dB")
output.plant_filter(norm)

composite = mlt.Transition(profile, "composite")
#composite.set('fill', 1)
output.plant_transition(composite)

save_xml(output, target)