#!/usr/bin/python
import os
import sys
import json
import subprocess

import mlt
from PyQt5 import QtWidgets

# Instantiating a QApplication avoids a segfault in the webvfx producer.
app = QtWidgets.QApplication(sys.argv)

#mlt.mlt_log_set_level(40)  # verbose
mlt.Factory.init()

fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
#profile.set_explicit(1)

tractor = mlt.Tractor(profile)
tractor.mark_in = -1
tractor.mark_out = -1
multitrack = tractor.multitrack()

# First argument: timeline JSON; optional second argument "encode" runs
# ./encode.py on the rendered XML at the end.
source = sys.argv[1]
if len(sys.argv) > 2:
    encode = sys.argv[2] == 'encode'
else:
    encode = False

target = source.replace('.json', '.xml')
target_audio = source.replace('.json', '.audio.xml')
target_vocals = source.replace('.json', '.vocals.xml')

with open(source) as fd:
    data = json.load(fd)

# One playlist per timeline track: video, text overlay, and the audio stems.
video = mlt.Playlist()
overlay = mlt.Playlist()
music = mlt.Playlist()
vocals = mlt.Playlist()
drones0 = mlt.Playlist()
drones1 = mlt.Playlist()


def add_color(playlist, color, duration):
    """Append `duration` frames of a solid color."""
    clip = mlt.Producer(profile, 'color:' + color)
    clip.set_in_and_out(0, duration - 1)  # the out point is inclusive
    playlist.append(clip)


def add_clip(playlist, file_, in_, duration):
    """Append `duration` frames of a video file, starting at frame `in_`."""
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    clip = mlt.Producer(profile, file_)
    clip.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(clip)
    # Earlier variant, kept for reference: wrap each clip in its own tractor
    # and attach a silence track when ox.avinfo() reports audio:
    #   info = ox.avinfo(file_)
    #   tractor = mlt.Tractor(profile)
    #   tracks = tractor.multitrack()
    #   video = mlt.Playlist()
    #   video.append(clip)
    #   tracks.connect(video, 0)
    #   if info.get('audio'):
    #       audio = mlt.Playlist()
    #       add_silence(audio, duration)
    #       tracks.connect(audio, 1)
    #   #tracks.set_in_and_out(in_, in_ + duration - 1)
    #   playlist.append(tractor)


def add_audio_clip(playlist, file_, duration):
    """Append the first `duration` frames of an audio file."""
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    clip = mlt.Producer(profile, file_)
    clip.set_in_and_out(0, duration - 1)
    playlist.append(clip)


def add_silence(playlist, length):
    add_audio_clip(playlist, 'silence.wav', length)


def add_blank(playlist, length):
    playlist.blank(length)


def add_text(playlist, value, length):
    """Append a text overlay rendered by the webvfx producer."""
    if not isinstance(value, str):
        value = value.encode('utf-8')
    text = mlt.Producer(profile, 'webvfx:text.html')
    text.set('transparent', 1)
    text.set('title', value)
    text.set('length', length)
    playlist.append(text)


# Video track: black gaps and source clips.
for clip in data['clips']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('black'):
        add_color(video, 'black', frames)
    else:
        #print(clip['duration'], clip['path'])
        if not os.path.exists(clip['path']):
            print(clip['path'], 'is missing')
            sys.exit(1)
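        # 'in' and 'duration' come in seconds; MLT producers count frames.
        # Worked example (illustrative values): at fps=60, in=1.5s and
        # duration=2.0s give in_=90 and frames=120, i.e. source frames
        # 90..209 inclusive.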
        in_ = int(clip['in'] * fps)
        add_clip(video, clip['path'], in_, frames)

# Trailing second of black.
add_color(video, 'black', 60)

# Text overlay track.
for clip in data['text']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_blank(overlay, frames)
    else:
        add_text(overlay, clip['text'], frames)

# Music track: gaps are filled with silence.wav so track lengths stay aligned.
for clip in data['music']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_silence(music, frames)
    else:
        add_audio_clip(music, clip['path'], frames)

# Vocals track.
for clip in data['vocals']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_silence(vocals, frames)
    else:
        add_audio_clip(vocals, clip['path'], frames)

# Two drone tracks.
for name, plist in (
    ('drones0', drones0),
    ('drones1', drones1),
):
    for clip in data[name]:
        frames = int(clip['duration'] * fps)
        if clip.get('blank'):
            add_blank(plist, frames)
        else:
            add_audio_clip(plist, clip['path'], frames)

# Composite the text overlay on top of the video track.
multitrack.connect(video, 0)
multitrack.connect(overlay, 1)
composite = mlt.Transition(profile, "composite")
#composite.set('fill', 1)
tractor.plant_transition(composite)

# Attenuate the video clips' own audio.
volume = mlt.Filter(profile, "volume")
volume.set("gain", '0.12')
tractor.plant_filter(volume)


def mix_audio_tracks(a, b, ratio):
    """Mix two tracks into a new tractor; `ratio` is b's share of the mix."""
    tractor = mlt.Tractor(profile)
    tractor.mark_in = -1
    tractor.mark_out = -1
    audio = tractor.multitrack()
    audio.connect(a, 0)
    audio.connect(b, 1)
    mix = mlt.Transition(profile, "mix")
    mix.set("start", ratio)
    mix.set("end", ratio)
    #mix.set("always_active", 1)
    #mix.set("combine", 1)
    tractor.plant_transition(mix)
    return tractor


# Write the vocals track on its own.
consumer = mlt.Consumer(profile, 'xml', target_vocals)
consumer.connect(vocals)
consumer.start()

# Mix the two drone tracks, then drones + music.
drones = mix_audio_tracks(drones0, drones1, 0.5)
mtractor = mix_audio_tracks(drones, music, 0.3)

norm = mlt.Filter(profile, "volume")
norm.set("gain", "-12dB")
mtractor.plant_filter(norm)

# Background mix + vocals.
atractor = mix_audio_tracks(vocals, mtractor, 0.4)

consumer = mlt.Consumer(profile, 'xml', target_audio)
consumer.connect(atractor)
consumer.start()

# Render the full audio mix to wav so it can be re-used as a single clip below.
target_audio_wav = target_audio + '.wav'
subprocess.call([
    'qmelt', target_audio,
    '-consumer', 'avformat:' + target_audio_wav,
])

audiomix = mlt.Playlist()
duration = sum(clip['duration'] for clip in data['clips'])
add_audio_clip(audiomix, target_audio_wav, int(duration * fps))

# Mix the rendered audio with the video tractor.
#atractor = mix_audio_tracks(vocals, mtractor, 0.20)
#dtractor = mix_audio_tracks(atractor, tractor, 0.5)
dtractor = mix_audio_tracks(audiomix, tractor, 0.29)

output = mlt.Tractor(profile)
output.mark_in = -1
output.mark_out = -1
output_tracks = output.multitrack()
output_tracks.connect(dtractor, 0)

norm = mlt.Filter(profile, "volume")
#norm.set("gain", "-6dB")
norm.set("gain", "3dB")
output.plant_filter(norm)

consumer = mlt.Consumer(profile, 'xml', target)
consumer.connect(output)
#consumer.set("real_time", -2)
consumer.start()

if encode:
    subprocess.call(['./encode.py', target])
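# Usage sketch (script and JSON names are placeholders; any timeline JSON with
# the keys read above -- clips, text, music, vocals, drones0, drones1 -- works):
#   ./render.py project.json          -> project.xml, project.audio.xml,
#                                        project.audio.xml.wav, project.vocals.xml
#   ./render.py project.json encode   -> additionally runs ./encode.py project.xml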