From 536cde43d36bb9669fd4de88a506c9b075f5fb44 Mon Sep 17 00:00:00 2001
From: j
Date: Wed, 15 Mar 2017 13:00:12 +0100
Subject: [PATCH] 2 stage sound mix

---
 encode.py     |  3 +-
 render.py     | 19 +++++------
 render_mlt.py | 87 ++++++++++++++++++++++++++++++++++++---------------
 3 files changed, 72 insertions(+), 37 deletions(-)

diff --git a/encode.py b/encode.py
index 10f5734..9115bc1 100755
--- a/encode.py
+++ b/encode.py
@@ -27,7 +27,8 @@ def is_new(xml, mp4):
     vtime = max(
         os.path.getmtime(mp4),
         os.path.getmtime('text.html'),
-        os.path.getmtime('VOCALS.json')
+        os.path.getmtime('DRONES.json'),
+        os.path.getmtime('VOCALS.json'),
     )
     return vtime < xtime
 
diff --git a/render.py b/render.py
index 643af97..324985c 100755
--- a/render.py
+++ b/render.py
@@ -360,17 +360,14 @@ def sequence(seq, letter):
             position += add_blank(result[track], min(3.141, duration - position))
         else:
             clip = DRONES[letter][n]
-            position += clip['duration']
-            if result[track] and position > duration \
-                and result[track][-1].get('blank') \
-                and result[track][-1]['duration'] > clip['duration']:
-                result[track][-1]['duration'] -= (position-duration)
-                position = duration
-            if position <= duration:
-                result[track].append(clip)
-            else:
-                position -= clip['duration']
-                break
+            if position + clip['duration'] < duration:
+                position += clip['duration']
+                result[track].append(clip)
+            else:
+                c = clip.copy()
+                c['duration'] = duration - position
+                result[track].append(c)
+                position += c['duration']
 
     if position < duration:
         position += add_blank(result[track], duration - position)
diff --git a/render_mlt.py b/render_mlt.py
index 239f70e..3e98aa0 100755
--- a/render_mlt.py
+++ b/render_mlt.py
@@ -3,6 +3,7 @@ import os
 import time
 import sys
 import json
+import subprocess
 
 import mlt
 from PyQt5 import QtWidgets
@@ -22,6 +23,8 @@ multitrack = tractor.multitrack()
 
 source = sys.argv[1]
 target = source.replace('.json', '.xml')
+target_audio = source.replace('.json', '.audio.xml')
+target_vocals = source.replace('.json', '.vocals.xml')
 
 with open(source) as fd:
     data = json.load(fd)
@@ -30,6 +33,8 @@ video = mlt.Playlist()
 overlay = mlt.Playlist()
 music = mlt.Playlist()
 vocals = mlt.Playlist()
+drones0 = mlt.Playlist()
+drones1 = mlt.Playlist()
 
 fps = 60
 profile = mlt.Profile("atsc_1080p_%d" % fps)
@@ -59,6 +64,10 @@ def add_audio_clip(playlist, file_, duration):
     clip.set_in_and_out(in_, in_+duration-1)
     playlist.append(clip)
 
+def add_silence(playlist, length):
+    file_ = 'silence.wav'
+    add_audio_clip(playlist, file_, length)
+
 def add_blank(playlist, length):
     playlist.blank(length)
 
@@ -99,17 +108,28 @@ for clip in data['text']:
 for clip in data['music']:
     frames = int(clip['duration'] * fps)
     if clip.get('blank'):
-        add_blank(music, frames)
+        add_silence(music, frames)
     else:
         add_audio_clip(music, clip['path'], frames)
 
 for clip in data['vocals']:
     frames = int(clip['duration'] * fps)
     if clip.get('blank'):
-        add_blank(vocals, frames)
+        add_silence(vocals, frames)
     else:
         add_audio_clip(vocals, clip['path'], frames)
 
+for name, plist in (
+    ('drones0', drones0),
+    ('drones1', drones1),
+):
+    for clip in data[name]:
+        frames = int(clip['duration'] * fps)
+        if clip.get('blank'):
+            add_blank(plist, frames)
+        else:
+            add_audio_clip(plist, clip['path'], frames)
+
 multitrack.connect(video, 0)
 multitrack.connect(overlay, 1)
 composite = mlt.Transition(profile, "composite")
@@ -121,32 +141,49 @@ volume = mlt.Filter(profile, "volume")
 volume.set("gain", '0.1')
 tractor.plant_filter(volume)
 
+def mix_audio_tracks(a, b, ratio):
+    tractor = mlt.Tractor()
+    tractor.mark_in = -1
+    tractor.mark_out = -1
+
+    audio = tractor.multitrack()
+    audio.connect(a, 0)
+    audio.connect(b, 1)
+    mix = mlt.Transition(profile, "mix")
+    mix.set("start", ratio)
+    mix.set("end", ratio)
+    tractor.plant_transition(mix)
+    return tractor
+
+
+consumer = 'xml'
+consumer = mlt.Consumer(profile, consumer, target_vocals)
+consumer.connect(vocals)
+consumer.start()
+
+# mix drones
+drones = mix_audio_tracks(drones0, drones1, 0.5)
+
+# mix drones + music
+mtractor = mix_audio_tracks(drones, music, 0.20)
+atractor = mix_audio_tracks(vocals, mtractor, 0.20)
+
+consumer = mlt.Consumer(profile, 'xml', target_audio)
+consumer.connect(atractor)
+consumer.start()
+
+target_audio_wav = target_audio + '.wav'
+subprocess.call([
+    'qmelt', target_audio, '-consumer', 'avformat:' + target_audio_wav,
+])
+audiomix = mlt.Producer(profile, target_audio_wav)
+
 # mix vocals and music
-
-atractor = mlt.Tractor()
-atractor.mark_in = -1
-atractor.mark_out = -1
-
-audio = atractor.multitrack()
-audio.connect(vocals, 0)
-audio.connect(music, 1)
-mix = mlt.Transition(profile, "mix")
-mix.set("start", 0.20)
-mix.set("end", 0.20)
-atractor.plant_transition(mix)
+#atractor = mix_audio_tracks(vocals, mtractor, 0.20)
 
 # mix video + audio
-dtractor = mlt.Tractor()
-dtractor.mark_in = -1
-dtractor.mark_out = -1
-dmix = dtractor.multitrack()
-dmix.connect(atractor, 0)
-dmix.connect(tractor, 1)
-
-mix2 = mlt.Transition(profile, "mix")
-mix2.set("start", 0.5)
-mix2.set("end", 0.5)
-dtractor.plant_transition(mix2)
+#dtractor = mix_audio_tracks(atractor, tractor, 0.5)
+dtractor = mix_audio_tracks(audiomix, tractor, 0.5)
 
 output = mlt.Tractor()
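
Note on the render.py hunk: the old code appended each clip first and then
tried to claw the overshoot back from a preceding blank, which needed the
position > duration backtracking above. The new code decides before
appending and truncates the final clip, so every track lands exactly on
duration. A minimal standalone sketch of that fill logic (fill_track and
the sample clips are illustrative, not from the codebase; unlike the
patch, the sketch breaks out of the loop once the track is full, where
the patch would keep appending zero-length copies on any remaining
iterations):

    def fill_track(clips, duration):
        # Append whole clips while they fit; truncate the one that
        # would overshoot so the durations sum exactly to duration.
        result = []
        position = 0.0
        for clip in clips:
            if position + clip['duration'] < duration:
                position += clip['duration']
                result.append(clip)
            else:
                c = clip.copy()
                c['duration'] = duration - position
                result.append(c)
                position += c['duration']
                break  # track is full
        return result, position

    track, end = fill_track(
        [{'duration': 4.0}, {'duration': 4.0}, {'duration': 4.0}], 10.0)
    assert end == 10.0
    assert [c['duration'] for c in track] == [4.0, 4.0, 2.0]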
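
Note on the two-stage structure (the commit subject): instead of mixing
vocals and music live inside the video tractor, the whole audio graph
(vocals over music over the two drone tracks) is serialized to XML,
pre-rendered to a wav by qmelt, and the wav re-enters the graph as a
single flat producer for the final 50/50 mix against the video tractor.
In outline (a condensed restatement of the patch, not new behaviour):

    consumer = mlt.Consumer(profile, 'xml', target_audio)    # stage 1: write XML
    consumer.connect(atractor)
    consumer.start()
    subprocess.call([                                        # stage 1: render wav
        'qmelt', target_audio,
        '-consumer', 'avformat:' + target_audio + '.wav',
    ])
    audiomix = mlt.Producer(profile, target_audio + '.wav')  # stage 2: flat audio
    dtractor = mix_audio_tracks(audiomix, tractor, 0.5)      # stage 2: a/v mix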
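
Note on the nested mix ratios: the final balance is the product of the
stages. Assuming the "mix" transition with start == end == r weights the
B track (track 1) by r and the A track (track 0) by 1 - r (an assumption
about MLT's mix semantics, not something the patch states), the three
tractors imply the effective per-source levels computed by this
illustrative helper:

    def mix_weights(a, b, ratio):
        # Combine two {source: weight} dicts the way one nested mix
        # stage would: A scaled by (1 - ratio), B scaled by ratio.
        out = {k: v * (1 - ratio) for k, v in a.items()}
        for k, v in b.items():
            out[k] = out.get(k, 0.0) + v * ratio
        return out

    drones = mix_weights({'drones0': 1.0}, {'drones1': 1.0}, 0.5)
    mtractor = mix_weights(drones, {'music': 1.0}, 0.20)
    atractor = mix_weights({'vocals': 1.0}, mtractor, 0.20)
    print(atractor)
    # roughly: vocals 0.80, drones0 0.08, drones1 0.08, music 0.04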