2 stage soundmix

j 2017-03-15 12:00:36 +00:00
parent 934f0c4086
commit c496a1da3c
2 changed files with 70 additions and 36 deletions

@@ -360,17 +360,14 @@ def sequence(seq, letter):
                 position += add_blank(result[track], min(3.141, duration - position))
             else:
                 clip = DRONES[letter][n]
-                if position + clip['duration'] < duration:
-                    position += clip['duration']
+                position += clip['duration']
+                if result[track] and position > duration \
+                        and result[track][-1].get('blank') \
+                        and result[track][-1]['duration'] > clip['duration']:
+                    result[track][-1]['duration'] -= (position-duration)
+                    position = duration
+                if position <= duration:
                     result[track].append(clip)
                 else:
+                    position -= clip['duration']
                     break
-        c = clip.copy()
-        c['duration'] = duration - position
-        result[track].append(c)
-        position += c['duration']
         if position < duration:
             position += add_blank(result[track], duration - position)
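The rewritten loop changes the fitting strategy: instead of truncating the last drone clip to fit (the removed clip.copy() path), it appends the clip at full length, reclaims any overshoot from a preceding blank when that blank is long enough to absorb it, and otherwise backs the clip out and lets the trailing add_blank pad the gap. A minimal standalone sketch of that rule, using plain dicts and a hypothetical fill_track helper rather than the module's actual DRONES/result structures:

def fill_track(track, clips, duration):
    # track holds {'duration': n[, 'blank': True]} items already placed
    position = sum(item['duration'] for item in track)
    for clip in clips:
        position += clip['duration']
        # overshoot: steal the excess from a preceding blank if it can absorb it
        if track and position > duration \
                and track[-1].get('blank') \
                and track[-1]['duration'] > clip['duration']:
            track[-1]['duration'] -= (position - duration)
            position = duration
        if position <= duration:
            track.append(clip)
        else:
            position -= clip['duration']  # clip does not fit, back it out
            break
    if position < duration:
        # pad the remainder, mirroring the final add_blank in sequence()
        track.append({'duration': duration - position, 'blank': True})
    return track

# e.g. fill_track([], [{'duration': 5}, {'duration': 4}], 7)
# keeps the 5s clip, drops the 4s clip, and pads with a 2s blank.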

@@ -3,6 +3,7 @@ import os
 import time
 import sys
 import json
+import subprocess
 import mlt
 from PyQt5 import QtWidgets
@@ -22,6 +23,8 @@ multitrack = tractor.multitrack()
 source = sys.argv[1]
 target = source.replace('.json', '.xml')
 target_audio = source.replace('.json', '.audio.xml')
+target_vocals = source.replace('.json', '.vocals.xml')
 with open(source) as fd:
     data = json.load(fd)
@@ -30,6 +33,8 @@ video = mlt.Playlist()
 overlay = mlt.Playlist()
 music = mlt.Playlist()
 vocals = mlt.Playlist()
+drones0 = mlt.Playlist()
+drones1 = mlt.Playlist()
 
 fps = 60
 profile = mlt.Profile("atsc_1080p_%d" % fps)
@@ -59,6 +64,10 @@ def add_audio_clip(playlist, file_, duration):
     clip.set_in_and_out(in_, in_+duration-1)
     playlist.append(clip)
 
+def add_silence(playlist, length):
+    file_ = 'silence.wav'
+    add_audio_clip(playlist, file_, length)
+
 def add_blank(playlist, length):
     playlist.blank(length)
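In the next hunk, gaps in the music and vocals playlists switch from MLT playlist blanks to real clips cut from silence.wav, presumably so both inputs of the mix transitions below always carry an audio frame (the new drone playlists keep plain blanks). The script assumes a silence.wav at least as long as the longest gap, since add_audio_clip trims it with set_in_and_out. A hypothetical helper to generate such a file, assuming 48 kHz stereo 16-bit PCM (the actual file in the repo may differ):

import wave

def write_silence_wav(path='silence.wav', seconds=60, rate=48000):
    w = wave.open(path, 'wb')
    w.setnchannels(2)   # stereo
    w.setsampwidth(2)   # 16-bit samples
    w.setframerate(rate)
    # zeroed PCM frames: channels * bytes-per-sample * rate * seconds
    w.writeframes(b'\x00' * (2 * 2 * rate * seconds))
    w.close()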
@@ -99,17 +108,28 @@ for clip in data['text']:
 for clip in data['music']:
     frames = int(clip['duration'] * fps)
     if clip.get('blank'):
-        add_blank(music, frames)
+        add_silence(music, frames)
     else:
         add_audio_clip(music, clip['path'], frames)
 
 for clip in data['vocals']:
     frames = int(clip['duration'] * fps)
     if clip.get('blank'):
-        add_blank(vocals, frames)
+        add_silence(vocals, frames)
     else:
         add_audio_clip(vocals, clip['path'], frames)
 
+for name, plist in (
+    ('drones0', drones0),
+    ('drones1', drones1),
+):
+    for clip in data[name]:
+        frames = int(clip['duration'] * fps)
+        if clip.get('blank'):
+            add_blank(plist, frames)
+        else:
+            add_audio_clip(plist, clip['path'], frames)
+
 multitrack.connect(video, 0)
 multitrack.connect(overlay, 1)
 composite = mlt.Transition(profile, "composite")
@@ -121,32 +141,49 @@ volume = mlt.Filter(profile, "volume")
 volume.set("gain", '0.1')
 tractor.plant_filter(volume)
 
+def mix_audio_tracks(a, b, ratio):
+    tractor = mlt.Tractor()
+    tractor.mark_in = -1
+    tractor.mark_out = -1
+    audio = tractor.multitrack()
+    audio.connect(a, 0)
+    audio.connect(b, 1)
+    mix = mlt.Transition(profile, "mix")
+    mix.set("start", ratio)
+    mix.set("end", ratio)
+    tractor.plant_transition(mix)
+    return tractor
+
+consumer = 'xml'
+consumer = mlt.Consumer(profile, consumer, target_vocals)
+consumer.connect(vocals)
+consumer.start()
+
+# mix drones
+drones = mix_audio_tracks(drones0, drones1, 0.5)
+# mix drones + music
+mtractor = mix_audio_tracks(drones, music, 0.20)
+atractor = mix_audio_tracks(vocals, mtractor, 0.20)
+
+consumer = mlt.Consumer(profile, 'xml', target_audio)
+consumer.connect(atractor)
+consumer.start()
+
+target_audio_wav = target_audio + '.wav'
+subprocess.call([
+    'qmelt', target_audio, '-consumer', 'avformat:' + target_audio_wav,
+])
+audiomix = mlt.Producer(profile, target_audio_wav)
+
 # mix vocals and music
-atractor = mlt.Tractor()
-atractor.mark_in = -1
-atractor.mark_out = -1
-audio = atractor.multitrack()
-audio.connect(vocals, 0)
-audio.connect(music, 1)
-mix = mlt.Transition(profile, "mix")
-mix.set("start", 0.20)
-mix.set("end", 0.20)
-atractor.plant_transition(mix)
+#atractor = mix_audio_tracks(vocals, mtractor, 0.20)
 
 # mix video + audio
-dtractor = mlt.Tractor()
-dtractor.mark_in = -1
-dtractor.mark_out = -1
-dmix = dtractor.multitrack()
-dmix.connect(atractor, 0)
-dmix.connect(tractor, 1)
-mix2 = mlt.Transition(profile, "mix")
-mix2.set("start", 0.5)
-mix2.set("end", 0.5)
-dtractor.plant_transition(mix2)
+#dtractor = mix_audio_tracks(atractor, tractor, 0.5)
+dtractor = mix_audio_tracks(audiomix, tractor, 0.5)
 
 output = mlt.Tractor()
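This hunk is the two-stage soundmix of the commit title: stage one serializes the nested audio tractors to target_audio and bounces them to a wav with qmelt; stage two pulls that wav back in as a plain producer (audiomix) and mixes it 50/50 against the video tractor, so the final composition no longer nests the whole audio graph. If the "mix" transition weights its B track by ratio and its A track by (1 - ratio), which is one reading of its start/end properties (not confirmed by this commit), the nested mix_audio_tracks calls imply the following per-source weights in target_audio; a quick back-of-the-envelope check:

# assumed per-stage model: out = a * (1 - ratio) + b * ratio
ratio_drones = 0.5   # drones0 vs drones1
ratio_music = 0.20   # drones vs music
ratio_vocals = 0.20  # vocals vs (drones + music)

weights = {
    'vocals': 1 - ratio_vocals,
    'drones0': (1 - ratio_drones) * (1 - ratio_music) * ratio_vocals,
    'drones1': ratio_drones * (1 - ratio_music) * ratio_vocals,
    'music': ratio_music * ratio_vocals,
}
print(weights)  # {'vocals': 0.8, 'drones0': 0.08, 'drones1': 0.08, 'music': 0.04}

Under that model the vocals dominate and each drone track sits roughly 20 dB below them, which matches the intent of ducking the backing layers under the voice.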