2-stage sound mix

j 2017-03-15 13:00:12 +01:00
parent 934f0c4086
commit 536cde43d3
3 changed files with 72 additions and 37 deletions

View file

@@ -27,7 +27,8 @@ def is_new(xml, mp4):
     vtime = max(
         os.path.getmtime(mp4),
         os.path.getmtime('text.html'),
-        os.path.getmtime('VOCALS.json')
+        os.path.getmtime('DRONES.json'),
+        os.path.getmtime('VOCALS.json'),
     )
     return vtime < xtime
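
The check now also tracks DRONES.json. For context, a minimal reconstruction of the whole function, assuming (it is not shown in the hunk) that xtime is the mtime of the target XML:

    import os

    def is_new(xml, mp4):
        # Assumed: xtime is the mtime of the rendered XML target;
        # only the vtime block below appears in the hunk.
        xtime = os.path.getmtime(xml)
        # The target is stale when any input is newer than it;
        # DRONES.json now counts as an input.
        vtime = max(
            os.path.getmtime(mp4),
            os.path.getmtime('text.html'),
            os.path.getmtime('DRONES.json'),
            os.path.getmtime('VOCALS.json'),
        )
        return vtime < xtime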

View file

@@ -360,17 +360,14 @@ def sequence(seq, letter):
             position += add_blank(result[track], min(3.141, duration - position))
         else:
             clip = DRONES[letter][n]
-            position += clip['duration']
-            if result[track] and position > duration \
-                and result[track][-1].get('blank') \
-                and result[track][-1]['duration'] > clip['duration']:
-                result[track][-1]['duration'] -= (position-duration)
-                position = duration
-            if position <= duration:
-                result[track].append(clip)
-            else:
-                position -= clip['duration']
-                break
+            if position + clip['duration'] < duration:
+                position += clip['duration']
+                result[track].append(clip)
+            else:
+                c = clip.copy()
+                c['duration'] = duration - position
+                result[track].append(c)
+                position += c['duration']
 
     if position < duration:
         position += add_blank(result[track], duration - position)
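
The rewritten branch drops the overshoot-and-rollback bookkeeping: a clip that still fits is appended whole, and the first clip that would cross the target duration is appended as a trimmed copy, so the track lands exactly on duration and the original clip dict is never mutated. A toy sketch of the same fill pattern (fill and its arguments are illustrative names, not from the repo):

    def fill(clips, duration):
        # Append whole clips while they fit, then trim the final
        # clip on a copy so the source clip dict stays untouched.
        result, position = [], 0.0
        for clip in clips:
            if position + clip['duration'] < duration:
                position += clip['duration']
                result.append(clip)
            else:
                c = clip.copy()
                c['duration'] = duration - position
                result.append(c)
                position += c['duration']
                break  # track is now exactly `duration` long
        return result

    print(fill([{'duration': 2.0}, {'duration': 3.0}], 4.0))
    # -> [{'duration': 2.0}, {'duration': 2.0}]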

View file

@@ -3,6 +3,7 @@ import os
 import time
 import sys
 import json
+import subprocess
 
 import mlt
 from PyQt5 import QtWidgets
@@ -22,6 +23,8 @@ multitrack = tractor.multitrack()
 source = sys.argv[1]
 target = source.replace('.json', '.xml')
+target_audio = source.replace('.json', '.audio.xml')
+target_vocals = source.replace('.json', '.vocals.xml')
 
 with open(source) as fd:
     data = json.load(fd)
 
@@ -30,6 +33,8 @@ video = mlt.Playlist()
 overlay = mlt.Playlist()
 music = mlt.Playlist()
 vocals = mlt.Playlist()
+drones0 = mlt.Playlist()
+drones1 = mlt.Playlist()
 
 fps = 60
 profile = mlt.Profile("atsc_1080p_%d" % fps)
@@ -59,6 +64,10 @@ def add_audio_clip(playlist, file_, duration):
     clip.set_in_and_out(in_, in_+duration-1)
     playlist.append(clip)
 
+def add_silence(playlist, length):
+    file_ = 'silence.wav'
+    add_audio_clip(playlist, file_, length)
+
 def add_blank(playlist, length):
     playlist.blank(length)
 
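
The new add_silence helper pads audio playlists with a real silence.wav clip instead of an MLT blank, so the serialized tracks carry actual audio frames into the later mixes. A hypothetical call site (the 2-second figure is just an example; fps = 60 is set above):

    # pad the music track with 2 seconds of real silence
    add_silence(music, 2 * fps)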
@@ -99,17 +108,28 @@ for clip in data['text']:
 for clip in data['music']:
     frames = int(clip['duration'] * fps)
     if clip.get('blank'):
-        add_blank(music, frames)
+        add_silence(music, frames)
     else:
         add_audio_clip(music, clip['path'], frames)
 
 for clip in data['vocals']:
     frames = int(clip['duration'] * fps)
     if clip.get('blank'):
-        add_blank(vocals, frames)
+        add_silence(vocals, frames)
     else:
         add_audio_clip(vocals, clip['path'], frames)
 
+for name, plist in (
+    ('drones0', drones0),
+    ('drones1', drones1),
+):
+    for clip in data[name]:
+        frames = int(clip['duration'] * fps)
+        if clip.get('blank'):
+            add_blank(plist, frames)
+        else:
+            add_audio_clip(plist, clip['path'], frames)
+
 multitrack.connect(video, 0)
 multitrack.connect(overlay, 1)
 composite = mlt.Transition(profile, "composite")
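
The added loop fills both drone playlists from the source JSON, using the same entry shape as the music and vocals lists; note the drone tracks still use add_blank for gaps, unlike music and vocals, which now use add_silence. An assumed example of that shape (keys from the loop; paths and durations invented for illustration):

    data_example = {
        "drones0": [
            {"path": "drones/a0.wav", "duration": 3.141},
            {"blank": True, "duration": 1.5},
        ],
        "drones1": [
            {"path": "drones/b0.wav", "duration": 2.0},
        ],
    }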
@@ -121,32 +141,49 @@ volume = mlt.Filter(profile, "volume")
 volume.set("gain", '0.1')
 tractor.plant_filter(volume)
 
+def mix_audio_tracks(a, b, ratio):
+    tractor = mlt.Tractor()
+    tractor.mark_in = -1
+    tractor.mark_out = -1
+    audio = tractor.multitrack()
+    audio.connect(a, 0)
+    audio.connect(b, 1)
+    mix = mlt.Transition(profile, "mix")
+    mix.set("start", ratio)
+    mix.set("end", ratio)
+    tractor.plant_transition(mix)
+    return tractor
+
+consumer = 'xml'
+consumer = mlt.Consumer(profile, consumer, target_vocals)
+consumer.connect(vocals)
+consumer.start()
+
+# mix drones
+drones = mix_audio_tracks(drones0, drones1, 0.5)
+
+# mix drones + music
+mtractor = mix_audio_tracks(drones, music, 0.20)
+
+atractor = mix_audio_tracks(vocals, mtractor, 0.20)
+consumer = mlt.Consumer(profile, 'xml', target_audio)
+consumer.connect(atractor)
+consumer.start()
+
+target_audio_wav = target_audio + '.wav'
+subprocess.call([
+    'qmelt', target_audio, '-consumer', 'avformat:' + target_audio_wav,
+])
+audiomix = mlt.Producer(profile, target_audio_wav)
+
 # mix vocals and music
-atractor = mlt.Tractor()
-atractor.mark_in = -1
-atractor.mark_out = -1
-audio = atractor.multitrack()
-audio.connect(vocals, 0)
-audio.connect(music, 1)
-mix = mlt.Transition(profile, "mix")
-mix.set("start", 0.20)
-mix.set("end", 0.20)
-atractor.plant_transition(mix)
+#atractor = mix_audio_tracks(vocals, mtractor, 0.20)
 
 # mix video + audio
-dtractor = mlt.Tractor()
-dtractor.mark_in = -1
-dtractor.mark_out = -1
-dmix = dtractor.multitrack()
-dmix.connect(atractor, 0)
-dmix.connect(tractor, 1)
-mix2 = mlt.Transition(profile, "mix")
-mix2.set("start", 0.5)
-mix2.set("end", 0.5)
-dtractor.plant_transition(mix2)
+#dtractor = mix_audio_tracks(atractor, tractor, 0.5)
+dtractor = mix_audio_tracks(audiomix, tractor, 0.5)
 
 output = mlt.Tractor()
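
Taken together, this hunk is the 2-stage mix of the commit title: stage one builds an audio-only tree (the vocals playlist is also serialized on its own to target_vocals first), writes the mix to target_audio, and bounces it to a WAV with qmelt; stage two reloads that WAV as a producer and mixes it against the video tractor. A compact sketch of the tree (mix(a, b, r) abbreviates mix_audio_tracks(a, b, r); the exact semantics of the ratio are MLT's mix transition, not asserted here):

    # Stage 1 (audio only):
    #   drones   = mix(drones0, drones1, 0.5)
    #   mtractor = mix(drones, music, 0.20)
    #   atractor = mix(vocals, mtractor, 0.20)
    #   atractor -> target_audio (XML) -> qmelt -> target_audio_wav
    #
    # Stage 2 (flattened audio + video):
    #   audiomix = mlt.Producer(profile, target_audio_wav)
    #   dtractor = mix(audiomix, tractor, 0.5)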