#!/usr/bin/python
import os
import time
import sys
import json
import mlt
from PyQt5 import QtWidgets
# A QApplication must exist before MLT is initialized: the webvfx module
# segfaults without a running Qt application object.
app = QtWidgets.QApplication(sys.argv)
#mlt.mlt_log_set_level(40) # verbose
mlt.Factory.init()
# The tractor combines the tracks of its multitrack; -1 marks mean
# "no explicit in/out point set".
tractor = mlt.Tractor()
tractor.mark_in = -1
tractor.mark_out = -1
multitrack = tractor.multitrack()
# CLI usage: render_audio.py <timeline.json>
# The output MLT XML is written next to the input file.
source = sys.argv[1]
target = source.replace('.json', '.audio.xml')
with open(source) as fd:
    data = json.load(fd)
# One playlist (track) per audio layer.
music = mlt.Playlist()
vocals = mlt.Playlist()
fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
def add_clip(playlist, file_, duration):
    """Append *duration* frames of the producer for *file_* to *playlist*.

    file_ may be a str path or UTF-8 encoded bytes; duration is a frame
    count. Uses the module-level `profile`.
    """
    in_ = 0
    # mlt.Producer wants a native str path. The original py2-era check
    # (`if not isinstance(file_, str): file_.encode('utf-8')`) is inverted
    # for Python 3: bytes has no .encode and would raise AttributeError.
    # Decode bytes instead.
    if isinstance(file_, bytes):
        file_ = file_.decode('utf-8')
    clip = mlt.Producer(profile, file_)
    # in/out are inclusive frame indexes, hence the -1.
    clip.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(clip)
def add_blank(playlist, length):
    """Insert *length* frames of silence (a blank region) into *playlist*."""
    playlist.blank(length)
# Populate both audio tracks from the JSON timeline. Each entry is either
# a blank (silence) of a given duration, or a clip with a file path.
# Durations are given in seconds and converted to whole frames here.
for playlist, track in ((music, 'music'), (vocals, 'vocals')):
    for clip in data[track]:
        frames = int(clip['duration'] * fps)
        if clip.get('blank'):
            add_blank(playlist, frames)
        else:
            add_clip(playlist, clip['path'], frames)
# Wire the playlists into the multitrack: track 0 = vocals, track 1 = music.
multitrack.connect(vocals, 0)
multitrack.connect(music, 1)
# "mix" transition with combine=1 merges the audio of both tracks.
# NOTE(review): start/end of 0.01 presumably set the mix level ramp —
# confirm against the mlt "mix" transition documentation.
composite = mlt.Transition(profile, "mix")
composite.set("start", 0.01)
composite.set("end", 0.01)
composite.set("combine", 1)
tractor.plant_transition(composite)
# The "xml" consumer serializes the assembled tractor to an MLT XML file
# (the .audio.xml target) instead of rendering audio directly.
consumer = 'xml'
consumer = mlt.Consumer(profile, consumer, target)
consumer.connect(tractor)
#consumer.set("real_time", -2)
consumer.start()