pandora_cdosea/render_audio.py

#!/usr/bin/python
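# Build an audio-only MLT timeline (music + vocals tracks) from a JSON clip
# list and serialise it as an MLT XML project next to the input file.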
import os
import time
import sys
import json
import mlt
from PyQt5 import QtWidgets
# Avoid segfault in webvfx
app = QtWidgets.QApplication(sys.argv)
#mlt.mlt_log_set_level(40) # verbose
mlt.Factory.init()
tractor = mlt.Tractor()
tractor.mark_in = -1
tractor.mark_out = -1
multitrack = tractor.multitrack()
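# First CLI argument: the JSON timeline; the MLT XML is written alongside it.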
source = sys.argv[1]
target = source.replace('.json', '.audio.xml')
with open(source) as fd:
    data = json.load(fd)
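# One playlist per audio track.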
music = mlt.Playlist()
vocals = mlt.Playlist()
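# Clip durations in the JSON are treated as seconds and converted to frames
# at this rate in the loops below.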
fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
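# Append file_ to playlist, trimmed to its first `duration` frames.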
def add_clip(playlist, file_, duration):
    in_ = 0
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    clip = mlt.Producer(profile, file_)
    clip.set_in_and_out(in_, in_+duration-1)
    playlist.append(clip)

def add_blank(playlist, length):
    playlist.blank(length)
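# Fill both playlists from the JSON; entries flagged 'blank' become silent gaps.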
for clip in data['music']:
    frames = int(clip['duration'] * fps)
    if clip.get('blank'):
        add_blank(music, frames)
    else:
        add_clip(music, clip['path'], frames)

for clip in data['vocals']:
    frames = int(clip['duration'] * fps)
    if clip.get('blank'):
        add_blank(vocals, frames)
    else:
        add_clip(vocals, clip['path'], frames)
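# Track 0 carries the vocals, track 1 the music.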
multitrack.connect(vocals, 0)
multitrack.connect(music, 1)
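# Combine the two audio tracks with MLT's "mix" transition (combine=1 mixes
# them together rather than crossfading).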
composite = mlt.Transition(profile, "mix")
composite.set("start", 0.01)
composite.set("end", 0.01)
composite.set("combine", 1)
tractor.plant_transition(composite)
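# The 'xml' consumer serialises the tractor to the target file instead of rendering it.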
consumer = mlt.Consumer(profile, 'xml', target)
consumer.connect(tractor)
#consumer.set("real_time", -2)
consumer.start()
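# Usage: render_audio.py <timeline>.json  ->  writes <timeline>.audio.xml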