# pandora_cdosea/render_mlt.py
#!/usr/bin/python
import os
import time
import sys
import json
import mlt
from PyQt5 import QtWidgets
# Avoid segfault in webvfx
app = QtWidgets.QApplication(sys.argv)
#mlt.mlt_log_set_level(40) # verbose
mlt.Factory.init()
# Top-level tractor that holds the video + text-overlay multitrack.
tractor = mlt.Tractor()
tractor.mark_in = -1
tractor.mark_out = -1
multitrack = tractor.multitrack()
# Usage: render_mlt.py <edit>.json — the MLT XML is written next to it.
source = sys.argv[1]
target = source.replace('.json', '.xml')
with open(source) as fd:
    data = json.load(fd)
# One playlist per output track.
video = mlt.Playlist()
overlay = mlt.Playlist()
music = mlt.Playlist()
vocals = mlt.Playlist()
# JSON durations are in seconds; they are converted to frames at this rate
# (see the "fixme seconds to fps" notes in the clip loops below).
fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
#profile.set_explicit(1)
# get profile from clip
#clip = mlt.Producer(profile, 'test.mp4')
#profile.from_producer(clip)
def add_color(playlist, color, duration):
    """Append *duration* frames of a solid *color* producer to *playlist*.

    MLT in/out points are inclusive frame indices, so the out point is
    duration - 1; this matches the frame accounting in add_clip() and
    add_audio_clip() (both use in_ + duration - 1).
    """
    clip = mlt.Producer(profile, 'color:' + color)
    # Bug fix: the previous out point of `duration` produced duration + 1
    # frames because the out frame is inclusive.
    clip.set_in_and_out(0, duration - 1)
    playlist.append(clip)
def add_clip(playlist, file_, in_, duration):
    """Append *duration* frames of *file_* to *playlist*, starting at frame *in_*."""
    # Python-2 era path handling: coerce non-str paths to UTF-8 bytes for MLT.
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    producer = mlt.Producer(profile, file_)
    # MLT out points are inclusive, hence the -1.
    out = in_ + duration - 1
    producer.set_in_and_out(in_, out)
    playlist.append(producer)
def add_audio_clip(playlist, file_, duration):
    """Append the first *duration* frames of audio file *file_* to *playlist*."""
    # Python-2 era path handling: coerce non-str paths to UTF-8 bytes for MLT.
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    producer = mlt.Producer(profile, file_)
    # Audio clips always start at frame 0; MLT out points are inclusive.
    in_ = 0
    producer.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(producer)
def add_blank(playlist, length):
playlist.blank(length)
def add_text(playlist, value, length):
    """Append a transparent webvfx HTML title of *length* frames to *playlist*."""
    # Python-2 era handling: coerce non-str titles to UTF-8 bytes.
    if not isinstance(value, str):
        value = value.encode('utf-8')
    title = mlt.Producer(profile, 'webvfx:text.html')
    title.set('transparent', 1)
    title.set('title', value)
    title.set('length', length)
    playlist.append(title)
# Build the main video track: each entry is either a black gap or a file
# clip. Durations come in seconds and are converted to frame counts here
# (fixme in original: seconds-to-fps conversion is ad hoc).
for clip in data['clips']:
    frames = int(clip['duration'] * fps)
    if clip.get('black'):
        add_color(video, 'black', frames)
    else:
        #print(clip['duration'], clip['path'])
        path = clip['path']
        # Fail fast if a referenced source file is absent.
        if not os.path.exists(path):
            print(path, 'is missing')
            sys.exit(1)
        add_clip(video, path, int(clip['in'] * fps), frames)
# Hard-coded 60-frame black tail at the end of the video track.
add_color(video, 'black', 60)
# Overlay track: timed titles separated by blank gaps.
for item in data['text']:
    frames = int(item['duration'] * fps)
    if item.get('blank'):
        add_blank(overlay, frames)
    else:
        add_text(overlay, item['text'], frames)

# Music track: audio clips separated by blank gaps.
for item in data['music']:
    frames = int(item['duration'] * fps)
    if item.get('blank'):
        add_blank(music, frames)
    else:
        add_audio_clip(music, item['path'], frames)

# Vocals track: same structure as the music track.
for item in data['vocals']:
    frames = int(item['duration'] * fps)
    if item.get('blank'):
        add_blank(vocals, frames)
    else:
        add_audio_clip(vocals, item['path'], frames)
# Composite the text overlay (track 1) over the video (track 0).
multitrack.connect(video, 0)
multitrack.connect(overlay, 1)
composite = mlt.Transition(profile, "composite")
#composite.set('fill', 1)
tractor.plant_transition(composite)
# Attenuate the video tractor's own audio (gain 0.1) so the separate
# music/vocals mix dominates.
volume = mlt.Filter(profile, "volume")
volume.set("gain", '0.1')
tractor.plant_filter(volume)
# mix vocals and music
# Second tractor: vocals on track 0, music on track 1, blended with a
# constant 0.20 mix transition.
atractor = mlt.Tractor()
atractor.mark_in = -1
atractor.mark_out = -1
audio = atractor.multitrack()
audio.connect(vocals, 0)
audio.connect(music, 1)
mix = mlt.Transition(profile, "mix")
mix.set("start", 0.20)
mix.set("end", 0.20)
atractor.plant_transition(mix)
# mix video + audio
# Third tractor: combine the audio tractor (track 0) with the video
# tractor (track 1) at a constant 0.5/0.5 mix.
dtractor = mlt.Tractor()
dtractor.mark_in = -1
dtractor.mark_out = -1
dmix = dtractor.multitrack()
dmix.connect(atractor, 0)
dmix.connect(tractor, 1)
mix2 = mlt.Transition(profile, "mix")
mix2.set("start", 0.5)
mix2.set("end", 0.5)
dtractor.plant_transition(mix2)
# Final tractor: wrap the combined graph, apply an overall -6dB gain,
# and serialize everything to MLT XML at `target`.
output = mlt.Tractor()
# Bug fix: these two lines previously re-assigned tractor.mark_in /
# tractor.mark_out (copy-paste slip); every tractor here disables its
# own in/out marks.
output.mark_in = -1
output.mark_out = -1
output_tracks = output.multitrack()
output_tracks.connect(dtractor, 0)
norm = mlt.Filter(profile, "volume")
norm.set("gain", "-6dB")
output.plant_filter(norm)
# The 'xml' consumer writes the project description instead of encoding.
consumer = mlt.Consumer(profile, 'xml', target)
consumer.connect(output)
#consumer.set("real_time", -2)
consumer.start()