#!/usr/bin/python
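"""Build MLT XML renderings from a JSON edit list.

Usage: <this script> project.json

Writes project.xml (video with mixed audio), project.audio.xml (the
audio mix) and project.vocals.xml (vocals only) next to the source,
plus project.audio.xml.wav rendered with qmelt.
"""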

from __future__ import print_function

import os
import time
import sys
import json
import subprocess

import mlt
from PyQt5 import QtWidgets

# Avoid segfault in webvfx
app = QtWidgets.QApplication(sys.argv)

#mlt.mlt_log_set_level(40)  # verbose
mlt.Factory.init()
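
# MLT model used below: a Tractor owns a Multitrack (a stack of parallel
# tracks) plus the transitions and filters planted on it; a Playlist holds
# clips end-to-end and is connected to the multitrack as one track.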

tractor = mlt.Tractor()
tractor.mark_in = -1
tractor.mark_out = -1

multitrack = tractor.multitrack()

source = sys.argv[1]
target = source.replace('.json', '.xml')
target_audio = source.replace('.json', '.audio.xml')
target_vocals = source.replace('.json', '.vocals.xml')

with open(source) as fd:
    data = json.load(fd)
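
# Expected JSON layout (illustrative sketch; field names are taken from the
# loops below, all times are in seconds):
#
# {
#   "clips":   [{"path": "a.mp4", "in": 1.5, "duration": 3.0},
#               {"black": true, "duration": 2.0}],
#   "text":    [{"text": "Title", "duration": 3.0},
#               {"blank": true, "duration": 2.0}],
#   "music":   [{"path": "a.wav", "duration": 3.0},
#               {"blank": true, "duration": 2.0}],
#   "vocals":  [...],
#   "drones0": [...],
#   "drones1": [...]
# }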

# One playlist per track: main video, text overlay and four audio stems.
video = mlt.Playlist()
overlay = mlt.Playlist()
music = mlt.Playlist()
vocals = mlt.Playlist()
drones0 = mlt.Playlist()
drones1 = mlt.Playlist()

fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
#profile.set_explicit(1)

# get profile from clip
#clip = mlt.Producer(profile, 'test.mp4')
#profile.from_producer(clip)


def add_color(playlist, color, duration):
    clip = mlt.Producer(profile, 'color:' + color)
    # the out point is inclusive: duration - 1 gives exactly `duration`
    # frames, matching add_clip below
    clip.set_in_and_out(0, duration - 1)
    playlist.append(clip)


def add_clip(playlist, file_, in_, duration):
    # mlt's bindings want byte strings under Python 2
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    clip = mlt.Producer(profile, file_)
    clip.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(clip)


def add_audio_clip(playlist, file_, duration):
    in_ = 0
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    clip = mlt.Producer(profile, file_)
    clip.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(clip)


def add_silence(playlist, length):
    # assumes silence.wav is available in the working directory
    file_ = 'silence.wav'
    add_audio_clip(playlist, file_, length)


def add_blank(playlist, length):
    playlist.blank(length)


def add_text(playlist, value, length):
    if not isinstance(value, str):
        value = value.encode('utf-8')
    text = mlt.Producer(profile, 'webvfx:text.html')
    text.set('transparent', 1)
    text.set('title', value)
    text.set('length', length)
    playlist.append(text)


# Build the video track.
for clip in data['clips']:
    if clip.get('black'):
        # fixme: naive seconds-to-frames conversion (rounding, duration vs frames)
        frames = int(clip['duration'] * fps)
        add_color(video, 'black', frames)
    else:
        #print(clip['duration'], clip['path'])
        if not os.path.exists(clip['path']):
            print(clip['path'], 'is missing')
            sys.exit(1)
        # fixme: naive seconds-to-frames conversion
        in_ = int(clip['in'] * fps)
        frames = int(clip['duration'] * fps)
        add_clip(video, clip['path'], in_, frames)
# one second of black at the end
add_color(video, 'black', 60)

# Build the text overlay track.
for clip in data['text']:
    frames = int(clip['duration'] * fps)
    if clip.get('blank'):
        add_blank(overlay, frames)
    else:
        add_text(overlay, clip['text'], frames)

# Build the music track; gaps become explicit silence rather than blanks.
for clip in data['music']:
    frames = int(clip['duration'] * fps)
    if clip.get('blank'):
        add_silence(music, frames)
    else:
        add_audio_clip(music, clip['path'], frames)

# Build the vocals track.
for clip in data['vocals']:
    frames = int(clip['duration'] * fps)
    if clip.get('blank'):
        add_silence(vocals, frames)
    else:
        add_audio_clip(vocals, clip['path'], frames)

# Build the two drone tracks.
for name, plist in (
    ('drones0', drones0),
    ('drones1', drones1),
):
    for clip in data[name]:
        frames = int(clip['duration'] * fps)
        if clip.get('blank'):
            add_blank(plist, frames)
        else:
            add_audio_clip(plist, clip['path'], frames)

# Composite the text overlay on top of the video track.
multitrack.connect(video, 0)
multitrack.connect(overlay, 1)
composite = mlt.Transition(profile, "composite")
#composite.set('fill', 1)

tractor.plant_transition(composite)

# Turn down the audio embedded in the source clips.
volume = mlt.Filter(profile, "volume")
volume.set("gain", '0.1')
tractor.plant_filter(volume)


def mix_audio_tracks(a, b, ratio):
    # Wrap two tracks in their own tractor and blend them with a "mix"
    # transition held at a constant level.
    tractor = mlt.Tractor()
    tractor.mark_in = -1
    tractor.mark_out = -1

    audio = tractor.multitrack()
    audio.connect(a, 0)
    audio.connect(b, 1)
    mix = mlt.Transition(profile, "mix")
    mix.set("start", ratio)
    mix.set("end", ratio)
    tractor.plant_transition(mix)
    return tractor
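
# Note: with MLT's "mix" transition, equal "start" and "end" values hold the
# blend level constant for the whole duration; e.g. mix_audio_tracks(drones0,
# drones1, 0.5) below blends the two drone tracks evenly. (This reading of
# mlt's transition_mix weighting is an assumption, not verified against
# every MLT version.)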

# Write the vocals track out as its own MLT XML document.
consumer = mlt.Consumer(profile, 'xml', target_vocals)
consumer.connect(vocals)
consumer.start()

# mix drones
drones = mix_audio_tracks(drones0, drones1, 0.5)

# mix drones + music, then mix vocals with the result
mtractor = mix_audio_tracks(drones, music, 0.20)
atractor = mix_audio_tracks(vocals, mtractor, 0.20)

# Write the full audio mix out as MLT XML.
consumer = mlt.Consumer(profile, 'xml', target_audio)
consumer.connect(atractor)
consumer.start()

# Render the audio mix to a wav file with qmelt and load it back as a
# single producer.
target_audio_wav = target_audio + '.wav'
subprocess.call([
    'qmelt', target_audio, '-consumer', 'avformat:' + target_audio_wav,
])
audiomix = mlt.Producer(profile, target_audio_wav)

# mix vocals and music
#atractor = mix_audio_tracks(vocals, mtractor, 0.20)

# mix video + audio
#dtractor = mix_audio_tracks(atractor, tractor, 0.5)
dtractor = mix_audio_tracks(audiomix, tractor, 0.5)

output = mlt.Tractor()
output.mark_in = -1
output.mark_out = -1
output_tracks = output.multitrack()
output_tracks.connect(dtractor, 0)

# Final gain trim on the combined program.
norm = mlt.Filter(profile, "volume")
norm.set("gain", "-6dB")
output.plant_filter(norm)

# Write the final composition as MLT XML.
consumer = mlt.Consumer(profile, 'xml', target)
consumer.connect(output)
#consumer.set("real_time", -2)
consumer.start()