# pandora_cdosea/render_mlt.py
# (exported from a web code viewer; original metadata: 285 lines, 7.5 KiB, Python)
#!/usr/bin/python
from argparse import ArgumentParser
2017-01-04 17:26:18 +00:00
import json
import os
2017-03-15 12:00:36 +00:00
import subprocess
import sys
import time
2017-01-04 17:26:18 +00:00
2017-03-20 01:30:09 +00:00
import ox
2017-01-04 17:26:18 +00:00
import mlt
from PyQt5 import QtWidgets
usage = "usage: %(prog)s [options] json"
parser = ArgumentParser(usage=usage)
parser.add_argument('-p', '--prefix', dest='prefix', help='version prefix', default='.')
parser.add_argument('files', metavar='path', type=str, nargs='*', help='json source file')
opts = parser.parse_args()

# A "performance" prefix selects the live-performance variant of the render.
version = 'performance' if 'performance' in opts.prefix else 'main'

# Avoid segfault in webvfx
app = QtWidgets.QApplication(sys.argv)

#mlt.mlt_log_set_level(40) # verbose

mlt.Factory.init()
def add_color(playlist, color, duration):
    """Append a solid-color producer of `duration` frames to `playlist`.

    MLT in/out points are inclusive, so the out point must be duration - 1,
    matching add_clip() and add_audio_clip(); the original passed `duration`
    and produced one extra frame per color block.
    """
    clip = mlt.Producer(profile, 'color:' + color)
    clip.set_in_and_out(0, duration - 1)
    playlist.append(clip)
def add_clip(playlist, clip, in_, duration):
    """Append a video clip with per-clip audio handling to `playlist`.

    The clip is wrapped in its own tractor so a volume filter can be
    planted on it; clips without an audio stream get a silent audio
    track instead so track lengths stay aligned.
    """
    path = clip['path']
    if not isinstance(path, str):
        path = path.encode('utf-8')
    info = ox.avinfo(path)

    tractor = mlt.Tractor(profile)
    tracks = tractor.multitrack()

    video = mlt.Playlist()
    producer = mlt.Producer(profile, path)
    producer.set_in_and_out(in_, in_ + duration - 1)
    video.append(producer)
    tracks.connect(video, 0)

    if not info.get('audio'):
        # No audio stream in the file: pad with silence.
        audio = mlt.Playlist()
        add_silence(audio, duration)
        tracks.connect(audio, 1)
    else:
        volume = mlt.Filter(profile, "volume")
        if clip.get('tag', '') == 'gong':
            volume.set("gain", '0.8')
        # Vocal Cords in Action
        elif clip.get('id', '').split('/')[0] in ('EBB', 'ECE', 'ECK', 'ECJ') and \
                clip.get('tag', '') in ('vagina', 'voice'):
            volume.set("gain", '0.4')
        else:
            volume.set("gain", '0.12')
        tractor.plant_filter(volume)

    playlist.append(tractor)
def add_audio_clip(playlist, file_, duration, in_=0):
    """Append `duration` frames of an audio file to `playlist`, starting at `in_`."""
    if not isinstance(file_, str):
        file_ = file_.encode('utf-8')
    producer = mlt.Producer(profile, file_)
    producer.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(producer)
def add_silence(playlist, length):
    """Append `length` frames of silence (from silence.wav) to `playlist`."""
    add_audio_clip(playlist, 'silence.wav', length)
def add_blank(playlist, length):
    """Append `length` blank (empty) frames to `playlist`."""
    playlist.blank(length)
def add_text(playlist, value, length):
    """Append a webvfx text overlay of `length` frames showing `value`."""
    if not isinstance(value, str):
        value = value.encode('utf-8')
    producer = mlt.Producer(profile, 'webvfx:text.html')
    producer.set('transparent', 1)
    producer.set('title', value)
    producer.set('length', length)
    playlist.append(producer)
def mix_audio_tracks(a, b, ratio, combine=False):
    """Return a tractor that mixes tracks `a` and `b` at a constant `ratio`."""
    tractor = mlt.Tractor(profile)
    multitrack = tractor.multitrack()
    for index, track in enumerate((a, b)):
        multitrack.connect(track, index)
    mix = mlt.Transition(profile, "mix")
    # Constant mix: same ratio at start and end.
    mix.set("start", ratio)
    mix.set("end", ratio)
    #mix.set("always_active", 1)
    if combine:
        mix.set("combine", 1)
    tractor.plant_transition(mix)
    return tractor
def save_xml(track, filename):
    """Serialize `track` to an MLT XML file at `filename`."""
    consumer = mlt.Consumer(profile, 'xml', filename)
    consumer.connect(track)
    consumer.start()
# main
fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
#profile.set_explicit(1)

source = opts.files[0]

# All output paths are derived from the json source path.
target = source.replace('.json', '.xml')
target_audio = source.replace('.json', '.audio.xml')
target_audio_wav = target_audio + '.wav'
target_vocals = source.replace('.json', '.vocals.xml')
target_music = source.replace('.json', '.music.xml')
target_drones = source.replace('.json', '.drones.xml')
target_source = source.replace('.json', '.source.xml')
gongs_wav = source.replace('.json', '.gongs.wav')

with open(source) as fd:
    data = json.load(fd)

video = mlt.Playlist()
overlay = mlt.Playlist()
music = mlt.Playlist()
vocals = mlt.Playlist()
drones0 = mlt.Playlist()
drones1 = mlt.Playlist()
gongs = mlt.Playlist()

# hide: 1 hides the video (audio-only track), 2 hides the audio
# (video-only track), 3 hides both (hidden track).
for audio_track in (drones0, drones1, gongs, vocals, music):
    audio_track.set("hide", 1)
# Build the video track from the clip list; zero-length clips are skipped.
for clip in data['clips']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('black'):
        add_color(video, 'black', frames)
    else:
        #print(clip['duration'], clip['path'])
        if not os.path.exists(clip['path']):
            print(clip['path'], 'is missing')
            sys.exit(1)
        # fixme seconds to fps!
        in_ = int(clip['in'] * fps)
        add_clip(video, clip, in_, frames)
add_color(video, 'black', 60)

# Text overlay track.
for clip in data['text']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_blank(overlay, frames)
    else:
        add_text(overlay, clip['text'], frames)

# Music track.
for clip in data['music']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_silence(music, frames)
    else:
        add_audio_clip(music, clip['path'], frames)

# Vocals track.
for clip in data['vocals']:
    frames = int(clip['duration'] * fps)
    if not frames:
        continue
    if clip.get('blank'):
        add_silence(vocals, frames)
    else:
        add_audio_clip(vocals, clip['path'], frames)

# Two drone tracks.
for name, plist in (
    ('drones0', drones0),
    ('drones1', drones1),
):
    for clip in data[name]:
        frames = int(clip['duration'] * fps)
        # Skip zero-length entries, consistent with the loops above.
        # (The original drones loop lacked this guard, so a zero-duration
        # clip would have been appended with an out point of -1.)
        if not frames:
            continue
        if clip.get('blank'):
            add_silence(plist, frames)
        else:
            add_audio_clip(plist, clip['path'], frames)
2017-10-02 11:41:24 +00:00
# Total duration in seconds of the video clip list.
duration = sum(clip['duration'] for clip in data['clips'])

save_xml(vocals, target_vocals)

# Save a video-only copy of the clip track.
video.set("hide", 1)
save_xml(video, target_source)
video.set("hide", 0)

# mix drones
drones = mix_audio_tracks(drones0, drones1, 0.5)
save_xml(drones, target_drones)

save_xml(music, target_music)

if version == 'performance':
    # render gongs (only if the wav file does not exist yet)
    render_gongs = not os.path.exists(gongs_wav)
    #if not render_gongs:
    #    render_gongs = os.path.getmtime(source) > os.path.getmtime(gongs_wav)
    if render_gongs:
        subprocess.call([
            # offset in pi, duration, number of tracks, target
            './render_gongs.py',
            str(data['gongs']['offset']),
            str(duration),
            str(data['gongs']['tracks']),
            gongs_wav
        ])
    # load gongs
    add_audio_clip(gongs, gongs_wav, int(duration * fps), int(5*fps))
    # mix gongs + music
    mtractor = mix_audio_tracks(gongs, music, 0.15)
else:
    # mix drones + music
    mtractor = mix_audio_tracks(drones, music, 0.3)

# lower volume of the background mix
norm = mlt.Filter(profile, "volume")
norm.set("gain", "-6dB" if version == 'performance' else "-12dB")
mtractor.plant_filter(norm)

# background and vocals
# vocals are on extra track now
#atractor = mix_audio_tracks(vocals, mtractor, 0.4)
atractor = mtractor

save_xml(atractor, target_audio)

# Render the mixed audio to wav so the final composition uses one flat track.
subprocess.call([
    'qmelt', target_audio, '-consumer', 'avformat:' + target_audio_wav,
])

audiomix = mlt.Playlist()
add_audio_clip(audiomix, target_audio_wav, int(duration * fps))

# mix video + audio
#tractor = mix_audio_tracks(atractor, video, 0.29)
# with vocals to background 0.4 -> 0.29
# with vocals as extra track 0.725
#tractor = mix_audio_tracks(audiomix, video, 0.5)
tractor = mix_audio_tracks(audiomix, video, 0.6)

output = mlt.Tractor(profile)
output_tracks = output.multitrack()
output_tracks.connect(tractor, 0)
output_tracks.connect(overlay, 1)

norm = mlt.Filter(profile, "volume")
#norm.set("gain", "-6dB")
norm.set("gain", "6dB")
output.plant_filter(norm)

composite = mlt.Transition(profile, "composite")
#composite.set('fill', 1)
output.plant_transition(composite)

save_xml(output, target)