tune audio

This commit is contained in:
parent 10314d1c3d
commit 65ff5d5e32

2 changed files with 82 additions and 17 deletions

render.py (35 changes)
render_mlt.py
render.py

@@ -21,6 +21,11 @@ HIDDEN_TAGS = [
     "women with white males"
 ]
 
+# items to not use at all
+BLACKLIST = [
+    'XN'
+]
+
 api = None
 
 def get_api():
@@ -130,7 +135,9 @@ def get_clips(tag):
             'operator': '&'
         },
         'keys': ['id', 'in', 'out'],
-        'range': [0, 10000]})['data']['items']
+        'range': [0, 90000]})['data']['items']
+    clips = [clip for clip in clips if clip['id'].split('/')[0] not in BLACKLIST]
+
     for clip in clips:
         clip['path'] = get_path(clip['id'].split('/')[0])
         # or use round?
@@ -138,6 +145,7 @@ def get_clips(tag):
         clip['out'] = int(clip['out'] / FRAME_DURATION) * FRAME_DURATION
         clip['duration'] = clip['out'] - clip['in']
         clip['tag'] = tag
+    clips = [clip for clip in clips if clip['duration']]
     CLIPS[tag] = list(sorted(clips, key=lambda c: c['id']))
     with open('CLIPS.json', 'w') as fd:
         json.dump(CLIPS, fd, indent=4, sort_keys=True)
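The quantization in the hunk above floors each cut point to a whole multiple of FRAME_DURATION so clip boundaries land on frame edges (the "# or use round?" comment weighs round() against the truncating int()). A worked example, assuming FRAME_DURATION = 0.04 (25 fps; the actual constant is defined outside this hunk):

    FRAME_DURATION = 0.04  # assumed 25 fps frame length, for illustration
    out = 12.345
    out = int(out / FRAME_DURATION) * FRAME_DURATION  # 308 frames -> 12.32

The new filter on clip['duration'] then drops clips whose in and out points collapsed onto the same frame.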
@@ -250,7 +258,8 @@ def sequence(seq, letter):
         n = seq()
         if n == 0:
             blank = {'blank': True, 'duration': position - last_text}
-            result['text'].append(blank)
+            if blank['duration']:
+                result['text'].append(blank)
         n = seq()
         if n == 0:
             n = 10
@@ -301,6 +310,8 @@ def sequence(seq, letter):
                 and result[track][-1]['duration'] > clip['duration']:
             result[track][-1]['duration'] -= (position-duration)
             position = duration
+            if not result[track][-1]['duration']:
+                result[track].pop(-1)
         if position <= duration:
             result[track].append(clip)
         else:
@@ -312,10 +323,15 @@ def sequence(seq, letter):
     track = 'vocals'
     if letter in VOCALS:
         position = 0
+        loop = 0
         while position < duration:
             n = seq()
+            # vocals should start after one blank
+            if len(result[track]) and result[track][-1].get('blank'):
+                n = 10
+            # 50 % chance of a silence of up to 5 seconds
             if n < 5:
-                n = seq()
+                n = seq() / 2  # (0 - 5 seconds)
                 position += add_blank(result[track], min(n, duration-position))
             else:
                 n = seq()
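add_blank itself is not part of this diff; judging from its call sites (position += add_blank(result[track], min(n, duration-position))), it presumably appends a blank entry and returns the duration it consumed, roughly:

    def add_blank(track, duration):
        # hypothetical sketch, not from this commit: append a blank/silent
        # entry and report how much timeline it filled, so the caller can
        # advance `position`
        track.append({'blank': True, 'duration': duration})
        return duration

If seq() draws uniformly from 0-9 (consistent with the "if n == 0: n = 10" fallback earlier in the file), then n < 5 fires half the time and seq() / 2 yields 0-4.5, matching the "50 % chance of a silence of up to 5 seconds" comment.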
@@ -326,11 +342,15 @@ def sequence(seq, letter):
                     and result[track][-1]['duration'] > clip['duration']:
                 result[track][-1]['duration'] -= (position-duration)
                 position = duration
+                if not result[track][-1]['duration']:
+                    result[track].pop(-1)
             if position <= duration:
                 result[track].append(clip)
             else:
                 position -= clip['duration']
-                break
+                if duration - position < 10 or loop > 10:
+                    break
+            loop += 1
         if position < duration:
             position += add_blank(result[track], duration - position)
     '''
@@ -380,6 +400,7 @@ def sequence(seq, letter):
 
 
 if __name__ == '__main__':
+    encode = len(sys.argv) < 2 or sys.argv[1] != 'json'
     for n in range(10):
         seq = random(n * 1000)
         #for letter in ('T', 'W'):
@@ -400,5 +421,7 @@ if __name__ == '__main__':
         if current != old:
             with open(tjson, 'w') as fd:
                 fd.write(current)
-        subprocess.call(['./render_mlt.py', tjson])
-        #subprocess.call(['./render_audio.py', tjson])
+        if encode:
+            if current != old or os.path.getmtime(tjson) < os.path.getmtime('render_mlt.py'):
+                subprocess.call(['./render_mlt.py', tjson, 'encode'])
+        #subprocess.call(['./render_audio.py', tjson])
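Taken together, the __main__ changes make encoding opt-out: plain ./render.py renders and encodes all ten seeds, while ./render.py json only rewrites the JSON sequences. A sketch of the resulting control flow, using only names from the hunks above (current/old are the freshly generated and previously written JSON text):

    import os
    import subprocess

    def maybe_render(tjson, current, old, encode):
        # rewrite the sequence file only when it actually changed
        if current != old:
            with open(tjson, 'w') as fd:
                fd.write(current)
        if encode:
            # re-encode when the sequence changed, or when render_mlt.py
            # is newer than the JSON written on a previous run
            if current != old or os.path.getmtime(tjson) < os.path.getmtime('render_mlt.py'):
                subprocess.call(['./render_mlt.py', tjson, 'encode'])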
render_mlt.py

@@ -5,6 +5,7 @@ import sys
 import json
 import subprocess
 
+import ox
 import mlt
 from PyQt5 import QtWidgets
 
@@ -14,7 +15,6 @@ app = QtWidgets.QApplication(sys.argv)
 #mlt.mlt_log_set_level(40) # verbose
 mlt.Factory.init()
 
-
 tractor = mlt.Tractor()
 tractor.mark_in = -1
 tractor.mark_out = -1
@@ -22,6 +22,11 @@ tractor.mark_out = -1
 multitrack = tractor.multitrack()
 
 source = sys.argv[1]
+if len(sys.argv) > 2:
+    encode = sys.argv[2] == 'encode'
+else:
+    encode = False
+
 target = source.replace('.json', '.xml')
 target_audio = source.replace('.json', '.audio.xml')
 target_vocals = source.replace('.json', '.vocals.xml')
@@ -52,9 +57,25 @@ def add_color(playlist, color, duration):
 def add_clip(playlist, file_, in_, duration):
     if not isinstance(file_, str):
         file_ = file_.encode('utf-8')
+    '''
+    info = ox.avinfo(file_)
+    tractor = mlt.Tractor()
+    tracks = tractor.multitrack()
+    video = mlt.Playlist()
+    '''
     clip = mlt.Producer(profile, file_)
     clip.set_in_and_out(in_, in_+duration-1)
     playlist.append(clip)
+    '''
+    video.append(clip)
+    tracks.connect(video, 0)
+    if not not info.get('audio'):
+        audio = mlt.Playlist()
+        add_silence(audio, duration)
+        tracks.connect(audio, 1)
+    #tracks.set_in_and_out(in_, in_+duration-1)
+    playlist.append(tractor)
+    '''
 
 def add_audio_clip(playlist, file_, duration):
     in_ = 0
@@ -82,9 +103,11 @@ def add_text(playlist, value, length):
 
 
 for clip in data['clips']:
+    frames = int(clip['duration'] * fps)
+    if not frames:
+        continue
     if clip.get('black'):
         # fixme seconds to fps! duration fame etc!!
-        frames = int(clip['duration'] * fps)
         add_color(video, 'black', frames)
     else:
         #print(clip['duration'], clip['path'])
@@ -93,20 +116,22 @@ for clip in data['clips']:
             sys.exit(1)
         # fixme seconds to fps!
         in_ = int(clip['in'] * fps)
-        frames = int(clip['duration'] * fps)
         add_clip(video, clip['path'], in_, frames)
 add_color(video, 'black', 60)
 
 for clip in data['text']:
+    frames = int(clip['duration'] * fps)
+    if not frames:
+        continue
     if clip.get('blank'):
-        frames = int(clip['duration'] * fps)
         add_blank(overlay, frames)
     else:
-        frames = int(clip['duration'] * fps)
         add_text(overlay, clip['text'], frames)
 
 for clip in data['music']:
     frames = int(clip['duration'] * fps)
+    if not frames:
+        continue
     if clip.get('blank'):
         add_silence(music, frames)
     else:
@@ -114,6 +139,8 @@ for clip in data['music']:
 
 for clip in data['vocals']:
     frames = int(clip['duration'] * fps)
+    if not frames:
+        continue
     if clip.get('blank'):
         add_silence(vocals, frames)
     else:
@@ -138,7 +165,7 @@ composite = mlt.Transition(profile, "composite")
 tractor.plant_transition(composite)
 
 volume = mlt.Filter(profile, "volume")
-volume.set("gain", '0.1')
+volume.set("gain", '0.12')
 tractor.plant_filter(volume)
 
 def mix_audio_tracks(a, b, ratio):
@@ -152,6 +179,8 @@ def mix_audio_tracks(a, b, ratio):
     mix = mlt.Transition(profile, "mix")
     mix.set("start", ratio)
     mix.set("end", ratio)
+    #mix.set("always_active", 1)
+    #mix.set("combine", 1)
     tractor.plant_transition(mix)
     return tractor
 
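Only the tail of mix_audio_tracks is visible in this diff. A plausible reconstruction of the whole helper, assuming the tractor/multitrack pattern used elsewhere in the file (the opening lines are a sketch, not part of the commit):

    def mix_audio_tracks(a, b, ratio):
        # assumed: put both producers on one tractor so a transition can
        # operate across them as track 0 and track 1
        tractor = mlt.Tractor()
        tracks = tractor.multitrack()
        tracks.connect(a, 0)
        tracks.connect(b, 1)
        # from the hunk above: a constant audio blend at the given ratio,
        # held flat by setting the same value at start and end
        mix = mlt.Transition(profile, "mix")
        mix.set("start", ratio)
        mix.set("end", ratio)
        #mix.set("always_active", 1)
        #mix.set("combine", 1)
        tractor.plant_transition(mix)
        return tractor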
@@ -164,9 +193,15 @@ consumer.start()
 # mix drones
 drones = mix_audio_tracks(drones0, drones1, 0.5)
 
+
 # mix drones + music
-mtractor = mix_audio_tracks(drones, music, 0.20)
-atractor = mix_audio_tracks(vocals, mtractor, 0.20)
+mtractor = mix_audio_tracks(drones, music, 0.3)
+norm = mlt.Filter(profile, "volume")
+norm.set("gain", "-12dB")
+mtractor.plant_filter(norm)
+
+# background and vocals
+atractor = mix_audio_tracks(vocals, mtractor, 0.4)
 
 consumer = mlt.Consumer(profile, 'xml', target_audio)
 consumer.connect(atractor)
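The volume filter takes its gain either as a linear factor (the '0.12' earlier) or as a dB string. For reference, dB converts to a linear factor as 10 ** (dB / 20):

    >>> 10 ** (-12 / 20)   # the -12dB pad on the drones+music bed
    0.2512
    >>> 10 ** (3 / 20)     # the +3dB make-up gain on the final output below
    1.4125

So this commit pads the music bed to roughly a quarter of its level before mixing the vocals over it at a 0.4 ratio.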
@@ -176,14 +211,17 @@ target_audio_wav = target_audio + '.wav'
 subprocess.call([
     'qmelt', target_audio, '-consumer', 'avformat:' + target_audio_wav,
 ])
-audiomix = mlt.Producer(profile, target_audio_wav)
+audiomix = mlt.Playlist()
+duration = sum(clip['duration'] for clip in data['clips'])
+add_audio_clip(audiomix, target_audio_wav, int(duration * fps))
 
 # mix vocals and music
 #atractor = mix_audio_tracks(vocals, mtractor, 0.20)
 
 # mix video + audio
 #dtractor = mix_audio_tracks(atractor, tractor, 0.5)
-dtractor = mix_audio_tracks(audiomix, tractor, 0.5)
+dtractor = mix_audio_tracks(audiomix, tractor, 0.29)
 
 
 output = mlt.Tractor()
@@ -193,7 +231,8 @@ output_tracks = output.multitrack()
 output_tracks.connect(dtractor, 0)
 
 norm = mlt.Filter(profile, "volume")
-norm.set("gain", "-6dB")
+#norm.set("gain", "-6dB")
+norm.set("gain", "3dB")
 output.plant_filter(norm)
 
 consumer = 'xml'
@@ -201,3 +240,6 @@ consumer = mlt.Consumer(profile, consumer, target)
 consumer.connect(output)
 #consumer.set("real_time", -2)
 consumer.start()
+
+if encode:
+    subprocess.call(['./encode.py', target])
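After this commit the pipeline runs in three stages, each driving the next (paths illustrative; only the script names and arguments come from the diffs above):

    # render.py writes the sequence JSON, then, unless invoked as
    # `./render.py json`, calls:
    subprocess.call(['./render_mlt.py', 'output.json', 'encode'])
    # render_mlt.py writes output.xml plus the mixed-down audio and,
    # because of the new trailing block, hands the result to:
    subprocess.call(['./encode.py', 'output.xml'])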