render music/vocals

j 2017-02-17 22:42:57 +01:00
parent afb4454bfc
commit 9dda2f1c34
2 changed files with 101 additions and 0 deletions

encode.py (new executable file, 24 additions)

@@ -0,0 +1,24 @@
#!/usr/bin/python3
import subprocess
import os
from glob import glob
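
# For each MLT project in output/*.xml, render <name>.mp4 (only when it is
# missing or older than the XML): qmelt writes an H.264 video file
# (<name>.mp4.mp4) and a WAV (<name>.mp4.wav), then ffmpeg muxes the two
# into the final <name>.mp4.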
for xml in glob('output/*.xml'):
    mp4 = xml.replace('.xml', '.mp4')
    if not os.path.exists(mp4) or os.path.getmtime(xml) > os.path.getmtime(mp4):
        subprocess.call([
            'qmelt', xml, '-consumer', 'avformat:' + mp4 + '.mp4', 'vcodec=libx264'
        ])
        subprocess.call([
            'qmelt', xml, '-consumer', 'avformat:' + mp4 + '.wav',
        ])
        subprocess.call([
            'ffmpeg', '-y',
            '-i', mp4 + '.mp4',
            '-i', mp4 + '.wav',
            '-c:v', 'copy',
            '-map', '0:v',
            '-map', '1:a',
            '-strict', '-2',
            mp4
        ])

(second changed file)

@@ -3,6 +3,7 @@ import os
import sys
import json
import subprocess
from collections import defaultdict
import string
from glob import glob
@@ -14,6 +15,8 @@ import ox.web.auth
base_url = 'https://cdosea.0x2620.org'
FRAME_DURATION = 1/60
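# clip in/out points are snapped to 60 fps frame boundaries in get_clips() below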
api = None
def get_api():
@@ -32,6 +35,34 @@ if os.path.exists('CLIPS.json'):
else:
    CLIPS = {}
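
# scan music/<letter>/0.mp3 .. 9.mp3 once and cache each track's duration in MUSIC.json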
if not os.path.exists('MUSIC.json'):
    MUSIC = defaultdict(list)
    for letter in os.listdir('music'):
        for d in range(10):
            path = os.path.join('music', letter, '%d.mp3' % d)
            MUSIC[letter].append({
                'path': path,
                'duration': ox.avinfo(path)['duration']
            })
    with open('MUSIC.json', 'w') as fd:
        json.dump(MUSIC, fd, indent=2)
else:
    MUSIC = json.load(open('MUSIC.json'))
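
# same for the vocal takes: cache path and duration of every file in vocals/<letter>/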
if not os.path.exists('VOCALS.json'):
    VOCALS = defaultdict(list)
    for letter in os.listdir('vocals'):
        for fn in sorted(os.listdir(os.path.join('vocals', letter))):
            path = os.path.join('vocals', letter, fn)
            VOCALS[letter].append({
                'path': path,
                'duration': ox.avinfo(path)['duration']
            })
    with open('VOCALS.json', 'w') as fd:
        json.dump(VOCALS, fd, indent=2)
else:
    VOCALS = json.load(open('VOCALS.json'))

def get_path(id):
    global PATHS
    if id not in PATHS:
@@ -81,6 +112,9 @@ def get_clips(tag):
        'range': [0, 10000]})['data']['items']
    for clip in clips:
        clip['path'] = get_path(clip['id'].split('/')[0])
        # or use round?
        clip['in'] = int(clip['in'] / FRAME_DURATION) * FRAME_DURATION
        clip['out'] = int(clip['out'] / FRAME_DURATION) * FRAME_DURATION
        clip['duration'] = clip['out'] - clip['in']
        clip['tag'] = tag
    CLIPS[tag] = list(sorted(clips, key=lambda c: c['id']))
@@ -147,6 +181,8 @@ def sequence(seq, letter):
    result = {
        'clips': [],
        'text': [],
        'vocals': [],
        'music': [],
    }
    duration = 0
    MAX_DURATION = 65 * 2
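    # 65 * 2 = 130 seconds; presumably the cap used when filling the sequence with clips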
@@ -175,6 +211,8 @@ def sequence(seq, letter):
    for clip in result['clips']:
        if seq() == 0:
            clip['black'] = True

    # text overlay
    position = last_text = 0
    tags_text = []
    while position < duration:
@@ -203,6 +241,45 @@ def sequence(seq, letter):
        blank = {'blank': True, 'duration': duration - last_text}
        result['text'].append(blank)

    # music
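    # walk the timeline: seq() == 0 starts a track (pad with silence up to the
    # current position, pick a track with the next seq() value, and shrink that
    # silence if the track would run past `duration`); any other value advances
    # the position by that many seconds; a final blank fills any remaining time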
    if letter in MUSIC:
        position = last_music = 0
        while position < duration:
            n = seq()
            if n == 0:
                blank = {'blank': True, 'duration': position - last_music}
                result['music'].append(blank)
                n = seq()
                clip = MUSIC[letter][n]
                position += clip['duration']
                if position > duration and result['music'][-1].get('blank'):
                    result['music'][-1]['duration'] -= (position-duration)
                    position = duration
                result['music'].append(clip)
                last_music = position
            else:
                position += n
        if last_music < duration:
            blank = {'blank': True, 'duration': duration - last_music}
            result['music'].append(blank)

    # vocals
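    # one vocal take per letter: the first seq() picks the take, the second sets
    # how far into the leftover silence it starts (n between 0.1 and 1.0), with
    # blanks filling the silence before and after the take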
    if letter in VOCALS:
        n = seq()
        clip = VOCALS[letter][n]
        n = 1.0 / (seq() + 1)  # 0.1 - 1
        silence = duration - clip['duration']
        silence_start = n * silence
        blank = {'blank': True, 'duration': silence_start}
        if n != 0:
            result['vocals'].append(blank)
        result['vocals'].append(clip)
        if n != 1:
            blank = {'blank': True, 'duration': silence - silence_start}
            result['vocals'].append(blank)

    return result
if __name__ == '__main__':