j 2017-05-16 12:59:51 +00:00
parent 130c790dc7
commit a407dfd1b5
5 changed files with 188 additions and 125 deletions

View File

@@ -24,10 +24,11 @@ def get_videoduration(video):
 def is_new(xml, mp4):
     if not os.path.exists(mp4):
         return True
-    xtime = os.path.getmtime(xml)
-    vtime = max(
-        os.path.getmtime(mp4),
+    vtime = os.path.getmtime(mp4)
+    xtime = max(
+        os.path.getmtime(xml),
         os.path.getmtime('text.html'),
+        os.path.getmtime('encode.py'),
         os.path.getmtime('DRONES.json'),
         os.path.getmtime('VOCALS.json'),
     )
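The comparison that consumes these timestamps sits below this hunk, so for context here is how the rebuilt check presumably reads end to end; the final return line is an assumption about the part the diff does not show:

    def is_new(xml, mp4):
        if not os.path.exists(mp4):
            return True
        vtime = os.path.getmtime(mp4)
        xtime = max(
            os.path.getmtime(xml),
            os.path.getmtime('text.html'),
            os.path.getmtime('encode.py'),  # re-render when the encoder itself changes
            os.path.getmtime('DRONES.json'),
            os.path.getmtime('VOCALS.json'),
        )
        return xtime > vtime  # assumed: any input newer than the mp4 forces a re-encode

Assuming that comparison, swapping xtime and vtime also fixes the old version, which measured only the playlist XML against a maximum that included the output itself, so a changed JSON input never triggered a re-encode.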
@@ -35,29 +36,76 @@ def is_new(xml, mp4):
 def encode(xml, force=False):
     audio_xml = xml.replace('.xml', '.audio.xml')
+    vocals_xml = xml.replace('.xml', '.vocals.xml')
     mp4 = xml.replace('.xml', '.mp4')
     mp4_480p = mp4.replace('.mp4', '.480p.mp4')
-    pre = mp4 + '.pre.mp4'
-    pre_480p = mp4_480p + '.pre.mp4'
-    video = mp4 + '.v.mp4'
+    video = mp4 + '.v.mov'
+    amix = mp4 + '.amix.mp4'
     audio = mp4 + '.wav'
-    if force or is_new(xml, mp4):
-        subprocess.call([
-            'qmelt', xml, '-consumer', 'avformat:' + video, 'vcodec=libx264', 'strict=-2'
-        ])
+    vocals = mp4 + '.vocals.wav'
+    silence = 'silence_mono.wav'
+    left = video + '_left.wav'
+    right = video + '_right.wav'
+    public_mp4 = 'public/' + mp4.split('/')[-1][0].lower() + mp4.split('/')[-2] + '.1080p.mp4'
+    public_mp4_480p = public_mp4.replace('.1080p.mp4', '.480p.mp4')
+    if force or is_new(xml, public_mp4):
+        cmd = [
+            'qmelt', xml, '-consumer',
+            'avformat:' + video,
+            'vcodec=libx264',
+            'acodec=pcm_s16le'
+        ]
+        subprocess.call(cmd)
         duration = get_videoduration(video)
+        cmd = [
+            'ffmpeg', '-y', '-i', video,
+            '-map_channel', '0.1.0', left,
+            '-map_channel', '0.1.1', right,
+        ]
+        subprocess.call(cmd)
+        cmd = [
+            'qmelt', vocals_xml, '-consumer',
+            'avformat:' + vocals,
+            'acodec=pcm_s16le',
+            'ac=1'
+        ]
+        subprocess.call(cmd)
+        cmd = [
+            'ffmpeg', '-y',
+            '-i', left,     # FL
+            '-i', right,    # FR
+            '-i', vocals,   # FC
+            '-i', silence,  # LFE
+            '-i', vocals,   # BL
+            '-i', vocals,   # BR
+            '-filter_complex',
+            '[0:0][1:0][2:0][3:0][4:0][5:0] amerge=inputs=6[aout]',
+            '-map', "[aout]",
+            '-strict', '-2',
+            '-c:a', 'aac',
+            amix
+        ]
+        subprocess.call(cmd)
+        os.unlink(left)
+        os.unlink(right)
+        os.unlink(vocals)
         cmd = [
             'ffmpeg', '-y',
             '-i', video,
+            '-i', amix,
+            '-t', duration,
             '-c:a', 'copy',
             '-c:v', 'copy',
-            '-t', duration,
+            '-map', '0:v:0', '-map', '1:a:0',
             '-movflags', '+faststart',
-            pre
+            mp4
         ]
         subprocess.call(cmd)
         os.unlink(video)
-        shutil.move(pre, mp4)
+        os.unlink(amix)
         cmd = [
             'ffmpeg', '-y',
             '-i', mp4,
@@ -68,14 +116,22 @@ def encode(xml, force=False):
             '-b:v', '750k',
             '-profile:v', 'high',
             '-movflags', '+faststart',
-            pre_480p
+            mp4_480p
         ]
         subprocess.call(cmd)
-        shutil.move(pre_480p, mp4_480p)
+        shutil.move(mp4, public_mp4)
+        shutil.move(mp4_480p, public_mp4_480p)
 
 def encode_all():
     for xml in sorted(glob('output/*/*.xml')):
-        if xml.endswith('.audio.xml') or xml.endswith('.vocals.xml'):
+        parts = xml.split('.')
+        if len(parts) > 2 and parts[-2] in (
+            'audio',
+            'drones',
+            'music',
+            'source',
+            'vocals',
+        ):
             continue
         encode(xml)
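A note on the new audio path in encode(): qmelt now renders the composition with PCM audio, -map_channel splits that stereo pair into two mono WAVs, the vocals playlist is rendered to a third mono WAV, and amerge stacks six mono inputs into one 5.1 stream, with input order defining the layout (FL, FR, FC, LFE, BL, BR). A minimal standalone sketch of the merge step, using placeholder file names for the temporary files encode() writes:

    import subprocess

    # six mono sources in 5.1 order: FL, FR, FC, LFE, BL, BR
    inputs = ['left.wav', 'right.wav', 'vocals.wav',
              'silence_mono.wav', 'vocals.wav', 'vocals.wav']
    cmd = ['ffmpeg', '-y']
    for path in inputs:
        cmd += ['-i', path]
    cmd += [
        '-filter_complex', '[0:0][1:0][2:0][3:0][4:0][5:0] amerge=inputs=6[aout]',
        '-map', '[aout]',
        '-strict', '-2',  # needed by the built-in aac encoder on ffmpeg of this era
        '-c:a', 'aac',
        'amix.mp4',
    ]
    subprocess.call(cmd)

Routing the vocals WAV to FC, BL and BR puts the voice on the center and rear speakers while the original stereo mix stays on the front pair.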

View File

@@ -76,14 +76,13 @@ for letter in sorted(KEYWORDS):
         print(size)
         print('\t', len(buckets[size]), 'clips', len(bucket_tags), 'tags', 'durations from %.3f to %.3f' % (dmin, dmax))
+        used_tags = [
+            '%s (%d)' % (t, bucket_tags[t])
+            for t in sorted(bucket_tags, key=lambda t: (-bucket_tags[t],t))
+        ]
+        print('\t', 'used tags:', ', '.join(used_tags))
         if set(letter_tags) - set(bucket_tags):
-            used_tags = [
-                '%s (%d)' % (t, bucket_tags[t])
-                for t in sorted(bucket_tags, key=lambda t: (-bucket_tags[t],t))
-            ]
-            print('\t', 'used tags:', ', '.join(used_tags))
-            if set(letter_tags) - set(bucket_tags):
-                print('\t', 'missing tags:', ', '.join(sorted(set(letter_tags) - set(bucket_tags))))
+            print('\t', 'missing tags:', ', '.join(sorted(set(letter_tags) - set(bucket_tags))))
 
 for tag in sorted(known_tags):
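The sort key in the de-indented comprehension, lambda t: (-bucket_tags[t], t), orders tags by descending count and breaks ties alphabetically; a quick illustration with invented counts:

    bucket_tags = {'ghost': 3, 'spirit': 3, 'geography': 7}
    used_tags = [
        '%s (%d)' % (t, bucket_tags[t])
        for t in sorted(bucket_tags, key=lambda t: (-bucket_tags[t], t))
    ]
    print(', '.join(used_tags))  # geography (7), ghost (3), spirit (3)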

View File

@@ -82,7 +82,8 @@ KEYWORDS = {
         "geography",
         "ghost", "spirit",
         "guerillas",
-        "transmission"
+        "transmission",
+        "gene z hanrahan"
     ],
     "H": [
         "air-conditioner",

View File

@@ -16,9 +16,11 @@ import ox.web.auth
 base_url = 'http://127.0.0.1:2620'
 
 FRAME_DURATION = 1/60
+MAX_DURATION = 40
 
 HIDDEN_TAGS = [
-    "women with white males"
+    "women with white males",
+    "gene z hanrahan"
 ]
 
 # items to not use at all
@@ -77,7 +79,7 @@ else:
 
 if not os.path.exists('DRONES.json'):
     DRONES = defaultdict(list)
-    prefix = 'drones'
+    prefix = 'drones_wav'
     for letter in os.listdir(prefix):
         for fn in sorted(os.listdir(os.path.join(prefix, letter))):
             path = os.path.join(prefix, letter, fn)
@@ -146,7 +148,7 @@ def get_clips(tag):
         clip['out'] = int(clip['out'] / FRAME_DURATION) * FRAME_DURATION
         clip['duration'] = clip['out'] - clip['in']
         clip['tag'] = tag
-    clips = [clip for clip in clips if clip['duration']]
+    clips = [clip for clip in clips if clip['duration'] and clip['duration'] <= MAX_DURATION]
     for clip in clips:
         fduration = ox.avinfo(clip['path'])['duration']
         if clip['out'] > fduration:
@@ -214,7 +216,17 @@ def filter_clips(clips, duration, max_duration=0):
             clips_[clip['tag']].append(clip)
     return clips_
 
+def add_blank(track, d):
+    if track and track[-1].get('blank'):
+        track[-1]['duration'] += d
+    else:
+        blank = {'blank': True, 'duration': d}
+        track.append(blank)
+    return d
+
 def sequence(seq, letter):
     tags = KEYWORDS[letter]
     clips = {tag: get_clips(tag) for tag in tags}
     all_clips = clips.copy()
@@ -229,6 +241,11 @@ def sequence(seq, letter):
     duration = 0
     MAX_DURATION = 60 * 2 + 5
     MIN_DURATION = 60 * 2 - 4
+
+    # add 1 black frame for sync playback
+    duration = 1 * FRAME_DURATION
+    result['clips'].append({'black': True, 'duration': duration})
+
     while duration < MAX_DURATION and not duration >= MIN_DURATION:
         # clip duration: 1-10
         n = seq()
@@ -264,6 +281,10 @@ def sequence(seq, letter):
     # text overlay
     position = last_text = 0
     tags_text = []
+
+    # no overlay for the first 2 frames
+    position = last_text = add_blank(result['text'], 2 * FRAME_DURATION)
+
     while position < duration:
         n = seq()
         if n == 0:
@@ -293,14 +314,6 @@ def sequence(seq, letter):
         blank = {'blank': True, 'duration': duration - last_text}
         result['text'].append(blank)
 
-    def add_blank(track, d):
-        if track and track[-1].get('blank'):
-            track[-1]['duration'] += d
-        else:
-            blank = {'blank': True, 'duration': d}
-            track.append(blank)
-        return d
-        position += d
 
     # music
     track = 'music'
@@ -405,12 +418,12 @@ def sequence(seq, letter):
         if result[track]:
             tduration = sum([c['duration'] for c in result[track]])
             if not abs(tduration - duration) < 0.000001:
-                raise Exception('invalid duration %s vs %s %s' % (tduration, duration, result[track]))
+                raise Exception('invalid duration on track: %s %s vs %s %s' % (track, tduration, duration, result[track]))
     return result
 
 if __name__ == '__main__':
-    encode = len(sys.argv) < 2 or sys.argv[1] != 'json'
+    render_xml = len(sys.argv) < 2 or sys.argv[1] != 'json'
     for n in range(10):
         seq = random(n * 1000)
         #for letter in ('T', 'W'):
@@ -431,7 +444,7 @@ if __name__ == '__main__':
             if current != old:
                 with open(tjson, 'w') as fd:
                     fd.write(current)
-            if encode:
+            if render_xml:
                 if current != old or os.path.getmtime(tjson) < os.path.getmtime('render_mlt.py'):
-                    subprocess.call(['./render_mlt.py', tjson, 'encode'])
+                    subprocess.call(['./render_mlt.py', tjson])
                 #subprocess.call(['./render_audio.py', tjson])
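The add_blank helper this commit promotes to module level coalesces consecutive blanks instead of stacking them, so padding a track twice in a row still yields one blank entry; a small usage sketch (the 'text' clip is a hypothetical stand-in for whatever the overlay track holds):

    FRAME_DURATION = 1/60

    track = []
    add_blank(track, 2 * FRAME_DURATION)  # [{'blank': True, 'duration': 2/60}]
    add_blank(track, 0.5)                 # extends that blank rather than appending
    track.append({'text': 'ghost', 'duration': 1.0})
    add_blank(track, 0.25)                # last entry is not blank, so a new one is added
    print(len(track))                     # 3

Returning d lets callers advance their position counter in the same expression, which is exactly how the new overlay code uses it.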

View File

@@ -15,35 +15,6 @@ app = QtWidgets.QApplication(sys.argv)
 #mlt.mlt_log_set_level(40) # verbose
 mlt.Factory.init()
 
-fps = 60
-profile = mlt.Profile("atsc_1080p_%d" % fps)
-#profile.set_explicit(1)
-
-tractor = mlt.Tractor(profile)
-tractor.mark_in = -1
-tractor.mark_out = -1
-
-multitrack = tractor.multitrack()
-
-source = sys.argv[1]
-if len(sys.argv) > 2:
-    encode = sys.argv[2] == 'encode'
-else:
-    encode = False
-target = source.replace('.json', '.xml')
-target_audio = source.replace('.json', '.audio.xml')
-target_vocals = source.replace('.json', '.vocals.xml')
-
-with open(source) as fd:
-    data = json.load(fd)
-
-video = mlt.Playlist()
-overlay = mlt.Playlist()
-music = mlt.Playlist()
-vocals = mlt.Playlist()
-drones0 = mlt.Playlist()
-drones1 = mlt.Playlist()
-
 def add_color(playlist, color, duration):
     red = mlt.Producer(profile, 'color:' + color)
@@ -99,13 +70,66 @@ def add_text(playlist, value, length):
     text.set('length', length)
     playlist.append(text)
 
+def mix_audio_tracks(a, b, ratio, combine=False):
+    tractor = mlt.Tractor(profile)
+    audio = tractor.multitrack()
+    audio.connect(a, 0)
+    audio.connect(b, 1)
+    mix = mlt.Transition(profile, "mix")
+    mix.set("start", ratio)
+    mix.set("end", ratio)
+    #mix.set("always_active", 1)
+    if combine:
+        mix.set("combine", 1)
+    tractor.plant_transition(mix)
+    return tractor
+
+def save_xml(track, filename):
+    consumer = mlt.Consumer(profile, 'xml', filename)
+    consumer.connect(track)
+    consumer.start()
+
+# main
+fps = 60
+profile = mlt.Profile("atsc_1080p_%d" % fps)
+#profile.set_explicit(1)
+
+source = sys.argv[1]
+target = source.replace('.json', '.xml')
+target_audio = source.replace('.json', '.audio.xml')
+target_audio_wav = target_audio + '.wav'
+target_vocals = source.replace('.json', '.vocals.xml')
+target_music = source.replace('.json', '.music.xml')
+target_drones = source.replace('.json', '.drones.xml')
+target_source = source.replace('.json', '.source.xml')
+
+with open(source) as fd:
+    data = json.load(fd)
+
+video = mlt.Playlist()
+overlay = mlt.Playlist()
+music = mlt.Playlist()
+vocals = mlt.Playlist()
+drones0 = mlt.Playlist()
+drones1 = mlt.Playlist()
+
+# hide: Set to 1 to hide the video (make it an audio-only track),
+# 2 to hide the audio (make it a video-only track),
+# or 3 to hide audio and video (hidden track).
+drones0.set("hide", 1)
+drones1.set("hide", 1)
+vocals.set("hide", 1)
+music.set("hide", 1)
+
 for clip in data['clips']:
     frames = int(clip['duration'] * fps)
     if not frames:
         continue
     if clip.get('black'):
+        # fixme seconds to fps! duration fame etc!!
         add_color(video, 'black', frames)
     else:
         #print(clip['duration'], clip['path'])
@@ -151,95 +175,65 @@ for name, plist in (
     for clip in data[name]:
         frames = int(clip['duration'] * fps)
         if clip.get('blank'):
-            add_blank(plist, frames)
+            add_silence(plist, frames)
         else:
             add_audio_clip(plist, clip['path'], frames)
 
-multitrack.connect(video, 0)
-multitrack.connect(overlay, 1)
-composite = mlt.Transition(profile, "composite")
-#composite.set('fill', 1)
-tractor.plant_transition(composite)
-
-'''
-volume = mlt.Filter(profile, "volume")
-volume.set("gain", '0.12')
-tractor.plant_filter(volume)
-'''
-
-def mix_audio_tracks(a, b, ratio):
-    tractor = mlt.Tractor(profile)
-    tractor.mark_in = -1
-    tractor.mark_out = -1
-    audio = tractor.multitrack()
-    audio.connect(a, 0)
-    audio.connect(b, 1)
-    mix = mlt.Transition(profile, "mix")
-    mix.set("start", ratio)
-    mix.set("end", ratio)
-    #mix.set("always_active", 1)
-    #mix.set("combine", 1)
-    tractor.plant_transition(mix)
-    return tractor
-
-consumer = 'xml'
-consumer = mlt.Consumer(profile, consumer, target_vocals)
-consumer.connect(vocals)
-consumer.start()
+save_xml(vocals, target_vocals)
+
+video.set("hide", 1)
+save_xml(video, target_source)
+video.set("hide", 0)
 
 # mix drones
 drones = mix_audio_tracks(drones0, drones1, 0.5)
+save_xml(drones, target_drones)
+save_xml(music, target_music)
 
 # mix drones + music
 mtractor = mix_audio_tracks(drones, music, 0.3)
 
 norm = mlt.Filter(profile, "volume")
+# lower volume
 norm.set("gain", "-12dB")
 mtractor.plant_filter(norm)
 
 # background and vocals
-atractor = mix_audio_tracks(vocals, mtractor, 0.4)
-
-consumer = mlt.Consumer(profile, 'xml', target_audio)
-consumer.connect(atractor)
-consumer.start()
-
-target_audio_wav = target_audio + '.wav'
-'''
-'''
+# vocals are on extra track now
+#atractor = mix_audio_tracks(vocals, mtractor, 0.4)
+atractor = mtractor
+save_xml(atractor, target_audio)
+
 subprocess.call([
     'qmelt', target_audio, '-consumer', 'avformat:' + target_audio_wav,
 ])
+
 audiomix = mlt.Playlist()
 duration = sum(clip['duration'] for clip in data['clips'])
 add_audio_clip(audiomix, target_audio_wav, int(duration * fps))
 
-# mix vocals and music
-#atractor = mix_audio_tracks(vocals, mtractor, 0.20)
-
 # mix video + audio
-#dtractor = mix_audio_tracks(atractor, tractor, 0.5)
-dtractor = mix_audio_tracks(audiomix, tractor, 0.29)
+#tractor = mix_audio_tracks(atractor, video, 0.29)
+# with vocals to background 0.4 -> 0.29
+# with vocals as extra track 0.725
+#tractor = mix_audio_tracks(audiomix, video, 0.5)
+tractor = mix_audio_tracks(audiomix, video, 0.6)
 
 output = mlt.Tractor(profile)
-tractor.mark_in = -1
-tractor.mark_out = -1
 output_tracks = output.multitrack()
-output_tracks.connect(dtractor, 0)
+output_tracks.connect(tractor, 0)
+output_tracks.connect(overlay, 1)
+
 norm = mlt.Filter(profile, "volume")
 #norm.set("gain", "-6dB")
 norm.set("gain", "3dB")
 output.plant_filter(norm)
 
-consumer = 'xml'
-consumer = mlt.Consumer(profile, consumer, target)
-consumer.connect(output)
-#consumer.set("real_time", -2)
-consumer.start()
-
-if encode:
-    subprocess.call(['./encode.py', target])
+composite = mlt.Transition(profile, "composite")
+#composite.set('fill', 1)
+output.plant_transition(composite)
+
+save_xml(output, target)
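Taken together, the refactor turns the render into a tree of nested tractors: each mix_audio_tracks call wraps two tracks in a Tractor joined by a "mix" transition held at a fixed ratio, and save_xml serializes any node through the "xml" consumer. A condensed sketch of the final graph, assuming the playlists are already populated as above:

    drones = mix_audio_tracks(drones0, drones1, 0.5)   # drone pair, equal weight
    mtractor = mix_audio_tracks(drones, music, 0.3)    # drones under the music
    tractor = mix_audio_tracks(audiomix, video, 0.6)   # rendered wav vs. clip audio

    output = mlt.Tractor(profile)
    output_tracks = output.multitrack()
    output_tracks.connect(tractor, 0)
    output_tracks.connect(overlay, 1)                  # text overlay composited on top
    output.plant_transition(mlt.Transition(profile, "composite"))
    save_xml(output, target)

Because vocals, music and drones are now also written out as standalone XML playlists, encode.py can render the vocals separately and place them on their own 5.1 channels instead of baking them into the stereo mix.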