From baf6625bde6a2b1f5772716e5db54ae5b5f382fb Mon Sep 17 00:00:00 2001
From: j
Date: Tue, 3 Oct 2017 14:12:07 +0000
Subject: [PATCH] tune gongs

---
 render.py     | 12 +++++++++---
 render_mlt.py | 34 +++++++++++++++++++++-------------
 2 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/render.py b/render.py
index 9556cf5..2d20ab0 100755
--- a/render.py
+++ b/render.py
@@ -348,6 +348,7 @@ def sequence(seq, letter):
     if letter in VOCALS:
         position = 0
         loop = 0
+        used = []
         while position < duration:
             n = seq()
             # vocals should start after one blank
@@ -358,8 +359,12 @@ def sequence(seq, letter):
                 n = seq() / 2 # (0 - 5 seconds)
                 position += add_blank(result[track], min(n, duration-position))
             else:
-                n = seq()
-                clip = VOCALS[letter][n]
+                clip = None
+                if len(used) == len(VOCALS[letter]):
+                    break
+                while clip is None or clip['path'] in used:
+                    n = seq()
+                    clip = VOCALS[letter][n]
                 position += clip['duration']
                 if result[track] and position > duration \
                         and result[track][-1].get('blank') \
@@ -369,6 +374,7 @@
                     if not result[track][-1]['duration']:
                         result[track].pop(-1)
                 if position <= duration:
+                    used.append(clip['path'])
                     result[track].append(clip)
                 else:
                     position -= clip['duration']
@@ -458,5 +464,5 @@ if __name__ == '__main__':
         fd.write(current)
     if render_xml:
         if current != old or os.path.getmtime(tjson) < os.path.getmtime('render_mlt.py'):
-            subprocess.call(['./render_mlt.py', '--prefix', prefix, tjson])
+            subprocess.call(['./render_mlt.py', '--prefix', opts.prefix, tjson])
             #subprocess.call(['./render_audio.py', tjson])
diff --git a/render_mlt.py b/render_mlt.py
index 4e06fb0..2dcf268 100755
--- a/render_mlt.py
+++ b/render_mlt.py
@@ -13,10 +13,10 @@ from PyQt5 import QtWidgets
 usage = "usage: %(prog)s [options] json"
 parser = ArgumentParser(usage=usage)
 parser.add_argument('-p', '--prefix', dest='prefix', help='version prefix', default='.')
-parser.add_argument('files', metavar='path', type=str, nargs='*', help='json files')
+parser.add_argument('files', metavar='path', type=str, nargs='*', help='json source file')
 opts = parser.parse_args()
 
-if opts.prefix.endswith('performance'):
+if 'performance' in opts.prefix:
     version = 'performance'
 else:
     version = 'main'
@@ -111,7 +111,7 @@ fps = 60
 profile = mlt.Profile("atsc_1080p_%d" % fps)
 #profile.set_explicit(1)
 
-source = sys.argv[1]
+source = opts.files[0]
 target = source.replace('.json', '.xml')
 target_audio = source.replace('.json', '.audio.xml')
 
@@ -212,26 +212,34 @@ save_xml(music, target_music)
 
 if version == 'performance':
     # render gongs
-    subprocess.call([
-        # offset in pi, duration, number of tracks, target
-        './render_gongs.py',
-        str(data['gongs']['offset']),
-        str(duration),
-        str(data['gongs']['tracks']),
-        gongs_wav
-    ])
+    render_gongs = not os.path.exists(gongs_wav)
+    if not render_gongs:
+        render_gongs = os.path.getmtime(source) > os.path.getmtime(gongs_wav)
+    if render_gongs:
+        subprocess.call([
+            # offset in pi, duration, number of tracks, target
+            './render_gongs.py',
+            str(data['gongs']['offset']),
+            str(duration),
+            str(data['gongs']['tracks']),
+            gongs_wav
+        ])
+
 
     # load gongs
     add_audio_clip(gongs, gongs_wav, int(duration * fps), int(5*fps))
     # mix gongs + music
-    mtractor = mix_audio_tracks(gongs, music, 0.3)
+    mtractor = mix_audio_tracks(gongs, music, 0.15)
 else:
     # mix drones + music
     mtractor = mix_audio_tracks(drones, music, 0.3)
 
 norm = mlt.Filter(profile, "volume")
 # lower volume
-norm.set("gain", "-12dB")
+if version == 'performance':
+    norm.set("gain", "-6dB")
+else:
+    norm.set("gain", "-12dB")
 mtractor.plant_filter(norm)
 
 # background and vocals