tune gongs

commit baf6625bde
parent 6aa6b9bab0
Author: j
Date: 2017-10-03 14:12:07 +00:00

2 changed files with 30 additions and 16 deletions

View file

@@ -348,6 +348,7 @@ def sequence(seq, letter):
     if letter in VOCALS:
         position = 0
         loop = 0
+        used = []
         while position < duration:
             n = seq()
             # vocals should start after one blank
@@ -358,8 +359,12 @@ def sequence(seq, letter):
                 n = seq() / 2 # (0 - 5 seconds)
                 position += add_blank(result[track], min(n, duration-position))
             else:
-                n = seq()
-                clip = VOCALS[letter][n]
+                clip = None
+                if len(used) == len(VOCALS[letter]):
+                    break
+                while clip is None or clip['path'] in used:
+                    n = seq()
+                    clip = VOCALS[letter][n]
                 position += clip['duration']
                 if result[track] and position > duration \
                     and result[track][-1].get('blank') \
@@ -369,6 +374,7 @@ def sequence(seq, letter):
                     if not result[track][-1]['duration']:
                         result[track].pop(-1)
                 if position <= duration:
+                    used.append(clip['path'])
                     result[track].append(clip)
                 else:
                     position -= clip['duration']
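The three hunks above introduce a used list so that each vocal clip is picked at most once per pass: the inner loop keeps drawing indices until it finds a clip whose path has not been used yet, and the track stops early once every clip has been consumed. A minimal standalone sketch of that selection pattern, with pick_unused_clip, clips and pick_index as hypothetical stand-ins for the script's VOCALS[letter] lookup and seq() generator:

import random

def pick_unused_clip(clips, used, pick_index):
    # Return a clip whose 'path' is not in `used`, or None once every clip was taken.
    if len(used) == len(clips):
        return None
    clip = None
    while clip is None or clip['path'] in used:
        clip = clips[pick_index()]
    return clip

clips = [{'path': 'a.wav', 'duration': 2.0}, {'path': 'b.wav', 'duration': 3.5}]
used = []
clip = pick_unused_clip(clips, used, lambda: random.randrange(len(clips)))
if clip is not None:
    used.append(clip['path'])  # mirrors the used.append() guarded by position <= duration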
@@ -458,5 +464,5 @@ if __name__ == '__main__':
         fd.write(current)
     if render_xml:
         if current != old or os.path.getmtime(tjson) < os.path.getmtime('render_mlt.py'):
-            subprocess.call(['./render_mlt.py', '--prefix', prefix, tjson])
+            subprocess.call(['./render_mlt.py', '--prefix', opts.prefix, tjson])
             #subprocess.call(['./render_audio.py', tjson])
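The surrounding condition re-runs render_mlt.py only when the timeline JSON changed or when render_mlt.py itself is newer than the JSON; the second file below applies the same staleness idea to gongs.wav. A generic sketch of that mtime check, with needs_rebuild as a hypothetical helper and purely illustrative file names:

import os

def needs_rebuild(output, *inputs):
    # Rebuild if the output is missing or any input is newer than it.
    if not os.path.exists(output):
        return True
    out_mtime = os.path.getmtime(output)
    return any(os.path.getmtime(path) > out_mtime for path in inputs)

# e.g. needs_rebuild('gongs.wav', 'timeline.json') corresponds to the
# render_gongs check added below; the file names here are only illustrative.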

View file

@@ -13,10 +13,10 @@ from PyQt5 import QtWidgets
 usage = "usage: %(prog)s [options] json"
 parser = ArgumentParser(usage=usage)
 parser.add_argument('-p', '--prefix', dest='prefix', help='version prefix', default='.')
-parser.add_argument('files', metavar='path', type=str, nargs='*', help='json files')
+parser.add_argument('files', metavar='path', type=str, nargs='*', help='json source file')
 opts = parser.parse_args()
-if opts.prefix.endswith('performance'):
+if 'performance' in opts.prefix:
     version = 'performance'
 else:
     version = 'main'
@@ -111,7 +111,7 @@ fps = 60
 profile = mlt.Profile("atsc_1080p_%d" % fps)
 #profile.set_explicit(1)
-source = sys.argv[1]
+source = opts.files[0]
 target = source.replace('.json', '.xml')
 target_audio = source.replace('.json', '.audio.xml')
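Together with the argparse hunk above, this moves the script fully onto the parsed arguments: the JSON path now comes from opts.files[0] rather than sys.argv[1], and the version is chosen by a substring test on --prefix, so any prefix containing 'performance' selects the performance mix rather than only one ending with it. A quick illustration with made-up argument values:

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('-p', '--prefix', dest='prefix', help='version prefix', default='.')
parser.add_argument('files', metavar='path', type=str, nargs='*', help='json source file')

# A prefix that merely contains "performance" now selects the performance mix;
# the old endswith() test would have missed this one.
opts = parser.parse_args(['--prefix', 'performance-2017', 'timeline.json'])
version = 'performance' if 'performance' in opts.prefix else 'main'
source = opts.files[0]  # replaces sys.argv[1]
assert (version, source) == ('performance', 'timeline.json')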
@@ -212,26 +212,34 @@ save_xml(music, target_music)
 if version == 'performance':
     # render gongs
-    subprocess.call([
-        # offset in pi, duration, number of tracks, target
-        './render_gongs.py',
-        str(data['gongs']['offset']),
-        str(duration),
-        str(data['gongs']['tracks']),
-        gongs_wav
-    ])
+    render_gongs = not os.path.exists(gongs_wav)
+    if not render_gongs:
+        render_gongs = os.path.getmtime(source) > os.path.getmtime(gongs_wav)
+    if render_gongs:
+        subprocess.call([
+            # offset in pi, duration, number of tracks, target
+            './render_gongs.py',
+            str(data['gongs']['offset']),
+            str(duration),
+            str(data['gongs']['tracks']),
+            gongs_wav
+        ])
     # load gongs
     add_audio_clip(gongs, gongs_wav, int(duration * fps), int(5*fps))
     # mix gongs + music
-    mtractor = mix_audio_tracks(gongs, music, 0.3)
+    mtractor = mix_audio_tracks(gongs, music, 0.15)
 else:
     # mix drones + music
     mtractor = mix_audio_tracks(drones, music, 0.3)

 norm = mlt.Filter(profile, "volume")
 # lower volume
-norm.set("gain", "-12dB")
+if version == 'performance':
+    norm.set("gain", "-6dB")
+else:
+    norm.set("gain", "-12dB")
 mtractor.plant_filter(norm)

 # background and vocals
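Taken together, this hunk is the actual gong tuning: gongs.wav is regenerated only when it is missing or older than the JSON source, the gongs are mixed against the music at 0.15 instead of 0.3, and the performance mix is attenuated by 6 dB instead of 12 dB. For reference, on the usual amplitude scale -6 dB is roughly a factor of 0.5 and -12 dB roughly 0.25; a quick check:

def db_to_gain(db):
    # Standard amplitude conversion: gain = 10 ** (dB / 20)
    return 10 ** (db / 20.0)

print(round(db_to_gain(-6), 3))   # ~0.501
print(round(db_to_gain(-12), 3))  # ~0.251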