diff --git a/render_mlt.py b/render_mlt.py
index 471f951..2d7c747 100755
--- a/render_mlt.py
+++ b/render_mlt.py
@@ -50,28 +50,30 @@ def add_color(playlist, color, duration):
     red.set_in_and_out(0, duration)
     playlist.append(red)
 
-def add_clip(playlist, file_, in_, duration):
+def add_clip(playlist, clip, in_, duration):
+    file_ = clip['path']
     if not isinstance(file_, str):
         file_ = file_.encode('utf-8')
-    '''
     info = ox.avinfo(file_)
     tractor = mlt.Tractor(profile)
     tracks = tractor.multitrack()
     video = mlt.Playlist()
-    '''
-    clip = mlt.Producer(profile, file_)
-    clip.set_in_and_out(in_, in_+duration-1)
-    playlist.append(clip)
-    '''
-    video.append(clip)
+    c = mlt.Producer(profile, file_)
+    c.set_in_and_out(in_, in_+duration-1)
+    video.append(c)
     tracks.connect(video, 0)
-    if not not info.get('audio'):
+    if not info.get('audio'):
         audio = mlt.Playlist()
         add_silence(audio, duration)
         tracks.connect(audio, 1)
-    #tracks.set_in_and_out(in_, in_+duration-1)
+    else:
+        volume = mlt.Filter(profile, "volume")
+        if clip.get('tag', '') == 'gong':
+            volume.set("gain", '0.8')
+        else:
+            volume.set("gain", '0.12')
+        tractor.plant_filter(volume)
     playlist.append(tractor)
-    '''
 
 def add_audio_clip(playlist, file_, duration):
     in_ = 0
@@ -112,7 +114,7 @@ for clip in data['clips']:
         sys.exit(1)
     # fixme seconds to fps!
     in_ = int(clip['in'] * fps)
-    add_clip(video, clip['path'], in_, frames)
+    add_clip(video, clip, in_, frames)
 
 add_color(video, 'black', 60)
 for clip in data['text']:
@@ -160,9 +162,11 @@ composite = mlt.Transition(profile, "composite")
 
 tractor.plant_transition(composite)
 
+'''
 volume = mlt.Filter(profile, "volume")
 volume.set("gain", '0.12')
 tractor.plant_filter(volume)
+'''
 
 def mix_audio_tracks(a, b, ratio):
     tractor = mlt.Tractor(profile)
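
The per-clip volume filter added to add_clip() above replaces the single global volume filter that the last hunk comments out: clips tagged 'gong' keep a gain of 0.8, every other clip is turned down to 0.12. A minimal sketch of that gain rule in plain Python (the clip_gain() helper and the file names are hypothetical; the clip dicts mirror the shape the script reads from data['clips']):

    def clip_gain(clip):
        # 'tag' is optional in the clip dict passed to add_clip(); only
        # clips tagged 'gong' keep a high gain, everything else is ducked
        return '0.8' if clip.get('tag', '') == 'gong' else '0.12'

    # the returned value is what the patch passes to
    # mlt.Filter(profile, "volume").set("gain", ...)
    assert clip_gain({'path': 'gong.mp4', 'tag': 'gong'}) == '0.8'
    assert clip_gain({'path': 'clip.mp4'}) == '0.12'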