diff --git a/render_mlt.py b/render_mlt.py
index 0560486..471f951 100755
--- a/render_mlt.py
+++ b/render_mlt.py
@@ -15,7 +15,11 @@ app = QtWidgets.QApplication(sys.argv)
 
 #mlt.mlt_log_set_level(40) # verbose
 mlt.Factory.init()
-tractor = mlt.Tractor()
+fps = 60
+profile = mlt.Profile("atsc_1080p_%d" % fps)
+#profile.set_explicit(1)
+
+tractor = mlt.Tractor(profile)
 tractor.mark_in = -1
 tractor.mark_out = -1
 
@@ -41,14 +45,6 @@ vocals = mlt.Playlist()
 drones0 = mlt.Playlist()
 drones1 = mlt.Playlist()
 
-fps = 60
-profile = mlt.Profile("atsc_1080p_%d" % fps)
-#profile.set_explicit(1)
-
-# get profile from clip
-#clip = mlt.Producer(profile, 'test.mp4')
-#profile.from_producer(clip)
-
 def add_color(playlist, color, duration):
     red = mlt.Producer(profile, 'color:' + color)
     red.set_in_and_out(0, duration)
@@ -59,7 +55,7 @@ def add_clip(playlist, file_, in_, duration):
     file_ = file_.encode('utf-8')
     '''
     info = ox.avinfo(file_)
-    tractor = mlt.Tractor()
+    tractor = mlt.Tractor(profile)
     tracks = tractor.multitrack()
     video = mlt.Playlist()
     '''
@@ -169,7 +165,7 @@ volume.set("gain", '0.12')
 tractor.plant_filter(volume)
 
 def mix_audio_tracks(a, b, ratio):
-    tractor = mlt.Tractor()
+    tractor = mlt.Tractor(profile)
     tractor.mark_in = -1
     tractor.mark_out = -1
 
@@ -224,7 +220,7 @@ add_audio_clip(audiomix, target_audio_wav, int(duration * fps))
 
 dtractor = mix_audio_tracks(audiomix, tractor, 0.29)
 
-output = mlt.Tractor()
+output = mlt.Tractor(profile)
 tractor.mark_in = -1
 tractor.mark_out = -1
 output_tracks = output.multitrack()
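
Note for review: the pattern this patch applies is to create the mlt.Profile once, before any mlt.Tractor is constructed, and then pass that profile to every Tractor (and Producer) so all tracks share the same frame rate and resolution. Below is a minimal, self-contained sketch of that pattern; the color clip and the avformat consumer writing out.mp4 are illustrative assumptions for the sketch only, not taken from render_mlt.py.

# Minimal sketch of the profile-first pattern (illustrative, not part of render_mlt.py).
import time
import mlt

mlt.Factory.init()

fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)   # explicit 1080p/60 profile, created up front

tractor = mlt.Tractor(profile)                 # every Tractor now takes the shared profile
tracks = tractor.multitrack()

playlist = mlt.Playlist()
clip = mlt.Producer(profile, 'color:red')      # producers use the same profile
clip.set_in_and_out(0, 10 * fps)               # 10 seconds at 60 fps
playlist.append(clip)
tracks.connect(playlist, 0)

# Assumed output target for this sketch; render_mlt.py configures its own consumer.
consumer = mlt.Consumer(profile, 'avformat', 'out.mp4')
consumer.connect(tractor)
consumer.start()
while not consumer.is_stopped():
    time.sleep(0.5)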