render updates

This commit is contained in:
j 2026-01-22 12:24:32 +01:00
commit f6fbb9ab81
2 changed files with 110 additions and 3 deletions

View file

@ -187,16 +187,32 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
if clip.get('volume') is not None:
volume_front = '%0.2f' % (float(volume_front) + clip['volume'])
volume_rear = '%0.2f' % (float(volume_rear) + clip['volume'])
audio_filter = {
'mono': [
["channels", "2"],
],
'dynamic_loudness': [
["target_loudness", "-35"],
["min_gain", "-15"],
["max_gin", "15"],
],
'volume': volume_front,
'fadein': '00:00:00.125'
}
scene['audio-front']['A2'].append({
'duration': clip['duration'],
'src': audio,
'filter': {'volume': volume_front},
'filter': audio_filter.copy()
})
'''
audio_filter['volume'] = volume_rear
scene['audio-rear']['A2'].append({
'duration': clip['duration'],
'src': audio,
'filter': {'volume': volume_rear},
'filter': audio_filter.copy()
})
'''
used.append(clip)
print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
scene_duration = int(get_scene_duration(scene) * fps)
@ -309,6 +325,8 @@ def get_fragments(clips, voice_over, prefix):
fragment['tags'].append(sub['value'])
elif sub['key'] == "tags" and sub['operator'] == '!=':
fragment['tags'].append(sub['value'])
elif sub['key'] == 'type' and sub['value'] in ('source', ''):
pass
else:
print(l.name, 'unknown sub condition', sub)
elif con.get('key') == "tags" and con['operator'] == '==':
@ -829,6 +847,9 @@ def generate_clips(options):
else:
clip[type_] = target
durations.append(e.files.filter(selected=True)[0].duration)
if not durations:
print(i.public_id, 'no duration!', clip)
continue
clip["duration"] = min(durations)
# trim to a multiple of the output fps
d1 = int(clip["duration"] * 24) / 24
@ -849,7 +870,7 @@ def generate_clips(options):
seqid = re.sub(r"Night March_(\d+)", "S\\1_", seqid)
seqid = re.sub(r"_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
seqid = seqid.split('_')[:2]
seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
seqid = [b[1:] if b[:1] in ('B', 'S') else '0' for b in seqid]
seqid[1] = resolve_roman(seqid[1])
seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
if not seqid[1]:

86
render_sound.py Normal file
View file

@ -0,0 +1,86 @@
import os
import subprocess
import ox
import itemlist.models
import item.models
from .render_kdenlive import KDEnliveProject, _CACHE
from .render import default_prefix as root
def render_music():
    # Stub: music rendering is not implemented yet; kept so callers can
    # already reference it alongside render_forest().
    pass
def render_forest():
    """Render all 'Forest' items into a 5.1 mix with the stereo on the rear channels.

    Builds a kdenlive project from every Item whose data.type contains
    'Forest' (ordered by sort title), renders it to a stereo forest.wav with
    melt, splits that into left/right mono files, generates a matching-length
    silence track for the four non-rear channels, and merges the six mono
    streams into render/forest-5.1.mp4. All work happens inside `root`;
    intermediate wav files are deleted afterwards.

    Note: this function changes the process working directory to `root`.
    """
    project = KDEnliveProject(root)
    qs = item.models.Item.objects.filter(
        data__type__icontains='Forest'
    ).order_by('sort__title')
    for clip in qs:
        # First (only) file of the item is the audio source.
        src = clip.files.all()[0].data.path
        project.append_clip('A1', {
            "src": src,
            "duration": clip.sort.duration,
            "filter": {
            },
        })
    path = os.path.join(root, "forest.kdenlive")
    with open(path, 'w') as fd:
        fd.write(project.to_xml())

    def _run(cmd):
        # Log and execute one external command.
        print(" ".join([str(x) for x in cmd]))
        subprocess.call(cmd)

    # All remaining steps use paths relative to root.
    os.chdir(root)

    # Render the timeline first: forest.wav must exist before its duration
    # can be probed (previously avinfo ran while the command list was still
    # being built, before melt had produced the file).
    _run([
        "melt", "forest.kdenlive", '-quiet', '-consumer', 'avformat:forest.wav'
    ])
    info = ox.avinfo('forest.wav')

    # Silence track of the same duration, used for the four front channels.
    # Written as silence.wav — the name the merge step and the cleanup loop
    # actually use (previously generated as forest_silence.wav and never read).
    _run([
        "ffmpeg", "-y",
        "-nostats", "-loglevel", "error",
        "-f", "lavfi", "-i", "anullsrc=r=48000:cl=mono",
        "-t", str(info["duration"]),
        "silence.wav"
    ])

    # Split the stereo mix into two mono files.
    _run([
        "ffmpeg", "-y",
        "-nostats", "-loglevel", "error",
        "-i", "forest.wav",
        "-filter_complex",
        "[0:0]pan=1|c0=c0[left]; [0:0]pan=1|c0=c1[right]",
        "-map", "[left]", "forest_left.wav",
        "-map", "[right]", "forest_right.wav",
    ])

    # Merge six mono inputs into one 6-channel stream: silence on the first
    # four inputs, the forest stereo on the last two (rear left/right).
    _run([
        "ffmpeg", "-y",
        "-nostats", "-loglevel", "error",
        "-i", "silence.wav",
        "-i", "silence.wav",
        "-i", "silence.wav",
        "-i", "silence.wav",
        "-i", "forest_left.wav",
        "-i", "forest_right.wav",
        "-filter_complex", "[0:a][1:a][2:a][3:a][4:a][5:a]amerge=inputs=6[a]",
        "-map", "[a]",
        "-ar", "48000",
        "-c:a", "aac", "render/forest-5.1.mp4"
    ])

    # Remove intermediate files; only the final mp4 is kept.
    for name in (
        "forest.wav",
        "forest_left.wav",
        "forest_right.wav",
        "silence.wav",
    ):
        if os.path.exists(name):
            os.unlink(name)