better random

This commit is contained in:
j 2023-11-12 18:30:06 +01:00
parent 3b9d8f7275
commit 88a8b84d04

View file

@@ -1,4 +1,5 @@
#!/usr/bin/python3 #!/usr/bin/python3
from collections import defaultdict
import json import json
import os import os
import subprocess import subprocess
@@ -12,28 +13,28 @@ from .pi import random
from .render_kdenlive import KDEnliveProject, _CACHE from .render_kdenlive import KDEnliveProject, _CACHE
def random_choice(seq, items, pop=False): def random_int(seq, length):
n = n_ = len(items) - 1 n = n_ = length - 1
#print('len', n) #print('len', n)
if n == 0: if n == 0:
if pop: return n
return items.pop(n) r = seq() / 9 * 10
return items[n]
r = seq()
base = 10 base = 10
while n > 10: while n > 10:
n /= 10 n /= 10
#print(r) r += seq() / 9 * 10
r += seq()
base += 10 base += 10
r = int(n_ * r / base) r = int(round(n_ * r / base))
#print('result', r, items) return r
def random_choice(seq, items, pop=False):
n = random_int(seq, len(items))
if pop: if pop:
return items.pop(r) return items.pop(n)
return items[r] return items[n]
def chance(seq, chance): def chance(seq, chance):
return (seq() / 10) >= chance return (seq() / 9) >= chance
def get_clip_by_seqid(clips, seqid): def get_clip_by_seqid(clips, seqid):
selected = None selected = None
@@ -45,7 +46,6 @@ def get_clip_by_seqid(clips, seqid):
return clips.pop(i) return clips.pop(i)
return None return None
def compose(clips, target=150, base=1024, voice_over=None): def compose(clips, target=150, base=1024, voice_over=None):
length = 0 length = 0
scene = { scene = {
@@ -77,21 +77,23 @@ def compose(clips, target=150, base=1024, voice_over=None):
}, },
} }
all_clips = clips.copy() all_clips = clips.copy()
seq = random(base) seq = random(10000 + base * 1000)
used = [] used = []
voice_overs = [] voice_overs = []
if voice_over: if voice_over:
vo_keys = list(voice_over) vo_keys = list(sorted(voice_over))
if chance(seq, 0.5): if chance(seq, 0.5):
voice_overs.append(voice_over[vo_keys[chance(seq, len(vo_keys))]]) vo_key = vo_keys[random_int(seq, len(vo_keys))]
voice_overs.append(voice_over[vo_key])
elif len(vo_keys) >= 2: elif len(vo_keys) >= 2:
vo1 = vo_keys.pop(chance(seq, len(vo_keys))) vo1 = vo_keys.pop(random_int(seq, len(vo_keys)))
vo2 = vo_keys.pop(chance(seq, len(vo_keys))) vo2 = vo_keys.pop(random_int(seq, len(vo_keys)))
voice_overs.append(voice_over[vo1]) voice_overs.append(voice_over[vo1])
if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target: if voice_over[vo1]["duration"] + voice_over[vo2]["duration"] < target:
print("adding second vo") print("adding second vo")
voice_overs.append(voice_over[vo2]) voice_overs.append(voice_over[vo2])
print("vo:", [x['src'] for x in voice_overs], list(sorted(voice_over)))
vo_min = sum([vo['duration'] for vo in voice_overs]) vo_min = sum([vo['duration'] for vo in voice_overs])
if vo_min > target: if vo_min > target:
target = vo_min target = vo_min
@@ -108,10 +110,15 @@ def compose(clips, target=150, base=1024, voice_over=None):
vo_min += offset vo_min += offset
for vo in voice_overs: for vo in voice_overs:
voc = vo.copy() voc = vo.copy()
voc['filter'] = {'volume': '3'} a, b = '3', '-6'
if 'Whispered' in voc['src']:
a, b = '4', '-5'
elif 'Read' in voc['src']:
a, b = '6', '-3'
voc['filter'] = {'volume': a}
scene['audio-center']['A1'].append(voc) scene['audio-center']['A1'].append(voc)
vo_low = vo.copy() vo_low = vo.copy()
vo_low['filter'] = {'volume': '-6'} vo_low['filter'] = {'volume': b}
scene['audio-rear']['A1'].append(vo_low) scene['audio-rear']['A1'].append(vo_low)
clip = None clip = None
@@ -235,10 +242,12 @@ def compose(clips, target=150, base=1024, voice_over=None):
scene['audio-front']['A2'].append({ scene['audio-front']['A2'].append({
'duration': clip['duration'], 'duration': clip['duration'],
'src': foley, 'src': foley,
'filter': {'volume': '-4'},
}) })
scene['audio-rear']['A2'].append({ scene['audio-rear']['A2'].append({
'duration': clip['duration'], 'duration': clip['duration'],
'src': foley, 'src': foley,
'filter': {'volume': '-4'},
}) })
used.append(clip) used.append(clip)
print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min)) print("scene duration %0.3f (target: %0.3f, vo_min: %0.3f)" % (length, target, vo_min))
@@ -284,7 +293,6 @@ def render(root, scene, prefix=''):
def get_fragments(clips, voice_over, prefix): def get_fragments(clips, voice_over, prefix):
import itemlist.models import itemlist.models
import item.models import item.models
from collections import defaultdict
fragments = [] fragments = []
@@ -351,6 +359,8 @@ def render_all(options):
target = fragment_target = duration / len(fragments) target = fragment_target = duration / len(fragments)
base_prefix = os.path.join(prefix, 'render', str(base)) base_prefix = os.path.join(prefix, 'render', str(base))
clips_used = [] clips_used = []
stats = defaultdict(lambda: 0)
for fragment in fragments: for fragment in fragments:
fragment_id = int(fragment['name'].split(' ')[0]) fragment_id = int(fragment['name'].split(' ')[0])
name = fragment['name'].replace(' ', '_') name = fragment['name'].replace(' ', '_')
@@ -368,6 +378,9 @@ def render_all(options):
clips_used += used clips_used += used
scene_duration = get_scene_duration(scene) scene_duration = get_scene_duration(scene)
print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target)) print("%s %6.3f -> %6.3f (%6.3f)" % (name, target, scene_duration, fragment_target))
src = [a for a in scene['audio-rear']['A1'] if 'src' in a][0]['src']
stats[src.split('/')[-2]] += 1
position += scene_duration position += scene_duration
target_position += fragment_target target_position += fragment_target
if position > target_position: if position > target_position:
@@ -482,5 +495,6 @@ def render_all(options):
os.unlink(fn) os.unlink(fn)
print("Duration - Target: %s Actual: %s" % (target_position, position)) print("Duration - Target: %s Actual: %s" % (target_position, position))
print(json.dumps(dict(stats), sort_keys=True, indent=2))
with open(_cache, "w") as fd: with open(_cache, "w") as fd:
json.dump(_CACHE, fd) json.dump(_CACHE, fd)