Compare commits

...

3 commits

Author SHA1 Message Date
j
2b1e2e566b find tags 2026-01-25 00:19:10 +01:00
j
55bfb769cd fixes 2026-01-25 00:18:46 +01:00
j
06e099e797 multiple versions of same type 2026-01-24 23:25:53 +01:00
3 changed files with 44 additions and 13 deletions

View file

@ -253,6 +253,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"autocomplete": true,
"columnWidth": 180,
"filter": true,
"find": true,
"sort": true
},
{

View file

@ -575,11 +575,15 @@ def replace_character_motion_control(item, character, keep=False):
return ai
def describe_video(url, neutral=False):
def describe_video(url, neutral=False, face=False):
if face:
extra = 'describe the facial expression to allow an actor to recreate the scene, '
else:
extra = ''
if neutral:
prompt = (
"Detect cuts or scene changes and describe each scene, use as much details as you can. "
"Describe each person incudling detalied apreance, haircut in a gender neutral way, "
f"Describe each person incudling detalied apreance, haircut in a gender neutral way, {extra}"
"describe each objects, animal or plant, describe foreground and backgroud, "
"describe from what angle the scene is filmed, incude details about camera model, lense, depth of field used to film this scene. "
"Use the format: <description of scene 1>. CAMERA CUT TO <description of scene 2>. CAMERA CUT TO <description of scene 3>. "
@ -616,15 +620,15 @@ def describe_item(item, neutral=False):
return describe_video(video_url, neutral)
def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False):
def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=None):
if isinstance(item, str):
item = Item.objects.get(public_id=item)
if isinstance(first_frame, Document):
first_frame = public_document_url(first_frame)
duration = item.sort.duration
frames = int(duration * 24)
if prompt is None:
prompt = describe_item(item, first_frame is not None)
if extra_prompt:
prompt += " " + extra_prompt
prompt_hash = hashlib.sha1((prompt).encode()).hexdigest()
@ -719,8 +723,6 @@ def reshoot_item_segments(item, character, keep=False):
ai.data["model"] = status["model"]
ai.data["seed"] = seed
ai.save()
if not keep:
shutil.rmtree(os.path.dirname(joined_output))
for first_frame in first_frames:
first_frame.add(ai)
if not keep:
@ -1146,6 +1148,7 @@ def process_motion_firstframe(character="P1", keep=False):
try:
replace_character_motion_control(i, character, keep=keep)
except:
i.refresh_from_db()
add_tag(i, 'ai-failed')
print('>> failed', i)
@ -1159,10 +1162,31 @@ def extract_firstframe(character='P1'):
try:
first_frame = replace_character(item, character, 0)
except:
item.refresh_from_db()
add_tag(item, 'ai-failed')
def process_reshoot_firstframe():
    """Queue an AI reshoot for every eligible item in the 'Reshoot-Firstframe' list.

    Items longer than 30 seconds and the excluded item 'HZ' are ignored.
    An item is also skipped when an AI item with the same title already
    exists or when it is tagged 'ai-failed'. Otherwise the most recently
    created attached document is used as the first frame (generating one
    via replace_character if the item has no documents) and reshoot_item
    is invoked. Failures are tagged 'ai-failed' so the item is not retried.
    """
    l = itemlist.models.List.objects.get(name='Reshoot-Firstframe')
    for i in l.items.all():
        if i.sort.duration > 30: continue
        if i.public_id == 'HZ': continue
        # skip items that already have an AI version or previously failed
        ai = Item.objects.filter(data__type__icontains='ai').filter(data__title=i.data['title'])
        if ai.exists() or 'ai-failed' in i.data.get('tags', []):
            print('>> skip', i)
            continue
        first_frame = i.documents.all().order_by('-created').first()
        if not first_frame:
            # no document attached yet — generate a first frame for character P1
            first_frame = replace_character(i, 'P1', 0)
        print(i, first_frame, i.documents.all().count())
        try:
            reshoot_item(i, first_frame=first_frame)
        except Exception:
            # reload before tagging: the failed call may have saved a stale copy
            # (matches the refresh_from_db() pattern of the sibling handlers)
            i.refresh_from_db()
            add_tag(i, 'ai-failed')
            print('>> failed', i)
def process_motion_firstframe():
l = itemlist.models.List.objects.get(name='Motion-Firstframe')
for i in l.items.all():
if i.sort.duration > 30: continue
if i.public_id == 'HZ': continue

View file

@ -873,20 +873,26 @@ def generate_clips(options):
source = e.files.filter(selected=True)[0].data.path
ext = os.path.splitext(source)[1]
type_ = e.data['type'][0].lower()
target = os.path.join(prefix, 'video', type_, i.data['title'] + ext)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(source, target)
if type_ == "source":
source_target = target
clip['loudnorm'] = get_loudnorm(e.files.filter(selected=True)[0])
if type_.startswith('ai:'):
if 'ai' not in clip:
clip['ai'] = {}
clip['ai'][type_[3:]] = target
ai_type = type_[3:]
n = 1
while ai_type in clip['ai']:
ai_type = '%s-%s' % (type_[3:], n)
n += 1
clip['ai'][ai_type] = target
type_ = ai_type
else:
clip[type_] = target
target = os.path.join(prefix, 'video', type_, i.data['title'] + ext)
os.makedirs(os.path.dirname(target), exist_ok=True)
if os.path.islink(target):
os.unlink(target)
os.symlink(source, target)
durations.append(e.files.filter(selected=True)[0].duration)
if not durations:
print(i.public_id, 'no duration!', clip)