more models

j 2026-01-29 16:59:45 +01:00
commit de484af2ae
2 changed files with 237 additions and 22 deletions

View file

@@ -626,12 +626,50 @@ def kling_v2v_reference(item, character, keep=False):
         img.add(ai)
     return ai
+def kling_v2v_edit(item, background, keep=False):
+    # https://fal.ai/models/fal-ai/kling-video/o1/video-to-video/edit
+    # WIP stub: bails out before doing anything; background is unused and
+    # prompt/images/elements below are still empty placeholders.
+    return
+    video_url = public_video_url(item)
+    model = "fal-ai/kling-video/o1/video-to-video/edit"  # from the URL above
+    prompt = ""  # TODO: edit instruction
+    images = []  # TODO: reference image urls
+    image_url = ""  # TODO: first frame url, recorded as "firstframe" below
+    # cache path mirrors the sibling functions
+    prompt_hash = hashlib.sha1((prompt + video_url).encode()).hexdigest()
+    output = "/srv/pandora/static/power/cache/%s_%s/ai.mp4" % (
+        item.public_id,
+        prompt_hash,
+    )
+    elements = {
+        "reference_image_urls": [],
+        "frontal_image": ""
+    }
+    data = {
+        "prompt": prompt,
+        "video_url": video_url,
+        "image_urls": images,
+        "elements": elements,
+        "keep_audio": False,
+        "character_orientation": "video",
+    }
+    print(data)
+    handler = fal_client.submit(model, arguments=data)
+    request_id = handler.request_id
+    print(request_id)
+    result = fal_wait_for(model, request_id)
+    print(result)
+    output_url = result["video"]["url"]
+    ox.net.save_url(output_url, output, overwrite=True)
+    ai = add_ai_variant(item, output, "ai:v2v-replace")
+    ai.data["prompt"] = ox.escape_html(prompt)
+    ai.data["firstframe"] = image_url.split("?")[0]
+    ai.data["model"] = model
+    ai.save()
+    if not keep:
+        shutil.rmtree(os.path.dirname(output))
+        img.add(ai)  # TODO: img (the reference image document) is not defined here
+    return ai
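All of these helpers share one call pattern: submit the payload to fal.ai, poll by request id, download the resulting video, then register it with add_ai_variant. fal_wait_for itself is defined elsewhere in this module; a minimal sketch of what such a helper can look like against the fal_client queue API, assuming a fixed polling interval and no error handling:

    import time
    import fal_client

    def fal_wait_for(model, request_id, interval=10):
        # poll the queue until the request completes, then fetch the result
        while True:
            status = fal_client.status(model, request_id, with_logs=False)
            if isinstance(status, fal_client.Completed):
                return fal_client.result(model, request_id)
            time.sleep(interval)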
 def wan_reference_to_video(foreground, background, keep=False):
     foreground_url = public_video_url(foreground)
     background_url = public_video_url(background)
     prompt = "Use the character from @Video1 and use @Video2 as background"
     model = "wan/v2.6/reference-to-video"
     prompt_hash = hashlib.sha1((prompt + foreground_url + background_url).encode()).hexdigest()
+    item = background
     output = "/srv/pandora/static/power/cache/%s_%s/ai.mp4" % (
         item.public_id,
         prompt_hash,
@@ -646,12 +684,12 @@ def wan_reference_to_video(foreground, background, keep=False):
         "video_urls": [
             foreground_url,
             background_url,
-        ]
+        ],
         "aspect_ratio": "16:9",
         "resolution": "1080p",
-        "enable_prompt_expansion": false,
-        "multi_shots": true,
-        "enable_safety_checker": false
+        "enable_prompt_expansion": False,
+        "multi_shots": True,
+        "enable_safety_checker": False
     }
     print(data)
     handler = fal_client.submit(model, arguments=data)
@@ -669,6 +707,142 @@ def wan_reference_to_video(foreground, background, keep=False):
         shutil.rmtree(os.path.dirname(output))
     return ai
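A hypothetical invocation of wan_reference_to_video; the public_ids are placeholders, and keep=True just preserves the cache directory:

    foreground = Item.objects.get(public_id="abc123")  # clip that supplies the character
    background = Item.objects.get(public_id="def456")  # clip that supplies the scene
    ai = wan_reference_to_video(foreground, background, keep=True)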
+def wan_animate_replace(item, character, keep=False):
+    item_url = public_video_url(item)
+    # assumed: resolve the character to a public document url, as the
+    # ltx_* helpers below do
+    character_url = public_document_url(get_character_document(character))
+    prompt = ""
+    model = "fal-ai/wan/v2.2-14b/animate/replace"
+    prompt_hash = hashlib.sha1((prompt + item_url + character_url).encode()).hexdigest()
+    output = "/srv/pandora/static/power/cache/%s_%s/ai.mp4" % (
+        item.public_id,
+        prompt_hash,
+    )
+    data = {
+        "video_url": item_url,
+        "image_url": character_url,
+        "resolution": "720p",
+        "video_quality": "high",
+        "guidance_scale": 1,
+        "enable_safety_checker": False,
+        "enable_output_safety_checker": False,
+    }
+    print(data)
+    handler = fal_client.submit(model, arguments=data)
+    request_id = handler.request_id
+    print(request_id)
+    result = fal_wait_for(model, request_id)
+    print(result)
+    output_url = result["video"]["url"]
+    ox.net.save_url(output_url, output, overwrite=True)
+    ai = add_ai_variant(item, output, "ai:foreground-background")
+    ai.data["prompt"] = ox.escape_html(prompt)
+    ai.data["model"] = model
+    ai.save()
+    if not keep:
+        shutil.rmtree(os.path.dirname(output))
+    return ai
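Hypothetical usage, assuming "alice" is a name get_character_document can resolve:

    item = Item.objects.get(public_id="abc123")  # placeholder id
    # replace the performer in the clip with the "alice" character reference
    ai = wan_animate_replace(item, "alice")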
+def ltx_a2v(item, character, prompt=None, keep=False):
+    video_url = public_video_url(item)
+    audio_path = item.streams()[0].file.data.path
+    character = get_character_document(character)
+    position = 0
+    cid = character.get_id()
+    first_frame = item.documents.filter(
+        data__character=cid,
+        data__position=position
+    ).order_by('-created').first()
+    if not first_frame:
+        first_frame = replace_character(item, character, position)
+    image_url = public_document_url(first_frame)
+    prefix = "/srv/pandora/static/power/cache/%s_a2v" % (item.public_id)
+    os.makedirs(prefix, exist_ok=True)
+    output = prefix + '/audio.m4a'
+    if not os.path.exists(output):
+        cmd = ['ffmpeg', '-hide_banner', '-nostats', '-i', audio_path, '-vn', '-c:a', 'copy', output]
+        subprocess.call(cmd)
+    if not os.path.exists(output):
+        raise Exception("audio extraction failed: %s" % output)
+    audio_url = public_url(output)
+    model = "fal-ai/ltx-2-19b/audio-to-video"
+    neutral = True
+    if prompt is None:
+        prompt = describe_item(item, neutral)
+    data = {
+        "audio_url": audio_url,
+        "image_url": image_url,
+        "video_size": {"width": 1280, "height": 720},
+        "match_audio_length": True,
+        "fps": 24,
+        "prompt": prompt,
+        "enable_safety_checker": False,
+    }
+    print(data)
+    handler = fal_client.submit(model, arguments=data)
+    request_id = handler.request_id
+    print(request_id)
+    result = fal_wait_for(model, request_id)
+    print(result)
+    output_url = result["video"]["url"]
+    output = prefix + "/ai.mp4"
+    ox.net.save_url(output_url, output, overwrite=True)
+    ai = add_ai_variant(item, output, "ai:audio-to-video")
+    ai.data["model"] = model
+    ai.data["seed"] = result["seed"]
+    # record the first frame before saving so it gets persisted
+    ai.data["firstframe"] = first_frame.get_id()
+    ai.save()
+    first_frame.add(ai)
+    if not keep:
+        shutil.rmtree(prefix)
+    return ai
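Hypothetical usage; prompt is optional and falls back to describe_item:

    item = Item.objects.get(public_id="abc123")  # placeholder id
    # animate the cached first frame, driven by the item's own audio track
    ai = ltx_a2v(item, "alice", prompt="A person speaking directly to camera")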
+def ltx_v2v(item, character, prompt=None, keep=False):
+    video_url = public_video_url(item)
+    character = get_character_document(character)
+    position = 0
+    cid = character.get_id()
+    first_frame = item.documents.filter(
+        data__character=cid,
+        data__position=position
+    ).order_by('-created').first()
+    if not first_frame:
+        first_frame = replace_character(item, character, position)
+    image_url = public_document_url(first_frame)
+    prefix = "/srv/pandora/static/power/cache/%s_ltx_v2v" % (item.public_id)
+    # make sure the cache dir exists; harmless if save_url below creates it anyway
+    os.makedirs(prefix, exist_ok=True)
+    model = "fal-ai/ltx-2-19b/video-to-video"
+    neutral = True
+    if prompt is None:
+        prompt = describe_item(item, neutral)
+    data = {
+        "prompt": prompt,
+        "video_url": video_url,
+        "image_url": image_url,
+        "video_size": {"width": 1280, "height": 720},
+        "match_video_length": True,
+        "fps": 24,
+        "generate_audio": False,
+        "enable_safety_checker": False,
+    }
+    print(data)
+    handler = fal_client.submit(model, arguments=data)
+    request_id = handler.request_id
+    print(request_id)
+    result = fal_wait_for(model, request_id)
+    print(result)
+    output_url = result["video"]["url"]
+    output = prefix + "/ai.mp4"
+    ox.net.save_url(output_url, output, overwrite=True)
+    ai = add_ai_variant(item, output, "ai:video-to-video")
+    ai.data["model"] = model
+    ai.data["seed"] = result["seed"]
+    # record the first frame before saving so it gets persisted
+    ai.data["firstframe"] = first_frame.get_id()
+    ai.save()
+    first_frame.add(ai)
+    if not keep:
+        shutil.rmtree(prefix)
+    return ai
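The video-to-video variant is invoked the same way; unlike ltx_a2v it conditions on the source video itself (match_video_length, generate_audio=False) instead of the extracted audio track:

    ai = ltx_v2v(item, "alice")             # prompt derived via describe_item
    ai = ltx_v2v(item, "alice", keep=True)  # keep the cache dir for debugging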
 def replace_character_motion_control(item, character, keep=False):
     if isinstance(item, str):
         item = Item.objects.get(public_id=item)
@@ -754,8 +928,15 @@ def describe_video(url, neutral=False, face=False):
 def describe_item(item, neutral=False):
     if isinstance(item, str):
         item = Item.objects.get(public_id=item)
+    if item.get("prompt") and neutral:
+        return item.get("prompt")
     video_url = public_video_url(item)
-    return describe_video(video_url, neutral)
+    prompt = describe_video(video_url, neutral)
+    if neutral:
+        item.refresh_from_db()
+        item.data["prompt"] = prompt
+        item.save()
+    return prompt
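With the caching added above, a neutral description is computed once per item and then served from item.data:

    prompt = describe_item("abc123", neutral=True)  # first call runs describe_video
    prompt = describe_item("abc123", neutral=True)  # now returned from item.data["prompt"]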
 def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=None):
@@ -839,13 +1020,25 @@ def reshoot_item_segments(item, character, keep=False):
         segment_video_url = public_url(output)
         prompt = describe_video(segment_video_url, neutral=True)
         prompts.append("Segment %s: " % (n + 1) + prompt)
-        segment_character = character
+        if character == "annotation":
+            a = item.annotations.filter(
+                layer='prompts', start__gte=position, end__gt=position
+            ).order_by('start').first()
+            if a:
+                segment_character = a.value
+            else:
+                segment_character = None
+        else:
+            segment_character = character
         if position:
             segment_character = segment_first_frame_url
-        segment_first_frame = replace_character(
-            item, segment_character, position, seed=seed
-        )
-        segment_first_frame_url = public_document_url(segment_first_frame)
+        if segment_character:
+            segment_first_frame = replace_character(
+                item, segment_character, position, seed=seed
+            )
+            segment_first_frame_url = public_document_url(segment_first_frame)
+        else:
+            segment_first_frame_url = public_url(output.replace(".mp4", "_first.jpg"))
         status = i2v_bytedance(
             segment_first_frame_url, prompt, segment_duration, output_ai, seed=seed
         )
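With the branch above, passing the literal string "annotation" as character lets each segment resolve its own character from the 'prompts' annotation layer, falling back to the segment's extracted first frame when no annotation covers the position:

    reshoot_item_segments(item, "annotation")  # per-segment characters from annotations
    reshoot_item_segments(item, "alice")       # the same character for every segment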
@@ -1432,14 +1625,18 @@ def faceswap_item_segments(item, keep=False):
             stype, segment = segment
         else:
             stype = "n"
-        character = item.annotations.filter(
-            layer='prompts', start__gte=position, end__gt=position
-        ).order_by('start').first().value
-        character = get_character_document(character)
-        chracter_url = public_document_url(character)
         output = "%s/%06d.mp4" % (prefix, n)
         output_ai = "%s/%06d_ai.mp4" % (prefix, n)
+        character = item.annotations.filter(
+            layer='prompts', start__gte=position, end__gt=position
+        ).order_by('start').first()
+        if character:
+            character = character.value
+            character = get_character_document(character)
+            chracter_url = public_document_url(character)
+        else:
+            character = output.replace('.mp4', '_first.jpg')
+            chracter_url = public_url(character)
         segment_duration = segment - position
         if os.path.exists(output):
             segment_video_url = public_url(output)
@@ -1454,12 +1651,6 @@ def faceswap_item_segments(item, keep=False):
         print(result)
         output_url = result["video"]["url"]
         ox.net.save_url(output_url, output_ai, overwrite=True)
-        '''
-        trimmed = "%s/%06d_ai_trimmed.mp4" % (prefix, n)
-        frames = int(segment_duration * 24)
-        trim_video(output_ai, trimmed, frames, stype == "c")
-        processed.append(trimmed)
-        '''
         processed.append(output_ai)
         n += 1
         position = segment

View file

@@ -1112,3 +1112,27 @@ def update_unused():
         l.add(i)
     for i in l.items.filter(public_id__in=set(used)):
         l.remove(i)
+
+def unused_tags():
+    import itemlist.models
+    import item.models
+    prefix = default_prefix
+    with open(os.path.join(prefix, "clips.json")) as fd:
+        clips = json.load(fd)
+    with open(os.path.join(prefix, "voice_over.json")) as fd:
+        voice_over = json.load(fd)
+    fragments = get_fragments(clips, voice_over, prefix)
+    tags = []
+    for fragment in fragments:
+        tags += fragment['tags']
+    used_tags = set(tags)
+    all_tags = {t.value for t in item.models.Facet.objects.filter(key='tags').distinct()}
+    unused_tags = all_tags - used_tags
+    unused_items = itemlist.models.List.objects.get(name='Unused Material').items.all()
+    with open("/srv/pandora/static/power/unused-tags.txt", "w") as fd:
+        for tag in sorted(unused_tags):
+            count = unused_items.filter(data__tags__contains=tag).count()
+            fd.write("%s (%d unused video clips)\n" % (tag, count))
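Hypothetical run of the new report:

    unused_tags()
    # one line per tag, e.g. "warehouse (3 unused video clips)"
    print(open("/srv/pandora/static/power/unused-tags.txt").read())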