Add multi-character support

This commit is contained in:
j 2026-01-26 09:23:47 +01:00
commit ccbde02107

View file

@ -16,6 +16,7 @@ from django.conf import settings
from item.models import Item
from document.models import Document
from archive.models import File, Stream
import itemlist.models
os.environ["FAL_KEY"] = settings.FAL_KEY
@ -34,7 +35,7 @@ def public_url(path):
def public_document_url(document):
url = "%sdocuments/%s/source.%s?token=%s" % (
settings.PUBLIC_URL,
ox.toAZ(document.id),
document.get_id(),
document.extension,
settings.PUBLIC_TOKEN,
)
@ -89,6 +90,64 @@ def trim_video(src, dst, frames, start0=False):
out.release()
cap.release()
def _save_generated_image_document(url, user, title):
    """Download a generated image from *url* and persist it as a new Document.

    Shared persistence path for the two single-character generators below:
    derives the file extension from the URL, creates the Document, downloads
    the remote file into the Document's storage path and refreshes the
    derived metadata (info, ratio, oshash, sort columns).

    Returns the saved Document.
    """
    # The URL may carry a query string ("...jpg?token=x"); strip it before
    # reading the extension, and normalize "jpeg" to the project's "jpg".
    extension = url.split(".")[-1].split("?")[0]
    if extension == "jpeg":
        extension = "jpg"
    document = Document(user=user)
    document.data["title"] = title
    document.extension = extension
    # width/pages are unknown until get_info() has parsed the downloaded file
    document.width = -1
    document.pages = -1
    document.uploading = True
    document.save()
    # NOTE(review): uploading is re-set to True here and never reset to False
    # in this function — TODO confirm that is intended / handled elsewhere.
    document.uploading = True
    name = "data.%s" % document.extension
    document.file.name = document.path(name)
    ox.net.save_url(url, document.file.path, overwrite=True)
    document.get_info()
    document.get_ratio()
    document.oshash = ox.oshash(document.file.path)
    document.save()
    document.update_sort()
    return document


def make_single_character_image(character):
    """Render a neutral full-body portrait of *character* via ByteDance seedream.

    *character* may be a shorthand ("P1".."P5") or a Document; it is resolved
    with get_character_document(type='Character'). Returns the new Document,
    titled after the source with 'Character' replaced by 'Single Character'.
    """
    character = get_character_document(character, type='Character')
    character_url = public_document_url(character)
    data = {
        "model": "seedream-4-5-251128",
        "size": "2K",
        "watermark": False,
        'image': character_url,
        "prompt": "character from image 1 is standing straight up, full body portrait from head to toe. face, clothing, skin are photo realistic, camera facing straight at character. background is white"
    }
    url = bytedance_image_generation(data)
    title = character.data['title'].replace('Character', 'Single Character')
    return _save_generated_image_document(url, character.user, title)


def make_single_character_image_flux(character):
    """Render the same neutral full-body portrait via FLUX image editing.

    Same contract as make_single_character_image(), but the image is produced
    by flux_edit_image() and the resulting Document is titled with
    'FLUX Single Character' instead of 'Single Character'.
    """
    character = get_character_document(character, type='Character')
    character_url = public_document_url(character)
    prompt = 'character from @image 1 is standing straight up, full body portrait from head to toe. face, clothing, skin are photo realistic, camera facing straight at character. background is white'
    url = flux_edit_image([character_url], prompt)
    title = character.data['title'].replace('Character', 'FLUX Single Character')
    return _save_generated_image_document(url, character.user, title)
def bytedance_task(data):
url = "https://ark.ap-southeast.bytepluses.com/api/v3/contents/generations/tasks"
@ -394,6 +453,10 @@ def process_frame(item, prompt, character=None, position=0, seed=None):
img.update_find()
return img
def get_character_document(character, type="Single Character"):
    """Resolve a character shorthand ("P1".."P5") to its Document.

    A known shorthand is looked up by document title ("<type> <id>");
    any other value (already a Document, a URL, ...) is returned
    unchanged so callers can pass it straight through.
    """
    if character not in ("P1", "P2", "P3", "P4", "P5"):
        return character
    title = "%s %s" % (type, character)
    return Document.objects.get(data__title=title)
"""
REPLACE_CHARACTER_PROMPT = "Replace the foreground character in image 1 with the character in image 2, keep the posture, clothing, background, light, atmosphere from image 1, but take the facial features and personality from image 2. Make sure the size of the character is adjusted since the new character is a child and make sure the size of the head matches the body. The quality of the image should be the same between foreground and background, adjust the quality of the character to match the background. Use the style of image 1 for the character: if image 1 is a photo make the character a real person, if image 1 is a drawing make the character a drawn character, if image 1 is a comic use a comic character and so on"
@ -410,8 +473,8 @@ def fal_replace_character(item, character, position=0):
)
if character == "P5":
prompt = prompt.replace("child", "teenager")
if character in ("P1", "P2", "P3", "P4", "P5"):
character = Document.objects.get(data__title="Character " + character)
character = get_character_document(character)
if isinstance(character, Document):
character = public_document_url(character)
image = public_frame_url(item, position)
@ -423,6 +486,11 @@ def fal_replace_character(item, character, position=0):
img.data["prompt"] = prompt
img.data["source"] = item.public_id
img.data["source"] += " " + character.split("?")[0]
if isinstance(character, Document):
img.data["character"] = character.get_id()
else:
img.data["character"] = character
img.data["position"] = position
print(img, img.data)
img.save()
img.update_sort()
@ -434,11 +502,19 @@ def replace_character(item, character, position=0, seed=None):
prompt = REPLACE_CHARACTER_PROMPT
if character == "P5":
prompt = prompt.replace("child", "teenager")
if character in ("P1", "P2", "P3", "P4", "P5"):
character = public_document_url(
Document.objects.get(data__title="Character " + character)
)
return process_frame(item, prompt, character, position, seed=seed)
character = get_character_document(character)
if isinstance(character, Document):
character_url = public_document_url(character)
else:
character_url = character
frame = process_frame(item, prompt, character_url, position, seed=seed)
if isinstance(character, Document):
frame.data["character"] = character.get_id()
else:
frame.data["character"] = character
frame.data["position"] = position
frame.save()
return frame
def kling_lipsync(audio_item, video_item):
@ -474,10 +550,15 @@ def kling_v2v_reference(item, character, keep=False):
character = public_document_url(
Document.objects.get(data__title="Character " + character)
)
character = get_character_document(character)
if isinstance(character, Document):
character_url = public_document_url(character)
else:
character_url = character
video_url = public_video_url(item)
prompt = "Replace the main character in @Video1 with the character from the reference images, adjust the style of the character to match the style of the video"
model = "fal-ai/kling-video/o1/video-to-video/reference"
prompt_hash = hashlib.sha1((prompt + character).encode()).hexdigest()
prompt_hash = hashlib.sha1((prompt + character_url).encode()).hexdigest()
output = "/srv/pandora/static/power/cache/%s_%s/ai.mp4" % (
item.public_id,
prompt_hash,
@ -499,7 +580,7 @@ def kling_v2v_reference(item, character, keep=False):
"keep_audio": False,
"aspect_ratio": "16:9",
"video_url": video_url,
"image_urls": [image_url],
"image_urls": [character_url],
"duration": str(duration)
}
'''
@ -624,11 +705,14 @@ def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=N
if isinstance(item, str):
item = Item.objects.get(public_id=item)
if isinstance(first_frame, Document):
first_frame = public_document_url(first_frame)
first_frame_url = public_document_url(first_frame)
else:
first_frame_url = first_frame
duration = item.sort.duration
frames = int(duration * 24)
neutral = first_frame is not None
if prompt is None:
prompt = describe_item(item, first_frame is not None)
prompt = describe_item(item, neutral)
if extra_prompt:
prompt += " " + extra_prompt
prompt_hash = hashlib.sha1((prompt).encode()).hexdigest()
@ -637,7 +721,7 @@ def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=N
prompt_hash,
)
if first_frame:
status = i2v_bytedance(first_frame, prompt, duration, output)
status = i2v_bytedance(first_frame_url, prompt, duration, output)
else:
status = t2v_bytedance(prompt, duration, output)
@ -654,9 +738,11 @@ def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=N
ai.data["model"] = status["model"]
ai.data["seed"] = status["seed"]
if first_frame:
ai.data["firstframe"] = first_frame.split("?")[0]
if isinstance(first_frame, Document):
first_frame.add(ai)
ai.data["firstframe"] = first_frame.get_id()
else:
ai.data["firstframe"] = first_frame.split("?")[0]
ai.save()
if not keep:
shutil.rmtree(os.path.dirname(output))
@ -719,7 +805,7 @@ def reshoot_item_segments(item, character, keep=False):
ai = add_ai_variant(item, joined_output, "ai:0:reshoot-firstframe")
prompt = "\n\n".join(prompts)
ai.data["prompt"] = ox.escape_html(prompt)
ai.data["firstframe"] = " ".join([ox.toAZ(ff.id) for ff in first_frames])
ai.data["firstframe"] = " ".join([ff.get_id() for ff in first_frames])
ai.data["model"] = status["model"]
ai.data["seed"] = seed
ai.save()
@ -1137,22 +1223,6 @@ def add_tag(item, tag):
item.data['tags'].append(tag)
item.save()
# NOTE(review): per the hunk header "@ -1137,22 +1223,6" this span is the
# copy of process_motion_firstframe DELETED by this commit (a reworked
# version appears at the end of the diff).  The paste stripped all
# indentation, so the body lines sit at column 0 — do not run as-is.
# Purpose: walk the 'Motion-Firstframe' list and run
# replace_character_motion_control on each item not already processed.
def process_motion_firstframe(character="P1", keep=False):
l = itemlist.models.List.objects.get(name='Motion-Firstframe')
for i in l.items.all():
# an existing ai-typed item with the same title means this one is done
ai = Item.objects.filter(data__type__icontains='ai').filter(data__title=i.data['title'])
if ai.exists() or 'ai-failed' in i.data.get('tags', []):
print('>> skip', i)
continue
print(i)
try:
replace_character_motion_control(i, character, keep=keep)
# NOTE(review): bare except swallows everything, including
# KeyboardInterrupt; the item is tagged so later runs skip it.
except:
i.refresh_from_db()
add_tag(i, 'ai-failed')
print('>> failed', i)
def extract_firstframe(character='P1'):
for item in Item.objects.filter(data__type__icontains="source"):
if 'ai-failed' in item.data.get('tags', []):
@ -1165,43 +1235,43 @@ def extract_firstframe(character='P1'):
item.refresh_from_db()
add_tag(item, 'ai-failed')
# NOTE(review): rendered diff hunk ("@ -1165,43 +1235,43") with the +/-
# markers and indentation stripped, so the OLD and NEW bodies of
# process_reshoot_firstframe are interleaved below.  Lines using loop
# variable `i` appear to belong to the removed version; lines using `item`
# (and the new `character`/`position` parameters) to the added one —
# verify against the repository before running.  Not valid Python as-is.
def process_reshoot_firstframe():
def process_reshoot_firstframe(character='P1'):
position = 0
l = itemlist.models.List.objects.get(name='Reshoot-Firstframe')
# -- apparently the REMOVED loop ---------------------------------------------
for i in l.items.all():
if i.sort.duration > 30: continue
if i.public_id == 'HZ': continue
if i.documents.all().count():
ai = Item.objects.filter(data__type__icontains='ai').filter(data__title=i.data['title'])
if ai.exists() or 'ai-failed' in i.data.get('tags', []):
print('>> skip', i)
continue
first_frame = i.documents.all().order_by('-created').first()
# -- apparently the ADDED loop -----------------------------------------------
for item in l.items.all():
if 'ai-failed' in item.data.get('tags', []):
print('>> skip', item)
continue
# long items are re-shot in segments instead of from one first frame
if item.sort.duration > 30:
reshoot_item_segments(item, character)
else:
# reuse a previously generated first frame for this character/position
cid = get_character_document(character).get_id()
first_frame = item.documents.filter(
data__character=cid, data__position=position
).order_by('-created').first()
if not first_frame:
# NOTE(review): old (`i`) and new (`item`) variants of this fallback
# are both present below due to the diff interleave.
first_frame = replace_character(i, 'P1', 0)
print(i, first_frame, i.documents.all().count())
first_frame = replace_character(item, character, position)
if first_frame.items.filter(data__type__icontains='ai:').exists():
continue
print(item, first_frame)
try:
reshoot_item(i, first_frame=first_frame)
reshoot_item(item, first_frame=first_frame)
# NOTE(review): bare except — failures are tagged so later runs skip them.
except:
add_tag(i, 'ai-failed')
print('>> failed', i)
add_tag(item, 'ai-failed')
print('>> failed', item)
# NOTE(review): same diff-interleave situation as above — the OLD body of
# process_motion_firstframe (loop variable `i`, lines through the first
# try/except) and the NEW body (loop variable `item`, `character`/`keep`
# parameters) are concatenated with their +/- markers and indentation
# stripped.  Not valid Python as-is.
def process_motion_firstframe():
def process_motion_firstframe(character="P1", keep=False):
l = itemlist.models.List.objects.get(name='Motion-Firstframe')
# -- apparently the REMOVED loop ---------------------------------------------
for i in l.items.all():
if i.sort.duration > 30: continue
if i.public_id == 'HZ': continue
if i.documents.all().count():
ai = Item.objects.filter(data__type__icontains='ai').filter(data__title=i.data['title'])
if ai.exists() or 'ai-failed' in i.data.get('tags', []):
print('>> skip', i)
continue
first_frame = i.documents.all().order_by('-created').first()
if not first_frame:
first_frame = replace_character(i, 'P1', 0)
print(i, first_frame, i.documents.all().count())
try:
replace_character_motion_control(i, first_frame)
except:
add_tag(i, 'ai-failed')
print('>> failed', i)
# -- apparently the ADDED loop -----------------------------------------------
for item in l.items.all():
ai = Item.objects.filter(data__type__icontains='ai').filter(data__title=item.data['title'])
if ai.exists() or 'ai-failed' in item.data.get('tags', []):
print('>> skip', item)
continue
# NOTE(review): `print(i)` in the new loop looks like a leftover from the
# old version — the loop variable here is `item`, so this would raise
# NameError (or print a stale `i`).  TODO confirm and fix to print(item).
print(i)
try:
replace_character_motion_control(item, character, keep=keep)
except:
item.refresh_from_db()
add_tag(item, 'ai-failed')
print('>> failed', item)