updates
This commit is contained in:
parent
22da0654b5
commit
0c99e2b21a
1 changed file with 37 additions and 5 deletions
40
generate.py
40
generate.py
|
|
@ -191,7 +191,7 @@ def make_single_character_image_flux(character):
|
|||
def bytedance_task(data):
|
||||
url = "https://ark.ap-southeast.bytepluses.com/api/v3/contents/generations/tasks"
|
||||
model = "seedance-1-5-pro-251215"
|
||||
resolution = "720p"
|
||||
resolution = "1080p"
|
||||
defaults = {
|
||||
"model": model,
|
||||
"generate_audio": False,
|
||||
|
|
@ -567,7 +567,7 @@ REPLACE_CHARACTER_PROMPT = "Replace the foreground character in image 1 with the
|
|||
|
||||
REPLACE_CHARACTER_PROMPT = "Replace the foreground character in image 1 with the character in image 2, keep the posture, clothing, background, light, atmosphere from image 1, but take the facial features and personality from image 2. Make sure the size of the character is adjusted since the new character is a child and make sure the size of the head matches the body. The quality of the image should be the same between foreground and background, adjust the quality of the character to match the background. Use the style of image 1 for the character: if image 1 is a photo make the character a real person, if image 1 is a drawing make the character a drawn character, if image 1 is a comic use a comic character, restore any blurred out regions of the image"
|
||||
|
||||
REPLACE_CHARACTER_PROMPT = "Replace the main character in image 1 with the character in image 2, keep the posture, clothing, background, light, atmosphere from image 1, but take the facial features and personality from image 2. Make sure the size of the character is adjusted since the new character is a child and make sure the size of the head matches the body. The quality of the image should be the same between foreground and background, adjust the quality of the character to match the background. Use the style of image 1 for the character: if image 1 is a photo make the character a real person, if image 1 is a drawing make the character a drawn character, if image 1 is a comic use a comic character, restore any blurred out regions of the image"
|
||||
REPLACE_CHARACTER_PROMPT = "Replace the main person in image 1 with the person in image 2, keep the posture, clothing, background, light, atmosphere from image 1, but take the facial features and personality from image 2. Make sure the size of the person is adjusted since the new person is a child and make sure the size of the head matches the body. The quality of the image should be the same between foreground and background, adjust the quality of the person to match the background. Use the style of image 1 for the person: if image 1 is a photo make the person a real person, if image 1 is a drawing make the person a drawn person, if image 1 is a comic use a comic character, restore any blurred out regions of the image"
|
||||
|
||||
|
||||
def fal_replace_character(item, character, position=0):
|
||||
|
|
@ -1089,13 +1089,18 @@ def describe_item(item, neutral=False):
|
|||
return prompt
|
||||
|
||||
|
||||
def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=None):
|
||||
def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=None, last_frame=None, duration=None):
|
||||
if isinstance(item, str):
|
||||
item = Item.objects.get(public_id=item)
|
||||
if isinstance(first_frame, Document):
|
||||
first_frame_url = public_document_url(first_frame)
|
||||
else:
|
||||
first_frame_url = first_frame
|
||||
if isinstance(last_frame, Document):
|
||||
last_frame_url = public_document_url(last_frame)
|
||||
else:
|
||||
last_frame_url = last_frame
|
||||
if duration is None:
|
||||
duration = item.sort.duration
|
||||
frames = int(duration * 24)
|
||||
neutral = first_frame is not None
|
||||
|
|
@ -1109,7 +1114,7 @@ def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=N
|
|||
prompt_hash,
|
||||
)
|
||||
if first_frame:
|
||||
status = i2v_bytedance(first_frame_url, prompt, duration, output)
|
||||
status = i2v_bytedance(first_frame_url, prompt, duration, output, last_frame=last_frame)
|
||||
else:
|
||||
status = t2v_bytedance(prompt, duration, output)
|
||||
|
||||
|
|
@ -1471,6 +1476,33 @@ def fragment_video(filename, segmentdir, segments):
|
|||
print(frame_count, output_path)
|
||||
cv2.imwrite(output_path.replace(".mp4", "_last.jpg"), last)
|
||||
|
||||
def loop_video(filename, out, loops=2):
    """Write a video that plays *filename* back-to-back *loops* times.

    Args:
        filename: path to the source video (anything ``cv2.VideoCapture`` accepts).
        out: output file path; parent directories are created as needed.
        loops: how many times the source is concatenated (default 2).

    The output inherits the source's fps and frame size and is encoded
    with the ``avc1`` (H.264) fourcc.
    """
    filename = str(filename)

    # Probe the source once for the writer's parameters, then release the
    # handle — previously this capture was leaked (reassigned in the copy
    # loop without ever being released).
    cap = cv2.VideoCapture(filename)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()

    fourcc = cv2.VideoWriter_fourcc(*"avc1")

    os.makedirs(os.path.dirname(out), exist_ok=True)

    # Keep the output *path* in `out` and use a separate name for the
    # writer instead of shadowing the parameter.
    writer = cv2.VideoWriter(out, fourcc, fps, (width, height))
    try:
        for _ in range(loops):
            # Re-open the source for each pass and copy every frame.
            cap = cv2.VideoCapture(filename)
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                writer.write(frame)
            cap.release()
    finally:
        # Always finalize the container, even if a read/write fails mid-copy.
        writer.release()
|
||||
|
||||
|
||||
def flux_edit_image(image, prompt):
|
||||
if isinstance(image, str):
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue