Compare commits

...

2 commits

Author SHA1 Message Date
j
39427332b7 p for more 2026-02-01 22:12:31 +01:00
j
6ba0f229be update player 2026-02-01 22:12:11 +01:00
3 changed files with 228 additions and 36 deletions

View file

@ -492,6 +492,38 @@ def process_frame(item, prompt, character=None, position=0, seed=None):
img.update_find()
return img
def replace_background(image, background, prompt=None, seed=None):
    """Composite the character from *image* onto *background* via Seedream.

    Parameters:
        image: Document holding the character image (referenced as
            "image 2" in the default prompt).
        background: either an Item (a frame at position 0 is used) or a
            Document, used as the backdrop ("image 1").
        prompt: optional generation instruction; defaults to placing the
            character from image 2 into image 1.
        seed: optional generation seed for reproducible output.

    Returns:
        The newly created AI image Document, linked back to *image* and
        carrying model/prompt/source metadata in its ``data`` dict.
    """
    model = "seedream-4-5-251128"
    position = 0
    if prompt is None:
        prompt = "Place the character from image 2 into image 1"
    # Items need a frame URL extracted; Documents already resolve directly.
    if isinstance(background, Item):
        background_url = public_frame_url(background, position)
    else:
        background_url = public_document_url(background)
    images = [
        background_url,
        public_document_url(image),
    ]
    data = {
        "model": model,
        "prompt": prompt,
        "image": images,
        "size": "2560x1440",
    }
    # Bug fix: `if seed:` silently discarded a valid seed of 0; test
    # against None so every explicitly supplied seed is forwarded.
    if seed is not None:
        data["seed"] = seed
    url = bytedance_image_generation(data)
    img = add_ai_image(image, position, url)
    # add_ai_image saves the file; refresh before mutating data to avoid
    # clobbering fields written during save.
    img.refresh_from_db()
    img.data["model"] = model
    img.data["prompt"] = prompt
    img.data["source"] = image.get_id()
    img.save()
    img.update_sort()
    img.update_find()
    return img
def get_character_document(character, type="Single Character", age=None):
if character in ("P1", "P2", "P3", "P4", "P5"):
title = type + " " + character
@ -504,7 +536,7 @@ def get_character_document(character, type="Single Character", age=None):
REPLACE_CHARACTER_PROMPT = "Replace the foreground character in image 1 with the character in image 2, keep the posture, clothing, background, light, atmosphere from image 1, but take the facial features and personality from image 2. Make sure the size of the character is adjusted since the new character is a child and make sure the size of the head matches the body. The quality of the image should be the same between foreground and background, adjust the quality of the character to match the background. Use the style of image 1 for the character: if image 1 is a photo make the character a real person, if image 1 is a drawing make the character a drawn character, if image 1 is a comic use a comic character and so on"
"""
REPLACE_CHARACTER_PROMPT = "Replace the foreground character in image 1 with the character in image 2, keep the posture, clothing, background, light, atmosphere from image 1, but take the facial features and personality from image 2. Make sure the size of the character is adjusted since the new character is a child and make sure the size of the head matches the body. The quality of the image should be the same between foreground and background, adjust the quality of the character to match the background. Use the style of image 1 for the character: if image 1 is a photo make the character a real person, if image 1 is a drawing make the character a drawn character, if image 1 is a comic use a comic character, restore any blured out regions of the image"
REPLACE_CHARACTER_PROMPT = "Replace the foreground character in image 1 with the character in image 2, keep the posture, clothing, background, light, atmosphere from image 1, but take the facial features and personality from image 2. Make sure the size of the character is adjusted since the new character is a child and make sure the size of the head matches the body. The quality of the image should be the same between foreground and background, adjust the quality of the character to match the background. Use the style of image 1 for the character: if image 1 is a photo make the character a real person, if image 1 is a drawing make the character a drawn character, if image 1 is a comic use a comic character, restore any blurred out regions of the image"
def fal_replace_character(item, character, position=0):
@ -540,7 +572,8 @@ def fal_replace_character(item, character, position=0):
return img
def replace_character(item, character, position=0, seed=None, extra=None, age=None):
def replace_character(item, character, position=0, seed=None, extra=None, age=None, prompt=None):
if prompt is None:
prompt = REPLACE_CHARACTER_PROMPT
if age:
prompt = prompt.replace("child", "person")
@ -707,16 +740,32 @@ def kling_v2v_edit(item, background, keep=False):
return ai
def wan_reference_to_video(foreground, background, keep=False):
foreground_url = public_video_url(foreground)
background_url = public_video_url(background)
prompt = "Use the character from @Video1 and use @Video2 as background"
prompt = "Character1 dances in the foreground, Character2 as background"
model = "wan/v2.6/reference-to-video"
prompt_hash = hashlib.sha1((prompt + foreground_url + background_url).encode()).hexdigest()
foreground_url = public_video_url(foreground)
#background_url = public_video_url(background)
src = background.files.all()[0].data.path
item = background
output = "/srv/pandora/static/power/cache/%s_%s/ai.mp4" % (
prompt_hash = hashlib.sha1((prompt + foreground_url + src).encode()).hexdigest()
prefix = "/srv/pandora/static/power/cache/%s_%s" % (
item.public_id,
prompt_hash,
)
os.makedirs(prefix, exist_ok=True)
frames = int(foreground.sort.duration * 24)
dst = prefix + "/background.mp4"
trim_video(src, dst, frames)
if not os.path.exists(dst):
raise Exception
fg = prefix + "/foreground.mp4"
shutil.copy(foreground.files.all()[0].data.path, fg)
foreground_url = public_url(fg)
background_url = public_url(dst)
item = background
output = prefix + "/ai.mp4"
for d in [5, 10]:
if d > item.sort.duration:
break
@ -730,7 +779,8 @@ def wan_reference_to_video(foreground, background, keep=False):
],
"aspect_ratio": "16:9",
"resolution": "720p",
"enable_prompt_expansion": False,
"duration": str(duration),
"enable_prompt_expansion": True,
"multi_shots": True,
"enable_safety_checker": False
}
@ -846,12 +896,12 @@ def ltx_a2v(item, character, prompt=None, first_frame=None, keep=False, expand_p
return ai
def vo2video(vo, item, character, position=0, prompt=None, expand_prompt=False):
first_frame = replace_character(item, charcater, position)
def vo2video(vo, item, character, position=0, prompt=None, age=None, expand_prompt=False):
first_frame = replace_character(item, character, position, age=age)
if prompt is None:
# the painting becomes animated and the girl looks into the camera and speaks
prompt = "the scene and character become animated, the character looks into the camera and speaks"
return ltx_a2v(audio, character=character, prompt=prompt, first_frame=first_frame, expand_prompt=expand_prompt)
prompt = "the scene and person become animated, the person looks into the camera and speaks"
return ltx_a2v(vo, character=character, prompt=prompt, first_frame=first_frame, expand_prompt=expand_prompt)
def ltx_v2v(item, character, prompt=None, keep=False):
@ -901,16 +951,25 @@ def ltx_v2v(item, character, prompt=None, keep=False):
shutil.rmtree(prefix)
return ai
def replace_character_motion_control(item, character, keep=False):
def replace_character_motion_control(item, character, first_frame=None, background=None, keep=False):
if isinstance(item, str):
item = Item.objects.get(public_id=item)
add = []
if first_frame:
img = first_frame
image_url = public_document_url(first_frame)
else:
# FIXME get character from documents
if isinstance(character, str):
img = replace_character(item, character, 0)
else:
img = character
if background:
add.append(img)
img = replace_background(img, background)
image_url = public_document_url(img)
video_url = public_video_url(item)
prompt = ""
model = "fal-ai/kling-video/v2.6/pro/motion-control"
@ -942,6 +1001,8 @@ def replace_character_motion_control(item, character, keep=False):
if not keep:
shutil.rmtree(os.path.dirname(output))
img.add(ai)
for img_ in add:
img_.add(ai)
return ai
@ -1045,7 +1106,7 @@ def reshoot_item(item, extra_prompt=None, first_frame=None, keep=False, prompt=N
return ai
def reshoot_item_segments(item, character, keep=False):
def reshoot_item_segments(item, character, age=None, keep=False):
if isinstance(item, str):
item = Item.objects.get(public_id=item)
max_duration = 12
@ -1092,7 +1153,7 @@ def reshoot_item_segments(item, character, keep=False):
segment_character = character
if segment_character:
segment_first_frame = replace_character(
item, segment_character, position, seed=seed
item, segment_character, position, seed=seed, age=age
)
segment_first_frame_url = public_document_url(segment_first_frame)
else:
@ -1510,7 +1571,7 @@ def add_ai_image(item, position, url, extension=None):
extension = "jpg"
file = Document(user=item.user)
file.rightslevel = 2
file.data["title"] = "%s at %s" % (item.get("title"), position)
file.data["title"] = "%s at %s" % (item.data["title"], position)
file.data["position"] = position
file.extension = extension
file.width = -1
@ -1527,6 +1588,7 @@ def add_ai_image(item, position, url, extension=None):
file.save()
file.update_sort()
file.update_find()
if isinstance(item, Item):
file.add(item)
return file
@ -1556,7 +1618,8 @@ def process_reshoot_firstframe(character='P1', age=None, l=None):
if 'ai-failed' in item.data.get('tags', []):
print('>> skip', item)
continue
if item.sort.duration > 30:
if item.sort.duration > 12:
print("only up to 12 second for single shot")
pass
#reshoot_item_segments(item, character)
else:

View file

@ -17,6 +17,29 @@
height: 100%;
font-family: Menlo, sans-serif;
}
#click-to-play {
position: fixed;
top: 0;
left: 0;
right: 0;
bottom: 0;
width: 100%;
height: 100%;
background: #000000;
color: white;
display: flex;
justify-content: center;
z-index: 10;
cursor: pointer;
}
#click-to-play .text {
font-size: 16px;
width: 80%;
margin: auto;
text-align: center;
}
/* Full page container */
.page {
@ -55,6 +78,9 @@
cursor: pointer;
font-size: 18px;
transition: background 0.2s ease, transform 0.2s ease;
&.current {
background: #909090;
}
}
.box:hover {
@ -106,6 +132,7 @@
<body>
<script>
const random = +new Date
const sub_handlers = {};
var playlist = [
@ -160,7 +187,7 @@
nextVideo = currentVideo
currentVideo = next
sub_handlers[nextVideo.id]?.destroy()
fetch(nextVideo.src.replace(/.mp4/, '.ass')).then(async (res) => {
fetch(nextVideo.src.replace(/.mp4/, '.ass') + '?' +random ).then(async (res) => {
const content = await res.text()
const ass = new ASS(content, nextVideo, {
container: subtitles,
@ -203,6 +230,86 @@
];
// Attach global input handlers: a mouse press anywhere toggles
// fullscreen; keyboard and touch events are swallowed so they do not
// propagate to other listeners on the page.
function bindFullscreen() {
    // Keyboard: log and stop propagation only — no fullscreen toggle.
    window.addEventListener('keydown', (e) => {
        console.log('keydown')
        e.stopPropagation()
    })
    // Mouse press: toggle fullscreen and fully swallow the event.
    window.addEventListener('mousedown', (e) => {
        console.log('mousedown')
        toggleFullscreen()
        e.preventDefault()
        e.stopPropagation()
    })
    // Touch: swallow without toggling fullscreen.
    window.addEventListener('touchstart', (e) => {
        e.preventDefault()
        e.stopPropagation()
    })
}
// Toggle fullscreen on <body>, handling both the standard Fullscreen API
// and the webkit-prefixed variant (older Safari exposes only the
// webkit* members).
function toggleFullscreen() {
var body = document.querySelector('body')
// Currently fullscreen when NEITHER element pointer is null. NOTE(review):
// on browsers where one of the two properties is undefined (not null),
// `=== null` is false for it, so the other property decides — this
// appears intentional cross-browser handling; confirm before changing.
if (!(document.fullscreenElement === null || document.webkitFullscreenElement === null)) {
// Exit via whichever API reports an active fullscreen element.
if (document.fullscreenElement) {
document.exitFullscreen()
}
if (document.webkitFullscreenElement) {
document.webkitExitFullscreen()
}
} else {
console.log('enter fullscreen?')
// Prefer the webkit entry point when present; otherwise use the
// standard API and log (rather than throw) on rejection.
if (body.webkitRequestFullscreen) {
body.webkitRequestFullscreen({navigationUI: 'hide'})
} else {
body.requestFullscreen({navigationUI: 'hide'}).catch(err => {
console.log(`Error attempting to enable full-screen mode: ${err.message} (${err.name})`);
});
}
}
}
// Start playback of every <video>/<audio> on the page. If the browser's
// autoplay policy blocks playback, show a full-screen "click to start"
// overlay and retry after the first user gesture.
function play() {
let blocked = false, loading
document.querySelectorAll("video,audio").forEach(async (media) => {
// Once one element is blocked, skip starting the rest.
if (blocked) { return }
return media.play().then(() => {
// Elements marked 'next' are preloaded but must not run yet.
if (media.classList.contains('next')) {
media.pause()
}
}).catch(error => {
// Only build the overlay once, for the first blocked element.
if (blocked) {
return
}
blocked = true
loading = document.createElement('div')
loading.id = 'click-to-play'
loading.innerHTML = '<div class="text"><h1>P for Power</h1><p>click to start</p></div>'
document.body.appendChild(loading)
// Any user gesture counts as interaction: tear down the overlay,
// unregister these one-shot handlers, retry playback and go fullscreen.
function removeBehaviorsRestrictions() {
loading.remove()
window.removeEventListener('keydown', removeBehaviorsRestrictions);
window.removeEventListener('mousedown', removeBehaviorsRestrictions);
window.removeEventListener('touchstart', removeBehaviorsRestrictions);
blocked = false
play()
toggleFullscreen()
}
window.addEventListener('keydown', removeBehaviorsRestrictions);
window.addEventListener('mousedown', removeBehaviorsRestrictions);
window.addEventListener('touchstart', removeBehaviorsRestrictions);
})
})
}
// Pause every <video>/<audio> element on the page.
function pause() {
    // Bug fix: the original called the non-existent DOM method
    // `document.createElementAll` (querySelectorAll was intended) and
    // assigned `media.paused = true` — `paused` is a read-only property,
    // so nothing was ever paused. Call pause() on each element instead.
    document.querySelectorAll("video,audio").forEach(media => {
        media.pause()
    })
}
function render() {
name = document.location.hash.slice(1)
console.log('on load', name)
@ -211,7 +318,6 @@
name = parts[0]
current = parseInt(parts[1]) - 1
}
console.log('on load', name, parts, current)
var body = document.querySelector('body')
body.innerHTML = ``
var stage = document.createElement("div")
@ -231,7 +337,7 @@
const titleEl = overlay.querySelector("#title");
for (let i = 1; i <= 30; i++) {
const box = document.createElement("div");
box.className = "box";
box.className = "box box" + i;
box.textContent = i;
box.addEventListener("mouseenter", () => {
@ -246,7 +352,7 @@
document.querySelector(".page").style.display = ""
current = parseInt(box.textContent) - 1
nextVideo.src = prefix + name + playlist[current]
fetch(nextVideo.src.replace(/.mp4/, '.ass')).then(async (res) => {
fetch(nextVideo.src.replace(/.mp4/, '.ass') + '?' +random).then(async (res) => {
const content = await res.text()
const ass = new ASS(content, nextVideo, {
container: subtitles,
@ -289,7 +395,6 @@
audio2.controls = false
audio2.volume = 1
audio2.loop = true
audio2.autoplay = true
audio2.classList.add("forest")
body.appendChild(audio2)
@ -298,7 +403,6 @@
audio3.controls = false
audio3.volume = 1
audio3.loop = true
audio3.autoplay = true
audio3.currentTime = Math.random() * 60
audio3.classList.add("music")
body.appendChild(audio3)
@ -308,17 +412,19 @@
media.addEventListener('pause', sync)
media.addEventListener('click', () => {
document.querySelector(".page").style.display = "flex"
document.querySelectorAll('.box').forEach(box => { box.classList.remove('current') })
document.querySelector('.box' + current).classList.add('current')
})
})
fetch(video1.src.replace(/.mp4/, '.ass')).then(async (res) => {
fetch(video1.src.replace(/.mp4/, '.ass') + '?' +random).then(async (res) => {
const content = await res.text()
const ass = new ASS(content, video1, {
container: subtitles,
});
sub_handlers[video1.id] = ass
sub_handlers[video1.id].show()
video1.play()
fetch(video2.src.replace(/.mp4/, '.ass')).then(async (res) => {
play()
fetch(video2.src.replace(/.mp4/, '.ass')+ '?' +random).then(async (res) => {
const content = await res.text()
const ass = new ASS(content, video2, {
container: subtitles,

View file

@ -560,7 +560,10 @@ def render_all(options):
cmd += ['vn=1']
else:
cmd += ['an=1']
#cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
if options.get("use_qsv"):
cmd += ['vcodec=h264_qsv', 'pix_fmt=nv12', 'rc=icq', 'global_quality=17']
elif options.get("only_keyframes"):
cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
subprocess.call(cmd)
if ext == '.wav' and timeline.endswith('audio.kdenlive'):
cmd = [
@ -689,6 +692,9 @@ def render_all(options):
cmd += ['vn=1']
else:
cmd += ['an=1']
if options.get("use_qsv"):
cmd += ['vcodec=h264_qsv', 'pix_fmt=nv12', 'rc=icq', 'global_quality=17']
elif options.get("only_keyframes"):
cmd += ['vcodec=libx264', 'x264opts=keyint=1', 'crf=15']
cmds.append(cmd)
for src, out1, out2 in (
@ -1196,8 +1202,15 @@ def unused_tags():
def fragment_statistics():
import itemlist.models
import item.models
from item.models import Item
stats = {}
duration = {}
ai_duration = {}
prefix = default_prefix
with open(os.path.join(prefix, "clips.json")) as fd:
clips = json.load(fd)
for l in itemlist.models.List.objects.filter(status='featured').order_by('name'):
if l.name.split(' ')[0].isdigit():
fragment_id = l.name.split(' ')[0]
@ -1224,11 +1237,14 @@ def fragment_statistics():
elif con.get('key') == "tags" and con['operator'] == '!==':
fragment['anti-tags'].append(con['value'].lower().strip())
if fragment_id not in stats:
stats[fragment_id] = {}
for tag in fragment['tags']:
stats[fragment_id][tag] = 0
duration[fragment_id] = ai_duration[fragment_id] = 0
for item in l.get_items(l.user).all():
item_tags = [t.lower().strip() for t in item.get('tags')]
if set(item_tags) & set(fragment['anti-tags']):
@ -1236,9 +1252,16 @@ def fragment_statistics():
for tag in set(fragment['tags']):
if tag in item_tags:
stats[fragment_id][tag] += 1
duration[fragment_id] += item.sort.duration
for ai in Item.objects.filter(data__title=item.data['title']).filter(data__type__icontains='ai:'):
ai_duration[fragment_id] += ai.sort.duration
with open("/srv/pandora/static/power/fragments.txt", "w") as fd:
for fragment, data in stats.items():
fd.write("%s\n" % fragment)
fd.write("%s (%s source material, %s ai material)\n" % (
fragment,
ox.format_duration(1000*duration[fragment], 1, milliseconds=False),
ox.format_duration(1000*ai_duration[fragment], 1, milliseconds=False))
)
for tag in sorted(data):
fd.write(" %s: %s\n" % (tag, data[tag]))
return stats