Compare commits

...

3 commits

Author SHA1 Message Date
j
2520681d25 tweak info pages 2026-01-15 16:11:36 +00:00
j
f77bc641a6 helper functions to generate new versions 2026-01-14 22:12:51 +00:00
j
8cba167c22 use adjustvolume in edit 2026-01-14 21:17:23 +00:00
4 changed files with 1495 additions and 2 deletions

753
generate.py Normal file
View file

@ -0,0 +1,753 @@
import hashlib
import math
import os
import shutil
import time
from pathlib import Path

import cv2
import fal_client
import ox
import requests
from byteplussdkarkruntime import Ark
from django.conf import settings

from item.models import Item
# fal_client reads its API key from the environment -- TODO confirm env var name
os.environ["FAL_KEY"] = settings.FAL_KEY
# longest segment (in seconds) a single generation request may cover
MAX_DURATION = 12
# shared auth headers for all BytePlus Ark REST calls below
headers = {
    "Authorization": "Bearer " + settings.BYTEPLUSE_TOKEN,
    "Content-Type": "application/json",
}
def public_url(path):
    """Map a local static-file path to its publicly reachable URL."""
    web_prefix = settings.PUBLIC_URL + "static/"
    return path.replace("/srv/pandora/static/", web_prefix)
def trim_video(src, dst, frames, start0=False):
    """Copy exactly `frames` frames from `src` into `dst`.

    By default the kept window is centered in the source; with start0=True
    it starts at the first frame. The output keeps the source fps and
    resolution and is encoded with the "avc1" (H.264) fourcc.
    """
    cap = cv2.VideoCapture(src)
    fps = cap.get(cv2.CAP_PROP_FPS)
    frames_src = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # center the window unless the caller wants it anchored at frame 0
    offset = 0 if start0 else int((frames_src - frames) / 2)
    print(frames_src, frames, offset)
    fourcc = cv2.VideoWriter_fourcc(*"avc1")
    out = cv2.VideoWriter(dst, fourcc, fps, (width, height))
    frame_count = 0
    written = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_count += 1
        if frame_count < offset:
            # still before the window: skip
            continue
        if frame_count >= (frames + offset):
            # window complete: stop decoding (the original `continue`d here
            # and kept reading the rest of the source for nothing)
            break
        out.write(frame)
        written += 1
    out.release()
    cap.release()
def bytedance_task(data):
    """Submit a video-generation task to BytePlus Ark and poll it to completion.

    Missing request fields are filled from `defaults` (the caller's values
    win). Returns the final task-status JSON.
    """
    url = "https://ark.ap-southeast.bytepluses.com/api/v3/contents/generations/tasks"
    model = "seedance-1-5-pro-251215"
    resolution = "720p"
    defaults = {
        "model": model,
        "generate_audio": False,
        "ratio": "16:9",
        "watermark": False,
        "resolution": resolution,
        "camera_fixed": True,
        "return_last_frame": True,
    }
    for key, value in defaults.items():
        if key not in data:
            data[key] = value
    print(data)
    r = requests.post(url, headers=headers, json=data).json()
    print(r)
    task_id = r["id"]
    status = requests.get(url + "/" + task_id, headers=headers).json()
    # "cancelled" is a terminal state; the original also kept polling on it,
    # which would loop forever -- treat only queued/running as in-progress
    while status["status"] in ("queued", "running"):
        time.sleep(10)
        status = requests.get(url + "/" + task_id, headers=headers).json()
    print(status)
    return status
def bytedance_response(data):
    """POST a request to the BytePlus responses endpoint; return the JSON reply."""
    url = "https://ark.ap-southeast.bytepluses.com/api/v3/responses"
    # fill in the default model only if the caller did not choose one
    data.setdefault("model", "seed-1-8-251228")
    print(data)
    response = requests.post(url, headers=headers, json=data).json()
    print(response)
    return response
def t2v_bytedance(prompt, duration, output):
    """Text-to-video via BytePlus: save the clip to `output`.

    duration is rounded up to whole seconds with a minimum of 4. If the API
    returns a last-frame image it is stored next to the video as
    `output + ".last_frame.png"`. Returns the final task status.
    """
    clip_seconds = max(4, int(math.ceil(duration)))
    request = {
        "duration": clip_seconds,
        "content": [{"type": "text", "text": prompt}],
    }
    status = bytedance_task(request)
    content = status["content"]
    ox.net.save_url(content["video_url"], output, overwrite=True)
    if "last_frame_url" in content:
        last_frame_file = output + ".last_frame.png"
        ox.net.save_url(content["last_frame_url"], last_frame_file, overwrite=True)
    return status
def first_last(first_frame, last_frame, prompt, duration, output):
    """Generate a clip that interpolates between two frame images.

    first_frame/last_frame are image URLs; duration is rounded up to whole
    seconds (min 4). Saves the video to `output` and, if returned, the last
    frame to `output + ".last_frame.png"`. Returns the final task status.

    Note: the model/resolution defaults are applied inside bytedance_task();
    the original defined unused `model`/`resolution` locals here (removed).
    """
    nduration = max(4, int(math.ceil(duration)))
    data = {
        "duration": nduration,
        "content": [
            {
                "type": "text",
                "text": prompt,
            },
            {
                "type": "image_url",
                "role": "first_frame",
                "image_url": {"url": first_frame},
            },
            {
                "type": "image_url",
                "role": "last_frame",
                "image_url": {"url": last_frame},
            },
        ],
    }
    status = bytedance_task(data)
    output_url = status["content"]["video_url"]
    ox.net.save_url(output_url, output, overwrite=True)
    if "last_frame_url" in status["content"]:
        ox.net.save_url(
            status["content"]["last_frame_url"],
            output + ".last_frame.png",
            overwrite=True,
        )
    return status
def get_item_segments(item, max_duration=MAX_DURATION):
    """Split an item's timeline into segment end-times of at most
    max_duration seconds, aligned to the item's cut list.

    Returns a list whose entries are either a plain end-time or
    ["c", position] marking a forced split inside one scene; the "c"
    marker is consumed by remake_video()/transform_remake_video().
    """
    cuts = item.get("cuts")
    filename = item.files.all()[0].data.path
    input_info = ox.avinfo(filename)
    p = 0
    nc = []
    # drop cuts that are closer than 0.5s to the previously kept cut
    for c in cuts:
        d = c - p
        if d < 0.5:
            continue
        p = c
        nc.append(c)
    # the end of the video is always the last segment boundary
    nc = nc + [input_info["duration"]]
    # if the last cut is within 0.5s of the end, merge the final two segments
    if len(nc) > 3:
        if nc[-1] - nc[-2] < 0.5:
            nc = nc[:-2] + nc[-1:]
    segments = []
    position = 0
    for out in nc:
        duration = out - position
        # scenes longer than max_duration get forced intermediate splits;
        # only the very first entry is emitted without the "c" marker
        while duration > max_duration:
            position += max_duration
            if len(segments):
                segments.append(["c", position])
            else:
                segments.append(position)
            duration = out - position
        else:
            # while/else: no break above, so the scene end is always appended
            segments.append(out)
        position = out
    return segments
def join_segments(processed, joined_output):
    """Concatenate the given video files into joined_output.

    Output fps/size/codec are taken from the first input; frames from the
    remaining files are appended as-is. Writes nothing if `processed` is empty.
    """
    writer = None
    for src in processed:
        reader = cv2.VideoCapture(src)
        if writer is None:
            # initialise the writer from the first clip's properties
            fps = reader.get(cv2.CAP_PROP_FPS)
            size = (
                int(reader.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT)),
            )
            codec = cv2.VideoWriter_fourcc(*"avc1")
            writer = cv2.VideoWriter(joined_output, codec, fps, size)
        while reader.isOpened():
            ok, frame = reader.read()
            if not ok:
                break
            writer.write(frame)
        reader.release()
    if writer is not None:
        writer.release()
def remake_video(item_id, prompt):
    """Regenerate an entire item with first/last-frame guided AI clips.

    For each segment from get_item_segments() a clip is generated between
    the segment's first and last source frame (for forced ["c", pos] splits
    the first frame is the previous clip's generated last frame), trimmed to
    the exact frame count, and all pieces joined. All intermediate files are
    cached by (public_id, sha1(prompt)); returns the joined output path.
    """
    item = Item.objects.get(public_id=item_id)
    segments = get_item_segments(item)
    print(segments)
    prompt_hash = hashlib.sha1(prompt.encode()).hexdigest()
    position = n = 0
    processed = []
    for segment in segments:
        if isinstance(segment, list):
            # ["c", position]: forced split inside a scene
            stype, segment = segment
        else:
            stype = "n"
        duration = segment - position
        if stype == "c":
            # continue from the last frame generated for the previous part
            first_frame_path = (
                "/srv/pandora/static/power/cache/%s_%s/%06d.mp4.last_frame.png"
                % (item.public_id, prompt_hash, n - 1)
            )
            first_frame = public_url(first_frame_path)
        else:
            first_frame = "%s%s/source%s.png?token=%s" % (
                settings.PUBLIC_URL,
                item.public_id,
                position,
                settings.PUBLIC_TOKEN,
            )
        # grab the last frame two frames before the segment end (assumes 24fps)
        last_frame_position = segment - 2 / 24
        last_frame = "%s%s/source%0.3f.png?token=%s" % (
            settings.PUBLIC_URL,
            item.public_id,
            last_frame_position,
            settings.PUBLIC_TOKEN,
        )
        output = "/srv/pandora/static/power/cache/%s_%s/%06d.mp4" % (
            item.public_id,
            prompt_hash,
            n,
        )
        if not os.path.exists(output):
            first_last(first_frame, last_frame, prompt, duration, output)
        trimmed = "/srv/pandora/static/power/cache/%s_%s/%06d_trimmed.mp4" % (
            item.public_id,
            prompt_hash,
            n,
        )
        # generated clips have whole-second durations; cut to the exact count
        frames = int(duration * 24)
        if not os.path.exists(trimmed):
            trim_video(output, trimmed, frames, stype == "c")
        processed.append(trimmed)
        position = segment
        n += 1
    joined_output = "/srv/pandora/static/power/cache/%s_%s.mp4" % (
        item.public_id,
        prompt_hash,
    )
    join_segments(processed, joined_output)
    return joined_output
def prepare_image(image, prompt, out=None):
    """Run an image-to-image edit through Seedream and save the result.

    `image` may be a local static path (converted with public_url()) or an
    http(s) URL. The result is saved to `out` (default: image + ".ai.png")
    and the raw API response is returned.
    """
    model = "seedream-4-5-251128"
    # accept both http:// and https:// URLs as-is; the original only tested
    # the "http:" prefix, so https URLs were passed through public_url()
    # (harmlessly, but by accident)
    if not image.startswith(("http://", "https://")):
        image = public_url(image)
    data = {
        "model": model,
        "prompt": prompt,
        "image": image,
        "size": "2560x1440",
        "watermark": False,
    }
    url = "https://ark.ap-southeast.bytepluses.com/api/v3/images/generations"
    print("prepare_image", data)
    r = requests.post(url, headers=headers, json=data).json()
    print(r)
    output_url = r["data"][0]["url"]
    if out is None:
        out = image + ".ai.png"
    ox.net.save_url(output_url, out, overwrite=True)
    return r
def describe_video(url):
    """Ask the BytePlus vision model for a per-scene description of a video.

    The prompt requests "CAMERA CUT TO" as the scene separator so downstream
    code can rely on it. Returns the model's reply text.
    Spelling errors in the original prompt ("incudling detalied apreance",
    "backgroud", "lense", "incude") were corrected: the text is consumed by
    a language model, so typos degrade output quality.
    """
    prompt = (
        "Detect cuts or scene changes and describe each scene, use as much details as you can. "
        "Describe each person including detailed appearance, ethnicity, haircolor, haircut, "
        "describe each object, animal or plant, describe foreground and background, "
        "describe from what angle the scene is filmed, include details about camera model, lens, depth of field used to film this scene. "
        "Use the format: <description of scene 1>. CAMERA CUT TO <description of scene 2>. CAMERA CUT TO <description of scene 3>. "
        "Don't mention it if you don't find a cut."
    )
    data = {
        "input": [
            {
                "role": "user",
                "content": [
                    {"type": "input_video", "video_url": url, "fps": 1},
                    {"type": "input_text", "text": prompt},
                ],
            }
        ],
    }
    response = bytedance_response(data)
    # assumes the reply text lives in output[1] -- TODO confirm against the
    # responses API (describe_image() below uses output[-1])
    return response["output"][1]["content"][0]["text"]
def describe_item(item):
    """Describe an item's source video; accepts an Item or a public_id string."""
    if isinstance(item, str):
        item = Item.objects.get(public_id=item)
    source_url = "%s%s/download/source/?token=%s" % (
        settings.PUBLIC_URL, item.public_id, settings.PUBLIC_TOKEN,
    )
    return describe_video(source_url)
def reshoot_item(item, extra_prompt=None, keep=False):
    """Describe an item's video, regenerate it from that description, and
    register the result as a new AI-variant Item.

    `extra_prompt` is appended to the generated description; with keep=False
    the cache directory for this generation is deleted afterwards.
    Returns the new Item from add_ai_variant().
    """
    if isinstance(item, str):
        item = Item.objects.get(public_id=item)
    duration = item.sort.duration
    # target frame count assumes 24fps output (see trim_video callers)
    frames = int(duration * 24)
    prompt = describe_item(item)
    if extra_prompt:
        prompt += " " + extra_prompt
    prompt_hash = hashlib.sha1((prompt).encode()).hexdigest()
    output = "/srv/pandora/static/power/cache/%s_%s/ai.mp4" % (
        item.public_id,
        prompt_hash,
    )
    status = t2v_bytedance(prompt, duration, output)
    trimmed = "/srv/pandora/static/power/cache/%s_%s/trimmed.mp4" % (
        item.public_id,
        prompt_hash,
    )
    trim_video(output, trimmed, frames)
    ai = add_ai_variant(item, trimmed, "ai:0:reshoot")
    ai.data["prompt"] = ox.escape_html(prompt)
    # assumes the task status JSON carries "model" and "seed" -- TODO confirm
    ai.data["model"] = status["model"]
    ai.data["seed"] = status["seed"]
    ai.save()
    if not keep:
        # requires a module-level "import shutil"
        shutil.rmtree(os.path.dirname(output))
    return ai
def describe_image(url):
    """Ask the BytePlus vision model to analyze an image.

    Returns the text of the final assistant message. The original assigned
    system_prompt = "" and immediately overwrote it; the dead assignment is
    removed.
    """
    system_prompt = "You are an image analyst describing different aspects of an image. You are focused on the form, composition, and task shown in the image."
    prompt = "Please analyze this image according to the specified structure."
    data = {
        "input": [
            {"role": "system", "content": system_prompt},
            {
                "role": "user",
                "content": [
                    {"type": "input_image", "image_url": url},
                    {"type": "input_text", "text": prompt},
                ],
            },
        ],
    }
    response = bytedance_response(data)
    # the last output entry holds the assistant reply
    return response["output"][-1]["content"][0]["text"]
def transform_remake_video(item_id, image_prompt, video_prompt):
    """Like remake_video(), but each segment's first/last source frames are
    first transformed with prepare_image(image_prompt) before the video is
    generated between them with video_prompt.

    Intermediate files are cached by (public_id, sha1(image+video prompt));
    returns the joined output path.
    """
    item = Item.objects.get(public_id=item_id)
    segments = get_item_segments(item)
    print(segments)
    prompt_hash = hashlib.sha1((image_prompt + video_prompt).encode()).hexdigest()
    position = n = 0
    processed = []
    for segment in segments:
        if isinstance(segment, list):
            # ["c", position]: forced split inside a scene
            stype, segment = segment
        else:
            stype = "n"
        duration = segment - position
        if stype == "c":
            # continue from the previous clip's generated last frame
            first_frame_path = (
                "/srv/pandora/static/power/cache/%s_%s/%06d.mp4.last_frame.png"
                % (item.public_id, prompt_hash, n - 1)
            )
            first_frame = public_url(first_frame_path)
        else:
            first_frame = "%s%s/source%s.png?token=%s" % (
                settings.PUBLIC_URL,
                item.public_id,
                position,
                settings.PUBLIC_TOKEN,
            )
            # transform the first frame with the image prompt (cached)
            first_frame_path = (
                "/srv/pandora/static/power/cache/%s_%s/%06d.first_frame.png"
                % (item.public_id, prompt_hash, n)
            )
            if not os.path.exists(first_frame_path):
                prepare_image(first_frame, image_prompt, first_frame_path)
            first_frame = public_url(first_frame_path)
        # last frame two frames before the segment end (assumes 24fps)
        last_frame_position = segment - 2 / 24
        last_frame = "%s%s/source%0.3f.png?token=%s" % (
            settings.PUBLIC_URL,
            item.public_id,
            last_frame_position,
            settings.PUBLIC_TOKEN,
        )
        # transform the last frame with the image prompt (cached)
        last_frame_path = (
            "/srv/pandora/static/power/cache/%s_%s/%06d.last_frame.png"
            % (item.public_id, prompt_hash, n)
        )
        if not os.path.exists(last_frame_path):
            prepare_image(last_frame, image_prompt, last_frame_path)
        last_frame = public_url(last_frame_path)
        output = "/srv/pandora/static/power/cache/%s_%s/%06d.mp4" % (
            item.public_id,
            prompt_hash,
            n,
        )
        if not os.path.exists(output):
            first_last(first_frame, last_frame, video_prompt, duration, output)
        trimmed = "/srv/pandora/static/power/cache/%s_%s/%06d_trimmed.mp4" % (
            item.public_id,
            prompt_hash,
            n,
        )
        # generated clips have whole-second durations; cut to the exact count
        frames = int(duration * 24)
        if not os.path.exists(trimmed):
            trim_video(output, trimmed, frames, stype == "c")
        processed.append(trimmed)
        position = segment
        n += 1
    joined_output = "/srv/pandora/static/power/cache/%s_%s.mp4" % (
        item.public_id,
        prompt_hash,
    )
    join_segments(processed, joined_output)
    return joined_output
def restyle_video(item_id, prompt):
    """Restyle an item's source video with fal's decart/lucy-restyle model.

    Polls until the job leaves the in-progress state, downloads the result
    into the power cache and returns the local output path.
    """
    item = Item.objects.get(public_id=item_id)
    video_url = "%s%s/download/source/?token=%s" % (
        settings.PUBLIC_URL,
        item.public_id,
        settings.PUBLIC_TOKEN,
    )
    model = "decart/lucy-restyle"
    handler = fal_client.submit(
        model,
        arguments={
            "prompt": prompt,
            "video_url": video_url,
            "resolution": "720p",
            "enhance_prompt": True,
        },
    )
    request_id = handler.request_id
    print(request_id)
    status = fal_client.status(model, request_id, with_logs=True)
    # NOTE(review): only InProgress is polled; a Queued status would fall
    # straight through to result() -- confirm fal_client.result handles that
    while isinstance(status, fal_client.InProgress):
        time.sleep(10)
        status = fal_client.status(model, request_id, with_logs=True)
    result = fal_client.result(model, request_id)
    print(result)
    output_url = result["video"]["url"]
    prompt_hash = hashlib.sha1((prompt).encode()).hexdigest()
    output_path = "/srv/pandora/static/power/cache/%s_%s.mp4" % (
        item.public_id,
        prompt_hash,
    )
    ox.net.save_url(output_url, output_path, overwrite=True)
    return output_path
def motion_control_preprocess_image(item_id, image_prompt, video_prompt):
    """Drive Kling motion-control with an AI-transformed first frame.

    The item's first source frame is transformed with prepare_image
    (image_prompt, cached), then the item's source video plus that image are
    submitted to the fal Kling motion-control endpoint with video_prompt.
    Returns the local path of the downloaded result.
    """
    item = Item.objects.get(public_id=item_id)
    video_url = "%s%s/download/source/?token=%s" % (
        settings.PUBLIC_URL,
        item.public_id,
        settings.PUBLIC_TOKEN,
    )
    model = "fal-ai/kling-video/v2.6/pro/motion-control"
    prompt_hash = hashlib.sha1((image_prompt + video_prompt).encode()).hexdigest()
    output = "/srv/pandora/static/power/cache/%s_%s.mp4" % (item.public_id, prompt_hash)
    # frame at position 0 of the source video
    first_frame = "%s%s/source%s.png?token=%s" % (
        settings.PUBLIC_URL,
        item.public_id,
        0,
        settings.PUBLIC_TOKEN,
    )
    first_frame_path = "/srv/pandora/static/power/cache/%s_%s/%06d.first_frame.png" % (
        item.public_id,
        prompt_hash,
        0,
    )
    if not os.path.exists(first_frame_path):
        os.makedirs(os.path.dirname(first_frame_path), exist_ok=True)
        prepare_image(first_frame, image_prompt, first_frame_path)
    image_url = public_url(first_frame_path)
    data = {
        "prompt": video_prompt,
        "image_url": image_url,
        "video_url": video_url,
        "keep_original_sound": False,
        "character_orientation": "video",
    }
    print(data)
    handler = fal_client.submit(model, arguments=data)
    request_id = handler.request_id
    print(request_id)
    status = fal_client.status(model, request_id, with_logs=True)
    # NOTE(review): only InProgress is polled; a Queued status falls through
    # to result() -- confirm fal_client.result blocks until completion
    while isinstance(status, fal_client.InProgress):
        time.sleep(10)
        status = fal_client.status(model, request_id, with_logs=True)
    result = fal_client.result(model, request_id)
    print(result)
    output_url = result["video"]["url"]
    ox.net.save_url(output_url, output, overwrite=True)
    return output
def luma_wait_for(id):
    """Poll a Luma generation until it leaves the queued/dreaming states.

    Returns the final generation-status JSON. (`id` shadows the builtin but
    is kept for interface compatibility.)
    """
    url = "https://api.lumalabs.ai/dream-machine/v1/generations/%s" % id
    luma_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": "Bearer " + settings.LUMA_TOKEN,
    }
    while True:
        status = requests.get(url, headers=luma_headers).json()
        if status["state"] not in ("queued", "dreaming"):
            return status
        time.sleep(10)
def luma_modify_segment(video_url, prompt, first_frame=None):
    """Run Luma's modify_video generation on a clip and return the URL of
    the resulting video asset.
    """
    # also got that at fal-ai/luma-dream-machine/ray-2/modify
    endpoint = "https://api.lumalabs.ai/dream-machine/v1/generations/video/modify"
    request_headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": "Bearer " + settings.LUMA_TOKEN,
    }
    payload = {
        "generation_type": "modify_video",
        "model": "ray-2",
        "mode": "adhere_1",
        "prompt": prompt,
        "media": {"url": video_url},
    }
    if first_frame:
        payload["first_frame"] = {"url": first_frame}
    response = requests.post(endpoint, json=payload, headers=request_headers).json()
    print(response)
    status = luma_wait_for(response["id"])
    return status["assets"]["video"]
def fragment_video(filename, segmentdir, segments):
    """Split a video into numbered per-segment mp4 files in segmentdir.

    `segments` is a list of end-times in seconds; ["c", t] entries are
    accepted and the marker discarded. For each clip a _first.jpg and
    _last.jpg thumbnail is written next to it. Returns None.
    """
    filename = str(filename)
    # NOTE(review): input_info is never used
    input_info = ox.avinfo(filename)
    # strip the ["c", position] markers, keeping only end-times
    segments_ = []
    for segment in segments:
        if isinstance(segment, list):
            stype, segment = segment
        else:
            stype = "n"
        segments_.append(segment)
    segments = segments_
    position = 0
    segment = 0
    cap = cv2.VideoCapture(filename)
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*"avc1")
    frame_count = 0
    next_cut = int(segments.pop(0) * fps)
    # `last` doubles as "have we written a frame to the current clip yet"
    last = None
    os.makedirs(segmentdir, exist_ok=True)
    while cap.isOpened():
        if frame_count == 0:
            output_path = segmentdir + "/%06d.mp4" % segment
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        elif next_cut and frame_count >= next_cut and segments:
            # segment boundary reached: finish the clip and start the next one
            print(frame_count, output_path)
            cv2.imwrite(output_path.replace(".mp4", "_last.jpg"), frame)
            out.release()
            segment += 1
            output_path = segmentdir + "/%06d.mp4" % segment
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            if segments:
                next_cut = int(segments.pop(0) * fps)
            else:
                next_cut = None
            # so the next frame read is saved as the new clip's _first.jpg
            last = None
        ret, frame = cap.read()
        if not ret:
            break
        out.write(frame)
        if last is None:
            cv2.imwrite(output_path.replace(".mp4", "_first.jpg"), frame)
        last = frame
        frame_count += 1
    out.release()
    cap.release()
    if last is None:
        # the final clip received no frames; remove the empty file
        os.unlink(output_path)
    else:
        cv2.imwrite(output_path.replace(".mp4", "_last.jpg"), last)
def flux_edit_image(image, prompt):
    """Edit one image with FLUX.2 pro via fal and return the result URL."""
    arguments = {
        "prompt": prompt,
        "safety_tolerance": "5",
        "enable_safety_checker": False,
        "output_format": "jpeg",
        "image_urls": [image],
    }
    print(arguments)
    result = fal_client.subscribe("fal-ai/flux-2-pro/edit", arguments=arguments)
    print(result)
    return result["images"][0]["url"]
def in_the_style_of_fal(image, style):
    """Restyle `image` using `style` as a reference via FLUX.2 pro on fal.

    Returns the URL of the generated image.
    """
    prompt = "apply style from @image 2 to @image 1 keep the position of the person in @image 1 but take light, colors, clothing from @image 2"
    arguments = {
        "prompt": prompt,
        "safety_tolerance": "5",
        "enable_safety_checker": False,
        "output_format": "jpeg",
        "image_urls": [image, style],
    }
    print(arguments)
    result = fal_client.subscribe("fal-ai/flux-2-pro/edit", arguments=arguments)
    print(result)
    return result["images"][0]["url"]
def in_the_style_of_byte(image, style):
    """Restyle `image` using `style` as a reference via Seedream (BytePlus SDK).

    Returns the URL of the generated image.
    """
    prompt = "apply style from image 2 to image 1 keep the position of the person in image 1 but take light, colors, clothing from image 2"
    client = Ark(
        base_url="https://ark.ap-southeast.bytepluses.com/api/v3",
        api_key=settings.BYTEPLUSE_TOKEN,
    )
    create_result = client.images.generate(
        model="seedream-4-5-251128",
        image=[image, style],
        prompt=prompt,
        sequential_image_generation="disabled",
        response_format="url",
        size="2560x1440",
        stream=False,
        watermark=False,
    )
    print(create_result)
    return create_result.data[0].url
def luma_modify_item(item, prompt, image_prompt=None, first_frame=None):
    """Run Luma modify_video over an item segment by segment and join the
    results. Returns the joined output path.

    Short videos (< 10s) are processed as one segment; longer ones are split
    with get_item_segments(). NOTE(review): image_prompt only affects the
    cache hash, it is never passed to any generation call -- confirm intent.
    """
    if isinstance(item, str):
        item = Item.objects.get(public_id=item)
    source = item.files.all()[0].data.path
    info = ox.avinfo(source)
    duration = info["duration"]
    max_duration = 10
    if duration < max_duration:
        segments = [duration]
    else:
        segments = get_item_segments(item, max_duration=max_duration)
    print(segments)
    prompt_hash = hashlib.sha1((prompt + (image_prompt or "")).encode()).hexdigest()
    processed = []
    prefix = "/srv/pandora/static/power/cache/%s_%s" % (item.public_id, prompt_hash)
    # fragment_video writes the segment files into `prefix`; it returns None,
    # so video_segments is always None
    video_segments = fragment_video(source, prefix, segments)
    n = 0
    for segment in segments:
        if isinstance(segment, list):
            # ["c", position]: forced split marker from get_item_segments()
            stype, segment = segment
        else:
            stype = "n"
        output = "/srv/pandora/static/power/cache/%s_%s/%06d.mp4" % (
            item.public_id,
            prompt_hash,
            n,
        )
        output_ai = "/srv/pandora/static/power/cache/%s_%s/%06d_ai.mp4" % (
            item.public_id,
            prompt_hash,
            n,
        )
        if os.path.exists(output):
            video_url = luma_modify_segment(
                public_url(output), prompt, first_frame=first_frame
            )
            ox.net.save_url(video_url, output_ai, overwrite=True)
            processed.append(output_ai)
        n += 1
    joined_output = "/srv/pandora/static/power/cache/%s_%s.mp4" % (
        item.public_id,
        prompt_hash,
    )
    join_segments(processed, joined_output)
    return joined_output
def add_ai_variant(item, video_path, type):
    """Create a new Item holding an AI-generated variant of `item`.

    Copies `video_path` into the new item's file storage, marks the file
    available/selected and kicks off stream extraction. Returns the new Item.
    (`type` shadows the builtin but is kept for interface compatibility.
    The original also imported `Stream` here without using it; removed.)
    """
    if isinstance(item, str):
        item = Item.objects.get(public_id=item)
    # local import, presumably to avoid a circular import -- TODO confirm
    from archive.models import File
    ai = Item()
    ai.user = item.user
    ai.data["type"] = [type]
    ai.data["title"] = item.data["title"]
    ai.save()
    file = File()
    file.oshash = ox.oshash(video_path)
    file.item = ai
    file.path = "%s.mp4" % type
    file.info = ox.avinfo(video_path)
    # the local path must not leak into the stored metadata
    del file.info["path"]
    file.parse_info()
    file.data.name = file.get_path("data." + video_path.split(".")[-1])
    os.makedirs(os.path.dirname(file.data.path), exist_ok=True)
    shutil.copy(video_path, file.data.path)
    file.available = True
    file.selected = True
    file.queued = True
    file.wanted = False
    file.save()
    file.extract_stream()
    return ai

View file

@ -168,6 +168,9 @@ def compose(clips, target=150, base=1024, voice_over=None, options=None):
volume_front = '-2.5' volume_front = '-2.5'
volume_rear = '-8.5' volume_rear = '-8.5'
if clip.get('volume') is not None:
volume_front = '%0.2f' % (float(volume_front) + clip['volume'])
volume_rear = '%0.2f' % (float(volume_rear) + clip['volume'])
scene['audio-front']['A2'].append({ scene['audio-front']['A2'].append({
'duration': clip['duration'], 'duration': clip['duration'],
'src': audio, 'src': audio,
@ -814,6 +817,9 @@ def generate_clips(options):
cd = format_duration(clip["duration"], 24) cd = format_duration(clip["duration"], 24)
clip["duration"] = cd clip["duration"] = cd
clip['tags'] = i.data.get('tags', []) clip['tags'] = i.data.get('tags', [])
adjust_volume = i.data.get('adjustvolume', '')
if adjust_volume:
clip['volume'] = float(adjust_volume)
clip['id'] = i.public_id clip['id'] = i.public_id
name = os.path.basename(source_target) name = os.path.basename(source_target)
seqid = re.sub(r"Hotel Aporia_(\d+)", "S\\1_", name) seqid = re.sub(r"Hotel Aporia_(\d+)", "S\\1_", name)

View file

@ -0,0 +1,732 @@
'use strict';
pandora.ui.documentInfoView = function(data, isMixed) {
isMixed = isMixed || {};
var ui = pandora.user.ui,
isMultiple = arguments.length == 2,
canEdit = pandora.hasCapability('canEditMetadata') || isMultiple || data.editable,
canRemove = pandora.hasCapability('canRemoveItems'),
css = {
marginTop: '4px',
textAlign: 'justify'
},
html,
iconRatio = data.ratio,
iconSize = isMultiple ? 0 : ui.infoIconSize,
iconWidth = isMultiple ? 0 : iconRatio > 1 ? iconSize : Math.round(iconSize * iconRatio),
iconHeight = iconRatio < 1 ? iconSize : Math.round(iconSize / iconRatio),
iconLeft = iconSize == 256 ? Math.floor((iconSize - iconWidth) / 2) : 0,
margin = 16,
nameKeys = pandora.site.documentKeys.filter(function(key) {
return key.sortType == 'person';
}).map(function(key) {
return key.id;
}),
listKeys = pandora.site.documentKeys.filter(function(key) {
return Ox.isArray(key.type);
}).map(function(key){
return key.id;
}),
displayedKeys = [ // FIXME: can tis be a flag in the config?
'title', 'notes', 'name', 'description', 'id',
'user', 'rightslevel', 'timesaccessed',
'extension', 'dimensions', 'size', 'matches',
'created', 'modified', 'accessed',
'random', 'entity',
'prompt'
].concat(pandora.site.documentKeys.filter(key => { return key.fulltext }).map(key => key.id)),
statisticsWidth = 128,
$bar = Ox.Bar({size: 16})
.bindEvent({
doubleclick: function(e) {
if ($(e.target).is('.OxBar')) {
$info.animate({scrollTop: 0}, 250);
}
}
}),
$options = Ox.MenuButton({
items: [
{
id: 'delete',
title: Ox._('Delete {0}...', [Ox._('Document')]),
disabled: !canRemove
}
],
style: 'square',
title: 'set',
tooltip: Ox._('Options'),
type: 'image',
})
.css({
float: 'left',
borderColor: 'rgba(0, 0, 0, 0)',
background: 'rgba(0, 0, 0, 0)'
})
.bindEvent({
click: function(data_) {
if (data_.id == 'delete') {
pandora.ui.deleteDocumentDialog(
[data],
function() {
Ox.Request.clearCache();
if (ui.document) {
pandora.UI.set({document: ''});
} else {
pandora.$ui.list.reloadList()
}
}
).open();
}
}
})
.appendTo($bar),
$edit = Ox.MenuButton({
items: [
{
id: 'insert',
title: Ox._('Insert HTML...'),
disabled: true
}
],
style: 'square',
title: 'edit',
tooltip: Ox._('Edit'),
type: 'image',
})
.css({
float: 'right',
borderColor: 'rgba(0, 0, 0, 0)',
background: 'rgba(0, 0, 0, 0)'
})
.bindEvent({
click: function(data) {
// ...
}
})
.appendTo($bar),
$info = Ox.Element().css({overflowY: 'auto'}),
that = Ox.SplitPanel({
elements: [
{element: $bar, size: isMultiple ? 0 : 16},
{element: $info}
],
orientation: 'vertical'
});
if (!isMultiple) {
var $icon = Ox.Element({
element: '<img>',
})
.attr({
src: '/documents/' + data.id + '/512p.jpg?' + data.modified
})
.css({
position: 'absolute',
left: margin + iconLeft + 'px',
top: margin + 'px',
width: iconWidth + 'px',
height: iconHeight + 'px'
})
.bindEvent({
singleclick: toggleIconSize
})
.appendTo($info),
$reflection = $('<div>')
.addClass('OxReflection')
.css({
position: 'absolute',
left: margin + 'px',
top: margin + iconHeight + 'px',
width: iconSize + 'px',
height: iconSize / 2 + 'px',
overflow: 'hidden'
})
.appendTo($info),
$reflectionIcon = $('<img>')
.attr({
src: '/documents/' + data.id + '/512p.jpg?' + data.modified
})
.css({
position: 'absolute',
left: iconLeft + 'px',
width: iconWidth + 'px',
height: iconHeight + 'px',
})
.appendTo($reflection),
$reflectionGradient = $('<div>')
.css({
position: 'absolute',
width: iconSize + 'px',
height: iconSize / 2 + 'px'
})
.appendTo($reflection);
}
var $text = Ox.Element()
.addClass('OxTextPage')
.css({
position: 'absolute',
left: margin + (iconSize == 256 ? 256 : iconWidth) + margin + 'px',
top: margin + 'px',
right: margin + statisticsWidth + margin + 'px',
})
.appendTo($info),
$statistics = $('<div>')
.css({
position: 'absolute',
width: statisticsWidth + 'px',
top: margin + 'px',
right: margin + 'px'
})
.appendTo($info),
$capabilities;
[$options, $edit].forEach(function($element) {
$element.find('input').css({
borderWidth: 0,
borderRadius: 0,
padding: '3px'
});
});
listKeys.forEach(function(key) {
if (Ox.isString(data[key])) {
data[key] = [data[key]];
}
});
if (!canEdit) {
pandora.createLinks($info);
}
// Title -------------------------------------------------------------------
$('<div>')
.css({
marginTop: '-2px',
})
.append(
Ox.EditableContent({
editable: canEdit,
placeholder: formatLight(Ox._( isMixed.title ? 'Mixed title' : 'Untitled')),
tooltip: canEdit ? pandora.getEditTooltip() : '',
value: data.title || ''
})
.css({
marginBottom: '-3px',
fontWeight: 'bold',
fontSize: '13px'
})
.bindEvent({
submit: function(event) {
editMetadata('title', event.value);
}
})
)
.appendTo($text);
// Director, Year and Country ----------------------------------------------
renderGroup(['author', 'date', 'type']);
renderGroup(['publisher', 'place', 'series', 'edition', 'language']);
Ox.getObjectById(pandora.site.documentKeys, 'keywords') && renderGroup(['keywords'])
// Render any remaing keys defined in config
renderRemainingKeys();
// Description -------------------------------------------------------------
if (canEdit || data.description) {
$('<div>')
.addClass("InlineImages")
.append(
Ox.EditableContent({
clickLink: pandora.clickLink,
editable: canEdit,
maxHeight: Infinity,
placeholder: formatLight(Ox._('No Description')),
tooltip: canEdit ? pandora.getEditTooltip() : '',
type: 'textarea',
value: data.description || ''
})
.css(css)
.css({
marginTop: '12px',
overflow: 'hidden'
})
.bindEvent({
submit: function(event) {
editMetadata('description', event.value);
}
})
)
.appendTo($text);
}
if (canEdit || data.prompt) {
$('<div>')
.append(
Ox.EditableContent({
clickLink: pandora.clickLink,
editable: canEdit,
format: function(value) {
return value.replace(
/<img src=/g,
'<img style="float: left; max-width: 256px; max-height: 256px; margin: 0 16px 16px 0" src='
);
},
maxHeight: Infinity,
placeholder: formatLight(Ox._( isMixed.prompt ? 'Mixed Prompt' : 'No Prompt')),
tooltip: canEdit ? pandora.getEditTooltip() : '',
type: 'textarea',
value: data.prompt || ''
})
.css(css)
.css({
marginTop: '12px',
overflow: 'hidden',
'white-space': 'pre-line'
})
.bindEvent({
submit: function(event) {
editMetadata('prompt', event.value);
}
})
)
.appendTo($text);
}
// Referenced --------------------------------------------------------------
if (
!isMultiple && (
data.referenced.items.length
|| data.referenced.annotations.length
|| data.referenced.documents.length
|| data.referenced.entities.length
)) {
var itemsById = {}
data.referenced.items.forEach(function(item) {
itemsById[item.id] = Ox.extend(item, {annotations: [], referenced: true});
});
data.referenced.annotations.forEach(function(annotation) {
var itemId = annotation.id.split('/')[0];
if (!itemsById[itemId]) {
itemsById[itemId] = {
id: itemId,
title: annotation.title,
annotations: []
};
}
itemsById[itemId].annotations = itemsById[itemId].annotations.concat(annotation);
});
var html = Ox.sortBy(Object.values(itemsById), 'title').map(function(item) {
return (item.referenced ? '<a href="/' + item.id + '/documents">' : '')
+ item.title //Ox.encodeHTMLEntities(item.title)
+ (item.referenced ? '</a>' : '')
+ (item.annotations.length
? ' (' + Ox.sortBy(item.annotations, 'in').map(function(annotation) {
return '<a href="/' + annotation.id + '">'
+ Ox.formatDuration(annotation.in) + '</a>'
}).join(', ')
+ ')'
: '')
}).join(', ');
html += data.referenced.documents.map(function(document) {
return ', <a href="/documents/' + document.id + '/info">' + document.title + '</a>';
}).join('');
html += data.referenced.entities.map(function(entity) {
return ', <a href="/entities/' + entity.id + '">' + entity.name + '</a>';
}).join('');
var $div = $('<div>')
.css({marginTop: '12px'})
.html(formatKey('Referenced', 'text') + html)
.appendTo($text);
pandora.createLinks($div);
}
// Extension, Dimensions, Size ---------------------------------------------
['extension', 'dimensions', 'size'].forEach(function(key) {
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey(key, 'statistics'))
.append(
Ox.Theme.formatColor(null, 'gradient')
.css({textAlign: 'right'})
.html(formatValue(key, data[key]))
)
.appendTo($statistics);
});
/*
['created', 'modified'].forEach(function(key) {
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey(key, 'statistics'))
.append(
$('<div>').html(Ox.formatDate(data[key], '%B %e, %Y'))
)
.appendTo($statistics);
});
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey('timesaccessed', 'statistics'))
.append(
$('<div>').html(data.timesaccessed)
)
.appendTo($statistics);
*/
// Rights Level ------------------------------------------------------------
var $rightsLevel = $('<div>');
$('<div>')
.css({marginBottom: '4px'})
.append(formatKey('Rights Level', 'statistics'))
.append($rightsLevel)
.appendTo($statistics);
renderRightsLevel();
function editMetadata(key, value) {
if (value != data[key]) {
var edit = {
id: isMultiple ? ui.collectionSelection : data.id,
};
if (key == 'title') {
edit[key] = value;
} else if (listKeys.indexOf(key) >= 0) {
edit[key] = value ? value.split(', ') : [];
} else {
edit[key] = value ? value : null;
}
pandora.api.editDocument(edit, function(result) {
if (!isMultiple) {
var src;
data[key] = result.data[key];
Ox.Request.clearCache(); // fixme: too much? can change filter/list etc
if (result.data.id != data.id) {
pandora.UI.set({document: result.data.id});
pandora.$ui.browser.value(data.id, 'id', result.data.id);
}
//pandora.updateItemContext();
//pandora.$ui.browser.value(result.data.id, key, result.data[key]);
pandora.$ui.itemTitle
.options({title: '<b>' + (pandora.getDocumentTitle(result.data)) + '</b>'});
}
that.triggerEvent('change', Ox.extend({}, key, value));
});
}
}
function formatKey(key, mode) {
var item = Ox.getObjectById(pandora.site.documentKeys, key);
key = Ox._(item ? item.title : key);
mode = mode || 'text';
return mode == 'text'
? '<span style="font-weight: bold">' + Ox.toTitleCase(key) + ':</span> '
: mode == 'description'
? Ox.toTitleCase(key)
: Ox.Element()
.css({marginBottom: '4px', fontWeight: 'bold'})
.html(Ox.toTitleCase(key)
.replace(' Per ', ' per '));
}
function formatLight(str) {
return '<span class="OxLight">' + str + '</span>';
}
function formatLink(value, key) {
return (Ox.isArray(value) ? value : [value]).map(function(value) {
var documentKey = Ox.getObjectById(pandora.site.documentKeys, key),
op = documentKey && documentKey.filter ? '==' : '=';
return key
? '<a href="/documents/' + key + op + pandora.escapeQueryValue(Ox.decodeHTMLEntities(value)) + '">' + value + '</a>'
: value;
}).join(', ');
}
function formatValue(key, value) {
var ret;
if (key == 'date' && (!value || value.split('-').length < 4)) {
ret = pandora.formatDate(value);
} else if (nameKeys.indexOf(key) > -1) {
ret = formatLink(value.split(', '), key);
} else if (listKeys.indexOf(key) > -1) {
ret = formatLink(value.split(', '), key);
} else if (['type', 'publisher'].indexOf(key) > -1) {
ret = formatLink(value, key);
} else {
if (isMixed[key]) {
ret = 'Mixed'
} else {
ret = pandora.formatDocumentKey(Ox.getObjectById(pandora.site.documentKeys, key), data);
}
}
return ret;
}
function getRightsLevelElement(rightsLevel) {
return Ox.Theme.formatColorLevel(
rightsLevel,
pandora.site.documentRightsLevels.map(function(rightsLevel) {
return rightsLevel.name;
})
);
}
// Normalizes a stored value for editing: empty string when unset,
// comma-joined string for list keys, otherwise the value unchanged.
function getValue(key, value) {
    if (!value) {
        return '';
    }
    return Ox.contains(listKeys, key) ? value.join(', ') : value;
}
// Renders, into $capabilities, one row per user level showing which
// capabilities apply at the given rights level. Editors see one row per
// site user level, each with a colored level label and a 'canSeeItem'
// column; non-editors see a single row for their own level plus a help
// button that opens the rights page.
function renderCapabilities(rightsLevel) {
    var capabilities = [].concat(
            // editors additionally get the "can see item" column
            canEdit ? [{name: 'canSeeItem', symbol: 'Find'}] : [],
            [
                {name: 'canPlayClips', symbol: 'PlayInToOut'},
                {name: 'canPlayVideo', symbol: 'Play'},
                {name: 'canDownloadVideo', symbol: 'Download'}
            ]
        ),
        // editors preview all user levels; others only their own
        userLevels = canEdit ? pandora.site.userLevels : [pandora.user.level];
    $capabilities.empty();
    userLevels.forEach(function(userLevel, i) {
        var $element,
            $line = $('<div>')
                .css({
                    height: '16px',
                    marginBottom: '4px'
                })
                .appendTo($capabilities);
        if (canEdit) {
            // color-coded label naming the user level for this row
            $element = Ox.Theme.formatColorLevel(i, userLevels.map(function(userLevel) {
                return Ox.toTitleCase(userLevel);
            }), [0, 240]);
            Ox.Label({
                textAlign: 'center',
                title: Ox.toTitleCase(userLevel),
                width: 60
            })
            .addClass('OxColor OxColorGradient')
            .css({
                float: 'left',
                height: '12px',
                paddingTop: '2px',
                background: $element.css('background'),
                fontSize: '8px',
                color: $element.css('color')
            })
            .data({OxColor: $element.data('OxColor')})
            .appendTo($line);
        }
        capabilities.forEach(function(capability) {
            // a capability is granted when the level's rank reaches the
            // current rights level; the button is tinted accordingly
            var hasCapability = pandora.hasCapability(capability.name, userLevel) >= rightsLevel,
                $element = Ox.Theme.formatColorLevel(hasCapability, ['', '']);
            Ox.Button({
                tooltip: (canEdit ? Ox.toTitleCase(userLevel) : 'You') + ' '
                    + (hasCapability ? 'can' : 'can\'t') + ' '
                    + Ox.toSlashes(capability.name)
                        .split('/').slice(1).join(' ')
                        .toLowerCase(),
                title: capability.symbol,
                type: 'image'
            })
            .addClass('OxColor OxColorGradient')
            .css({background: $element.css('background')})
            .css('margin' + (canEdit ? 'Left' : 'Right'), '4px')
            .data({OxColor: $element.data('OxColor')})
            .appendTo($line);
        });
        if (!canEdit) {
            // non-editors get a help button linking to the rights page
            Ox.Button({
                title: Ox._('Help'),
                tooltip: Ox._('About Rights'),
                type: 'image'
            })
            .css({marginLeft: '52px'})
            .bindEvent({
                click: function() {
                    pandora.UI.set({page: 'rights'});
                }
            })
            .appendTo($line);
        }
    });
}
// Renders one line of semicolon-separated, editable key/value fields into
// $text. Records every key as displayed (so renderRemainingKeys can skip
// it), and skips rendering entirely when the user cannot edit and no key
// has a value. Returns the group element, or undefined if nothing was
// rendered.
function renderGroup(keys) {
    var $element;
    // mark keys as displayed even if the group itself is not rendered
    keys.forEach(function(key) { displayedKeys.push(key) });
    if (canEdit || keys.filter(function(key) {
        return data[key];
    }).length) {
        $element = $('<div>').addClass('OxSelectable').css(css);
        keys.forEach(function(key, i) {
            if (canEdit || data[key]) {
                if ($element.children().length) {
                    // separator between fields on the same line
                    $('<span>').html('; ').appendTo($element);
                }
                $('<span>').html(formatKey(key)).appendTo($element);
                Ox.EditableContent({
                    clickLink: pandora.clickLink,
                    editable: canEdit,
                    format: function(value) {
                        return formatValue(key, value);
                    },
                    placeholder: formatLight(Ox._(isMixed[key] ? 'mixed' : 'unknown')),
                    tooltip: canEdit ? pandora.getEditTooltip() : '',
                    value: getValue(key, data[key])
                })
                .bindEvent({
                    submit: function(data) {
                        editMetadata(key, data.value);
                    }
                })
                .appendTo($element);
                if (isMixed[key] && Ox.contains(listKeys, key)) {
                    // for mixed list values, offer a dialog to add/remove
                    // entries across the whole selection
                    pandora.ui.addRemoveKeyDialog({
                        ids: ui.collectionSelection,
                        key: key,
                        section: ui.section
                    }).appendTo($element)
                }
            }
        });
        $element.appendTo($text);
    }
    return $element;
}
// Renders a final group containing every document key (except the
// wildcard '*') that no earlier group has displayed.
function renderRemainingKeys() {
    var remaining = pandora.site.documentKeys.filter(function(documentKey) {
        return documentKey.id != '*' && !Ox.contains(displayedKeys, documentKey.id);
    }).map(function(documentKey) {
        return documentKey.id;
    });
    if (remaining.length) {
        renderGroup(remaining);
    }
}
// Renders the rights-level widget into $rightsLevel: editors get a
// color-coded select that saves changes via pandora.api.editDocument
// (applying to the whole selection when multiple documents are selected);
// everyone else gets a static color-coded label. Also (re)creates the
// $capabilities container.
function renderRightsLevel() {
    var $rightsLevelElement = getRightsLevelElement(data.rightslevel),
        $rightsLevelSelect;
    $rightsLevel.empty();
    if (canEdit) {
        $rightsLevelSelect = Ox.Select({
            items: pandora.site.documentRightsLevels.map(function(rightsLevel, i) {
                return {id: i, title: rightsLevel.name};
            }),
            width: 128,
            value: data.rightslevel
        })
        .addClass('OxColor OxColorGradient')
        .css({
            marginBottom: '4px',
            background: $rightsLevelElement.css('background')
        })
        .data({OxColor: $rightsLevelElement.data('OxColor')})
        .bindEvent({
            change: function(event) {
                var rightsLevel = event.value;
                // recolor the select to match the newly chosen level
                $rightsLevelElement = getRightsLevelElement(rightsLevel);
                $rightsLevelSelect
                    .css({background: $rightsLevelElement.css('background')})
                    .data({OxColor: $rightsLevelElement.data('OxColor')})
                //renderCapabilities(rightsLevel);
                var edit = {
                    id: isMultiple ? ui.collectionSelection : data.id,
                    rightslevel: rightsLevel
                };
                pandora.api.editDocument(edit, function(result) {
                    Ox.Request.clearCache();
                    that.triggerEvent('change', Ox.extend({}, 'rightslevel', rightsLevel));
                });
            }
        })
        .appendTo($rightsLevel);
    } else {
        $rightsLevelElement
            .css({
                marginBottom: '4px'
            })
            .appendTo($rightsLevel);
    }
    $capabilities = $('<div>').appendTo($rightsLevel);
    //renderCapabilities(data.rightslevel);
}
// Toggles the info icon between 256px and 512px, animating the icon, its
// reflection and the text column to the new geometry, and persists the
// chosen size in the UI settings (infoIconSize).
function toggleIconSize() {
    iconSize = iconSize == 256 ? 512 : 256;
    // fit the icon's longer side into iconSize, preserving the aspect ratio
    iconWidth = iconRatio > 1 ? iconSize : Math.round(iconSize * iconRatio);
    iconHeight = iconRatio < 1 ? iconSize : Math.round(iconSize / iconRatio);
    // center narrow icons horizontally at the small size
    // (fix: the assignment was terminated with a stray comma operator)
    iconLeft = iconSize == 256 ? Math.floor((iconSize - iconWidth) / 2) : 0;
    $icon.animate({
        left: margin + iconLeft + 'px',
        width: iconWidth + 'px',
        height: iconHeight + 'px'
    }, 250);
    $reflection.animate({
        top: margin + iconHeight + 'px',
        width: iconSize + 'px',
        height: iconSize / 2 + 'px'
    }, 250);
    $reflectionIcon.animate({
        left: iconLeft + 'px',
        width: iconWidth + 'px',
        height: iconHeight + 'px'
    }, 250);
    $reflectionGradient.animate({
        width: iconSize + 'px',
        height: iconSize / 2 + 'px'
    }, 250);
    $text.animate({
        left: margin + (iconSize == 256 ? 256 : iconWidth) + margin + 'px'
    }, 250);
    pandora.UI.set({infoIconSize: iconSize});
}
// Public no-op: reloading the icon after an edit is currently disabled.
// The commented-out code below is the previous implementation, kept for
// reference (it refreshed the icon/reflection images and replayed the
// size animation).
that.reload = function() {
    /*
    var src = '/documents/' + data.id + '/512p.jpg?' + data.modified;
    $icon.attr({src: src});
    $reflectionIcon.attr({src: src});
    iconSize = iconSize == 256 ? 512 : 256;
    iconRatio = ui.icons == 'posters'
        ? (ui.showSitePosters ? pandora.site.posters.ratio : data.posterRatio) : 1;
    toggleIconSize();
    */
};
// On mousedown, focus this view (deferred so the event settles first)
// unless an input element currently has focus, so keyboard shortcuts
// target the view without stealing focus from active text inputs.
that.bindEvent({
    mousedown: function() {
        setTimeout(function() {
            !Ox.Focus.focusedElementIsInput() && that.gainFocus();
        });
    }
});
return that;
};

View file

@ -297,7 +297,9 @@ pandora.ui.infoView = function(data, isMixed) {
} }
if (data.type?.join('').includes('ai:')) { if (data.type?.join('').includes('ai:')) {
$('<div>').addClass('ai-preview').appendTo($text); $('<div>').css({
marginTop: '12px',
}).addClass('ai-preview').appendTo($text);
} }
if (data.type?.includes('source') && !isMultiple) { if (data.type?.includes('source') && !isMultiple) {
$('<a>').attr({ $('<a>').attr({
@ -724,7 +726,7 @@ pandora.ui.infoView = function(data, isMixed) {
<video src="${src_ai}" loop></video> <video src="${src_ai}" loop></video>
<style> <style>
.ai-preview video { .ai-preview video {
width: 33%; width: 45%;
} }
</style> </style>
` `