Merge branch 'master' into stable

This commit is contained in:
j 2024-09-11 15:01:33 +01:00
commit f9628d96ae
611 changed files with 157454 additions and 84676 deletions

2
.gitignore vendored
View file

@ -36,3 +36,5 @@ pandora/gunicorn_config.py
.DS_Store .DS_Store
.env .env
overlay/ overlay/
pandora/encoding.conf
pandora/tasks.conf

View file

@ -1,4 +1,4 @@
FROM 0x2620/pandora-base:latest FROM code.0x2620.org/0x2620/pandora-base:latest
LABEL maintainer="0x2620@0x2620.org" LABEL maintainer="0x2620@0x2620.org"

View file

@ -7,7 +7,7 @@
We recommend to run pan.do/ra inside of LXD or LXC or dedicated VM or server. We recommend to run pan.do/ra inside of LXD or LXC or dedicated VM or server.
You will need at least 2GB of free disk space You will need at least 2GB of free disk space
pan.do/ra is known to work with Ubuntu 18.04, 20.04 and Debian/10 (buster), pan.do/ra is known to work with Debian/12 (bookworm) and Ubuntu 20.04,
other distributions might also work, let us know if it works for you. other distributions might also work, let us know if it works for you.
Use the following commands as root to install pan.do/ra and all dependencies: Use the following commands as root to install pan.do/ra and all dependencies:
@ -16,7 +16,7 @@
cd /root cd /root
curl -sL https://pan.do/ra-install > pandora_install.sh curl -sL https://pan.do/ra-install > pandora_install.sh
chmod +x pandora_install.sh chmod +x pandora_install.sh
export BRANCH=stable # change to 'master' to get current developement version export BRANCH=master # change to 'stable' to get the latest release (sometimes outdated)
./pandora_install.sh 2>&1 | tee pandora_install.log ./pandora_install.sh 2>&1 | tee pandora_install.log
``` ```

29
ctl
View file

@ -27,25 +27,33 @@ if [ "$action" = "init" ]; then
$SUDO bin/python3 -m pip install -U --ignore-installed "pip<9" $SUDO bin/python3 -m pip install -U --ignore-installed "pip<9"
fi fi
if [ ! -d static/oxjs ]; then if [ ! -d static/oxjs ]; then
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxjs.git static/oxjs $SUDO git clone -b $branch https://code.0x2620.org/0x2620/oxjs.git static/oxjs
fi fi
$SUDO mkdir -p src $SUDO mkdir -p src
if [ ! -d src/oxtimelines ]; then if [ ! -d src/oxtimelines ]; then
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxtimelines.git src/oxtimelines $SUDO git clone -b $branch https://code.0x2620.org/0x2620/oxtimelines.git src/oxtimelines
fi fi
for package in oxtimelines python-ox; do for package in oxtimelines python-ox; do
cd ${BASE} cd ${BASE}
if [ ! -d src/${package} ]; then if [ ! -d src/${package} ]; then
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/${package}.git src/${package} $SUDO git clone -b $branch https://code.0x2620.org/0x2620/${package}.git src/${package}
fi fi
cd ${BASE}/src/${package} cd ${BASE}/src/${package}
$SUDO ${BASE}/bin/python setup.py develop
$SUDO ${BASE}/bin/pip install -e .
done done
cd ${BASE} cd ${BASE}
$SUDO ./bin/pip install -r requirements.txt $SUDO ./bin/pip install -r requirements.txt
if [ ! -e pandora/gunicorn_config.py ]; then for template in gunicorn_config.py encoding.conf tasks.conf; do
$SUDO cp pandora/gunicorn_config.py.in pandora/gunicorn_config.py if [ ! -e pandora/$template ]; then
fi $SUDO cp pandora/${template}.in pandora/$template
fi
done
exit 0
fi
if [ "$action" = "version" ]; then
git rev-list HEAD --count
exit 0 exit 0
fi fi
@ -73,10 +81,15 @@ if [ `whoami` != 'root' ]; then
exit 1 exit 1
fi fi
if [ "$action" = "install" ]; then if [ "$action" = "install" ]; then
cd "`dirname "$0"`" cd "`dirname "$self"`"
BASE=`pwd` BASE=`pwd`
if [ -x /bin/systemctl ]; then if [ -x /bin/systemctl ]; then
if [ -d /etc/systemd/system/ ]; then if [ -d /etc/systemd/system/ ]; then
for template in gunicorn_config.py encoding.conf tasks.conf; do
if [ ! -e pandora/$template ]; then
$SUDO cp pandora/${template}.in pandora/$template
fi
done
for service in $SERVICES; do for service in $SERVICES; do
if [ -e /lib/systemd/system/${service}.service ]; then if [ -e /lib/systemd/system/${service}.service ]; then
rm -f /lib/systemd/system/${service}.service \ rm -f /lib/systemd/system/${service}.service \

View file

@ -15,7 +15,6 @@ services:
- "127.0.0.1:2620:80" - "127.0.0.1:2620:80"
networks: networks:
- backend - backend
- default
links: links:
- pandora - pandora
- websocketd - websocketd
@ -28,7 +27,7 @@ services:
restart: unless-stopped restart: unless-stopped
db: db:
image: postgres:latest image: postgres:15
networks: networks:
- backend - backend
env_file: .env env_file: .env

View file

@ -1,4 +1,4 @@
FROM debian:buster FROM debian:12
LABEL maintainer="0x2620@0x2620.org" LABEL maintainer="0x2620@0x2620.org"

View file

@ -1,9 +1,17 @@
#!/bin/bash #!/bin/bash
UBUNTU_CODENAME=bionic
if [ -e /etc/os-release ]; then if [ -e /etc/os-release ]; then
. /etc/os-release . /etc/os-release
fi fi
if [ -z "$UBUNTU_CODENAME" ]; then
UBUNTU_CODENAME=bionic
fi
if [ "$VERSION_CODENAME" = "bullseye" ]; then
UBUNTU_CODENAME=focal
fi
if [ "$VERSION_CODENAME" = "bookworm" ]; then
UBUNTU_CODENAME=lunar
fi
export DEBIAN_FRONTEND=noninteractive export DEBIAN_FRONTEND=noninteractive
echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99languages echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99languages
@ -44,7 +52,6 @@ apt-get install -y \
python3-numpy \ python3-numpy \
python3-psycopg2 \ python3-psycopg2 \
python3-pyinotify \ python3-pyinotify \
python3-simplejson \
python3-lxml \ python3-lxml \
python3-cssselect \ python3-cssselect \
python3-html5lib \ python3-html5lib \
@ -53,7 +60,6 @@ apt-get install -y \
oxframe \ oxframe \
ffmpeg \ ffmpeg \
mkvtoolnix \ mkvtoolnix \
gpac \
imagemagick \ imagemagick \
poppler-utils \ poppler-utils \
ipython3 \ ipython3 \

View file

@ -11,7 +11,7 @@ else
proxy= proxy=
fi fi
docker build $proxy -t 0x2620/pandora-base base docker build $proxy -t code.0x2620.org/0x2620/pandora-base base
docker build -t 0x2620/pandora-nginx nginx docker build -t code.0x2620.org/0x2620/pandora-nginx nginx
cd .. cd ..
docker build -t 0x2620/pandora . docker build -t code.0x2620.org/0x2620/pandora .

View file

@ -6,7 +6,7 @@ user=pandora
export LANG=en_US.UTF-8 export LANG=en_US.UTF-8
mkdir -p /run/pandora mkdir -p /run/pandora
chown -R ${user}.${user} /run/pandora chown -R ${user}:${user} /run/pandora
update="/usr/bin/sudo -u $user -E -H /srv/pandora/update.py" update="/usr/bin/sudo -u $user -E -H /srv/pandora/update.py"
@ -32,7 +32,7 @@ if [ "$action" = "pandora" ]; then
/srv/pandora/pandora/manage.py init_db /srv/pandora/pandora/manage.py init_db
$update db $update db
echo "Generating static files..." echo "Generating static files..."
chown -R ${user}.${user} /srv/pandora/ chown -R ${user}:${user} /srv/pandora/
$update static $update static
touch /srv/pandora/initialized touch /srv/pandora/initialized
fi fi
@ -52,7 +52,7 @@ if [ "$action" = "encoding" ]; then
-A app worker \ -A app worker \
-Q encoding -n ${name} \ -Q encoding -n ${name} \
--pidfile /run/pandora/encoding.pid \ --pidfile /run/pandora/encoding.pid \
--maxtasksperchild 500 \ --max-tasks-per-child 500 \
-c 1 \ -c 1 \
-l INFO -l INFO
fi fi
@ -66,7 +66,7 @@ if [ "$action" = "tasks" ]; then
-A app worker \ -A app worker \
-Q default,celery -n ${name} \ -Q default,celery -n ${name} \
--pidfile /run/pandora/tasks.pid \ --pidfile /run/pandora/tasks.pid \
--maxtasksperchild 1000 \ --max-tasks-per-child 1000 \
-l INFO -l INFO
fi fi
if [ "$action" = "cron" ]; then if [ "$action" = "cron" ]; then
@ -103,9 +103,9 @@ fi
# pan.do/ra setup hooks # pan.do/ra setup hooks
if [ "$action" = "docker-compose.yml" ]; then if [ "$action" = "docker-compose.yml" ]; then
cat /srv/pandora_base/docker-compose.yml | \ cat /srv/pandora_base/docker-compose.yml | \
sed "s#build: \.#image: 0x2620/pandora:latest#g" | \ sed "s#build: \.#image: code.0x2620.org/0x2620/pandora:latest#g" | \
sed "s#\./overlay:#.:#g" | \ sed "s#\./overlay:#.:#g" | \
sed "s#build: docker/nginx#image: 0x2620/pandora-nginx:latest#g" sed "s#build: docker/nginx#image: code.0x2620.org/0x2620/pandora-nginx:latest#g"
exit exit
fi fi
if [ "$action" = ".env" ]; then if [ "$action" = ".env" ]; then
@ -131,5 +131,5 @@ echo " docker run 0x2620/pandora setup | sh"
echo echo
echo adjust created files to match your needs and run: echo adjust created files to match your needs and run:
echo echo
echo " docker-compose up" echo " docker compose up"
echo echo

View file

@ -1,5 +1,5 @@
#!/bin/bash #!/bin/bash
# push new version of pan.do/ra to docker hub # push new version of pan.do/ra to code.0x2620.org
set -e set -e
cd /tmp cd /tmp
@ -7,6 +7,6 @@ git clone https://code.0x2620.org/0x2620/pandora
cd pandora cd pandora
./docker/build.sh ./docker/build.sh
docker push 0x2620/pandora-base:latest docker push code.0x2620.org/0x2620/pandora-base:latest
docker push 0x2620/pandora-nginx:latest docker push code.0x2620.org/0x2620/pandora-nginx:latest
docker push 0x2620/pandora:latest docker push code.0x2620.org/0x2620/pandora:latest

View file

@ -1,18 +1,18 @@
#!/bin/sh #!/bin/sh
docker run 0x2620/pandora docker-compose.yml > docker-compose.yml docker run --rm code.0x2620.org/0x2620/pandora docker-compose.yml > docker-compose.yml
if [ ! -e .env ]; then if [ ! -e .env ]; then
docker run 0x2620/pandora .env > .env docker run --rm code.0x2620.org/0x2620/pandora .env > .env
echo .env >> .gitignore echo .env >> .gitignore
fi fi
if [ ! -e config.jsonc ]; then if [ ! -e config.jsonc ]; then
docker run 0x2620/pandora config.jsonc > config.jsonc docker run --rm code.0x2620.org/0x2620/pandora config.jsonc > config.jsonc
fi fi
cat > README.md << EOF cat > README.md << EOF
pan.do/ra docker instance pan.do/ra docker instance
this folder was created with this folder was created with
docker run 0x2620/pandora setup | sh docker run --rm code.0x2620.org/0x2620/pandora setup | sh
To start pan.do/ra adjust the files in this folder: To start pan.do/ra adjust the files in this folder:
@ -22,11 +22,14 @@ To start pan.do/ra adjust the files in this folder:
and to get started run this: and to get started run this:
docker-compose up -d docker compose up -d
To update pan.do/ra run: To update pan.do/ra run:
docker-compose run pandora ctl update docker compose run --rm pandora ctl update
To run pan.do/ra manage shell:
docker compose run --rm pandora ctl manage shell
EOF EOF
touch __init__.py touch __init__.py

View file

@ -17,6 +17,7 @@ server {
#server_name pandora.YOURDOMAIN.COM; #server_name pandora.YOURDOMAIN.COM;
listen 80 default; listen 80 default;
listen [::]:80 default;
access_log /var/log/nginx/pandora.access.log; access_log /var/log/nginx/pandora.access.log;
error_log /var/log/nginx/pandora.error.log; error_log /var/log/nginx/pandora.error.log;

1
etc/sudoers.d/pandora Normal file
View file

@ -0,0 +1 @@
pandora ALL=(ALL:ALL) NOPASSWD:/usr/local/bin/pandoractl

View file

@ -11,7 +11,7 @@ PIDFile=/run/pandora/cron.pid
WorkingDirectory=/srv/pandora/pandora WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \ ExecStart=/srv/pandora/bin/celery \
-A app beat \ -A app beat \
-s /run/pandora/celerybeat-schedule \ --scheduler django_celery_beat.schedulers:DatabaseScheduler \
--pidfile /run/pandora/cron.pid \ --pidfile /run/pandora/cron.pid \
-l INFO -l INFO
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID

View file

@ -7,14 +7,16 @@ Type=simple
Restart=always Restart=always
User=pandora User=pandora
Group=pandora Group=pandora
EnvironmentFile=/srv/pandora/pandora/encoding.conf
PIDFile=/run/pandora/encoding.pid PIDFile=/run/pandora/encoding.pid
WorkingDirectory=/srv/pandora/pandora WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \ ExecStart=/srv/pandora/bin/celery \
-A app worker \ -A app worker \
-Q encoding -n pandora-encoding \ -Q encoding -n pandora-encoding \
--pidfile /run/pandora/encoding.pid \ --pidfile /run/pandora/encoding.pid \
--maxtasksperchild 500 \ -c $CONCURRENCY \
-l INFO --max-tasks-per-child $MAX_TASKS_PER_CHILD \
-l $LOGLEVEL
ExecReload=/bin/kill -TERM $MAINPID ExecReload=/bin/kill -TERM $MAINPID
[Install] [Install]

View file

@ -7,14 +7,16 @@ Type=simple
Restart=always Restart=always
User=pandora User=pandora
Group=pandora Group=pandora
EnvironmentFile=/srv/pandora/pandora/tasks.conf
PIDFile=/run/pandora/tasks.pid PIDFile=/run/pandora/tasks.pid
WorkingDirectory=/srv/pandora/pandora WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \ ExecStart=/srv/pandora/bin/celery \
-A app worker \ -A app worker \
-Q default,celery -n pandora-default \ -Q default,celery -n pandora-default \
--pidfile /run/pandora/tasks.pid \ --pidfile /run/pandora/tasks.pid \
--maxtasksperchild 1000 \ -c $CONCURRENCY \
-l INFO --max-tasks-per-child $MAX_TASKS_PER_CHILD \
-l $LOGLEVEL
ExecReload=/bin/kill -TERM $MAINPID ExecReload=/bin/kill -TERM $MAINPID
[Install] [Install]

View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class AnnotationConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'annotation'

View file

@ -27,6 +27,7 @@ class Command(BaseCommand):
parser.add_argument('username', help='username') parser.add_argument('username', help='username')
parser.add_argument('item', help='item') parser.add_argument('item', help='item')
parser.add_argument('layer', help='layer') parser.add_argument('layer', help='layer')
parser.add_argument('language', help='language', default="")
parser.add_argument('filename', help='filename.srt') parser.add_argument('filename', help='filename.srt')
def handle(self, *args, **options): def handle(self, *args, **options):
@ -34,6 +35,7 @@ class Command(BaseCommand):
public_id = options['item'] public_id = options['item']
layer_id = options['layer'] layer_id = options['layer']
filename = options['filename'] filename = options['filename']
language = options.get("language")
user = User.objects.get(username=username) user = User.objects.get(username=username)
item = Item.objects.get(public_id=public_id) item = Item.objects.get(public_id=public_id)
@ -47,6 +49,9 @@ class Command(BaseCommand):
for i in range(len(annotations)-1): for i in range(len(annotations)-1):
if annotations[i]['out'] == annotations[i+1]['in']: if annotations[i]['out'] == annotations[i+1]['in']:
annotations[i]['out'] = annotations[i]['out'] - 0.001 annotations[i]['out'] = annotations[i]['out'] - 0.001
if language:
for annotation in annotations:
annotation["value"] = '<span lang="%s">%s</span>' % (language, annotation["value"])
tasks.add_annotations.delay({ tasks.add_annotations.delay({
'item': item.public_id, 'item': item.public_id,
'layer': layer_id, 'layer': layer_id,

View file

@ -0,0 +1,18 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('annotation', '0003_auto_20160219_1537'),
]
operations = [
migrations.AlterField(
model_name='annotation',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -163,28 +163,25 @@ class Annotation(models.Model):
self.sortvalue = None self.sortvalue = None
self.languages = None self.languages = None
with transaction.atomic(): if not self.clip or self.start != self.clip.start or self.end != self.clip.end:
if not self.clip or self.start != self.clip.start or self.end != self.clip.end: self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
with transaction.atomic():
if set_public_id: if set_public_id:
self.set_public_id() self.set_public_id()
super(Annotation, self).save(*args, **kwargs) super(Annotation, self).save(*args, **kwargs)
if self.clip: if self.clip:
Clip.objects.filter(**{ self.clip.update_findvalue()
'id': self.clip.id, setattr(self.clip, self.layer, True)
self.layer: False self.clip.save(update_fields=[self.layer, 'sortvalue', 'findvalue'])
}).update(**{self.layer: True})
# update clip.findvalue
self.clip.save()
# update matches in bulk if called from load_subtitles # update matches in bulk if called from load_subtitles
if not delay_matches: if not delay_matches:
self.update_matches() self.update_matches()
self.update_documents() self.update_documents()
self.update_translations() self.update_translations()
def update_matches(self): def update_matches(self):
from place.models import Place from place.models import Place
@ -267,7 +264,10 @@ class Annotation(models.Model):
from translation.models import Translation from translation.models import Translation
layer = self.get_layer() layer = self.get_layer()
if layer.get('translate'): if layer.get('translate'):
Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT}) for lang in settings.CONFIG['languages']:
if lang == settings.CONFIG['language']:
continue
Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT})
def delete(self, *args, **kwargs): def delete(self, *args, **kwargs):
with transaction.atomic(): with transaction.atomic():

View file

@ -5,12 +5,12 @@ from django.contrib.auth import get_user_model
from django.db import transaction from django.db import transaction
import ox import ox
from celery.task import task from app.celery import app
from .models import Annotation from .models import Annotation
@task(ignore_results=False, queue='default') @app.task(ignore_results=False, queue='default')
def add_annotations(data): def add_annotations(data):
from item.models import Item from item.models import Item
from entity.models import Entity from entity.models import Entity
@ -51,7 +51,7 @@ def add_annotations(data):
annotation.item.update_facets() annotation.item.update_facets()
return True return True
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_item(id, force=False): def update_item(id, force=False):
from item.models import Item from item.models import Item
from clip.models import Clip from clip.models import Clip
@ -72,7 +72,7 @@ def update_item(id, force=False):
a.item.save() a.item.save()
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_annotations(layers, value): def update_annotations(layers, value):
items = {} items = {}

View file

@ -180,10 +180,10 @@ def addAnnotation(request, data):
text='invalid data')) text='invalid data'))
item = get_object_or_404_json(Item, public_id=data['item']) item = get_object_or_404_json(Item, public_id=data['item'])
layer_id = data['layer'] layer_id = data['layer']
layer = get_by_id(settings.CONFIG['layers'], layer_id) layer = get_by_id(settings.CONFIG['layers'], layer_id)
if layer['canAddAnnotations'].get(request.user.profile.get_level()): if layer['canAddAnnotations'].get(request.user.profile.get_level()) or item.editable(request.user):
if layer['type'] == 'entity': if layer['type'] == 'entity':
try: try:
value = Entity.get_by_name(ox.decode_html(data['value']), layer['entity']).get_id() value = Entity.get_by_name(ox.decode_html(data['value']), layer['entity']).get_id()
@ -241,8 +241,7 @@ def addAnnotations(request, data):
layer_id = data['layer'] layer_id = data['layer']
layer = get_by_id(settings.CONFIG['layers'], layer_id) layer = get_by_id(settings.CONFIG['layers'], layer_id)
if item.editable(request.user) \ if item.editable(request.user):
and layer['canAddAnnotations'].get(request.user.profile.get_level()):
response = json_response() response = json_response()
data['user'] = request.user.username data['user'] = request.user.username
t = add_annotations.delay(data) t = add_annotations.delay(data)

7
pandora/app/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class AppConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'app'

View file

@ -6,16 +6,8 @@ root_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
root_dir = os.path.dirname(root_dir) root_dir = os.path.dirname(root_dir)
os.chdir(root_dir) os.chdir(root_dir)
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings') os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
app = Celery('pandora') app = Celery('pandora', broker_connection_retry_on_startup=True)
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY') app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks() app.autodiscover_tasks()

View file

@ -133,7 +133,13 @@ def load_config(init=False):
added = [] added = []
for key in sorted(d): for key in sorted(d):
if key not in c: if key not in c:
added.append("\"%s\": %s," % (key, json.dumps(d[key]))) if key not in (
'hidden',
'find',
'findDocuments',
'videoPoints',
):
added.append("\"%s\": %s," % (key, json.dumps(d[key])))
c[key] = d[key] c[key] = d[key]
if added: if added:
sys.stderr.write("adding default %s:\n\t" % section) sys.stderr.write("adding default %s:\n\t" % section)
@ -321,7 +327,11 @@ def update_static():
#locale #locale
for f in sorted(glob(os.path.join(settings.STATIC_ROOT, 'json/locale.pandora.*.json'))): for f in sorted(glob(os.path.join(settings.STATIC_ROOT, 'json/locale.pandora.*.json'))):
with open(f) as fd: with open(f) as fd:
locale = json.load(fd) try:
locale = json.load(fd)
except:
print("failed to parse %s" % f)
raise
site_locale = f.replace('locale.pandora', 'locale.' + settings.CONFIG['site']['id']) site_locale = f.replace('locale.pandora', 'locale.' + settings.CONFIG['site']['id'])
locale_file = f.replace('locale.pandora', 'locale') locale_file = f.replace('locale.pandora', 'locale')
print('write', locale_file) print('write', locale_file)
@ -365,13 +375,3 @@ def update_geoip(force=False):
def init(): def init():
load_config(True) load_config(True)
def shutdown():
if settings.RELOADER_RUNNING:
RUN_RELOADER = False
settings.RELOADER_RUNNING = False
if NOTIFIER:
NOTIFIER.stop()

View file

@ -0,0 +1,23 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='page',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='settings',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -2,13 +2,16 @@
import datetime import datetime
from celery.task import periodic_task from app.celery import app
from celery.schedules import crontab from celery.schedules import crontab
@app.task(queue='encoding')
@periodic_task(run_every=crontab(hour=6, minute=0), queue='encoding')
def cron(**kwargs): def cron(**kwargs):
from django.db import transaction from django.db import transaction
from django.contrib.sessions.models import Session from django.contrib.sessions.models import Session
Session.objects.filter(expire_date__lt=datetime.datetime.now()).delete() Session.objects.filter(expire_date__lt=datetime.datetime.now()).delete()
transaction.commit() transaction.commit()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(crontab(hour=6, minute=0), cron.s())

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import copy
from datetime import datetime from datetime import datetime
import base64
import copy
from django.shortcuts import render, redirect from django.shortcuts import render, redirect
from django.conf import settings from django.conf import settings
@ -53,17 +53,18 @@ def embed(request, id):
}) })
def redirect_url(request, url): def redirect_url(request, url):
if request.META['QUERY_STRING']: try:
url += "?" + request.META['QUERY_STRING'] url = base64.decodebytes(url.encode()).decode()
except:
pass
if settings.CONFIG['site'].get('sendReferrer', False): if settings.CONFIG['site'].get('sendReferrer', False):
return redirect(url) return redirect(url)
else: else:
return HttpResponse('<script>document.location.href=%s;</script>'%json.dumps(url)) return HttpResponse('<script>document.location.href=%s;</script>' % json.dumps(url))
def opensearch_xml(request): def opensearch_xml(request):
osd = ET.Element('OpenSearchDescription') osd = ET.Element('OpenSearchDescription')
osd.attrib['xmlns']="http://a9.com/-/spec/opensearch/1.1/" osd.attrib['xmlns'] = "http://a9.com/-/spec/opensearch/1.1/"
e = ET.SubElement(osd, 'ShortName') e = ET.SubElement(osd, 'ShortName')
e.text = settings.SITENAME e.text = settings.SITENAME
e = ET.SubElement(osd, 'Description') e = ET.SubElement(osd, 'Description')
@ -162,7 +163,7 @@ def init(request, data):
del config['keys'] del config['keys']
if 'HTTP_ACCEPT_LANGUAGE' in request.META: if 'HTTP_ACCEPT_LANGUAGE' in request.META:
response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0] response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0].split(',')[0]
if request.META.get('HTTP_X_PREFIX') == 'NO': if request.META.get('HTTP_X_PREFIX') == 'NO':
config['site']['videoprefix'] = '' config['site']['videoprefix'] = ''
@ -245,7 +246,7 @@ def getEmbedDefaults(request, data):
i = qs[0].cache i = qs[0].cache
response['data']['item'] = i['id'] response['data']['item'] = i['id']
response['data']['itemDuration'] = i['duration'] response['data']['itemDuration'] = i['duration']
response['data']['itemRatio'] = i['videoRatio'] response['data']['itemRatio'] = i.get('videoRatio', settings.CONFIG['video']['previewRatio'])
qs = List.objects.exclude(status='private').order_by('name') qs = List.objects.exclude(status='private').order_by('name')
if qs.exists(): if qs.exists():
i = qs[0].json() i = qs[0].json()

7
pandora/archive/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ArchiveConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'archive'

View file

@ -1,10 +1,11 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import json import json
import subprocess import logging
import shutil
import tempfile
import os import os
import shutil
import subprocess
import tempfile
import ox import ox
from django.conf import settings from django.conf import settings
@ -14,6 +15,9 @@ from item.tasks import load_subtitles
from . import models from . import models
logger = logging.getLogger('pandora.' + __name__)
info_keys = [ info_keys = [
'title', 'title',
'description', 'description',
@ -37,7 +41,7 @@ info_key_map = {
} }
def get_info(url, referer=None): def get_info(url, referer=None):
cmd = ['youtube-dl', '-j', '--all-subs', url] cmd = ['yt-dlp', '-j', '--all-subs', url]
if referer: if referer:
cmd += ['--referer', referer] cmd += ['--referer', referer]
p = subprocess.Popen(cmd, p = subprocess.Popen(cmd,
@ -88,6 +92,15 @@ def add_subtitles(item, media, tmp):
sub.selected = True sub.selected = True
sub.save() sub.save()
def load_formats(url):
cmd = ['yt-dlp', '-q', url, '-j', '-F']
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
formats = stdout.decode().strip().split('\n')[-1]
return json.loads(formats)
def download(item_id, url, referer=None): def download(item_id, url, referer=None):
item = Item.objects.get(public_id=item_id) item = Item.objects.get(public_id=item_id)
info = get_info(url, referer) info = get_info(url, referer)
@ -99,18 +112,19 @@ def download(item_id, url, referer=None):
if isinstance(tmp, bytes): if isinstance(tmp, bytes):
tmp = tmp.decode('utf-8') tmp = tmp.decode('utf-8')
os.chdir(tmp) os.chdir(tmp)
cmd = ['youtube-dl', '-q', media['url']] cmd = ['yt-dlp', '-q', media['url']]
if referer: if referer:
cmd += ['--referer', referer] cmd += ['--referer', referer]
elif 'referer' in media: elif 'referer' in media:
cmd += ['--referer', media['referer']] cmd += ['--referer', media['referer']]
cmd += ['-o', '%(title)80s.%(ext)s']
if settings.CONFIG['video'].get('reuseUpload', False): if settings.CONFIG['video'].get('reuseUpload', False):
max_resolution = max(settings.CONFIG['video']['resolutions']) max_resolution = max(settings.CONFIG['video']['resolutions'])
format = settings.CONFIG['video']['formats'][0] format = settings.CONFIG['video']['formats'][0]
if format == 'mp4': if format == 'mp4':
cmd += [ cmd += [
'-f', 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio', '-f', 'bestvideo[height<=%s][ext=mp4]+bestaudio[ext=m4a]' % max_resolution,
'--merge-output-format', 'mp4' '--merge-output-format', 'mp4'
] ]
elif format == 'webm': elif format == 'webm':
@ -120,6 +134,50 @@ def download(item_id, url, referer=None):
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True) stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
if stderr and b'Requested format is not available.' in stderr:
formats = load_formats(url)
has_audio = bool([fmt for fmt in formats['formats'] if fmt['resolution'] == 'audio only'])
has_video = bool([fmt for fmt in formats['formats'] if 'x' in fmt['resolution']])
cmd = [
'yt-dlp', '-q', url,
'-o', '%(title)80s.%(ext)s'
]
if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
if has_video and not has_audio:
cmd += [
'-f', 'bestvideo[height<=%s][ext=mp4]' % max_resolution,
]
elif not has_video and has_audio:
cmd += [
'bestaudio[ext=m4a]'
]
else:
cmd = []
if cmd:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if stderr and b'Requested format is not available.' in stderr:
cmd = [
'yt-dlp', '-q', url,
'-o', '%(title)80s.%(ext)s'
]
if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
if cmd:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if stdout or stderr:
logger.error("import failed:\n%s\n%s\n%s", " ".join(cmd), stdout.decode(), stderr.decode())
parts = list(os.listdir(tmp)) parts = list(os.listdir(tmp))
if parts: if parts:
part = 1 part = 1
@ -147,6 +205,7 @@ def download(item_id, url, referer=None):
f.extract_stream() f.extract_stream()
status = True status = True
else: else:
logger.error("failed to import %s file already exists %s", url, oshash)
status = 'file exists' status = 'file exists'
if len(parts) == 1: if len(parts) == 1:
add_subtitles(f.item, media, tmp) add_subtitles(f.item, media, tmp)

View file

@ -1,26 +1,30 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import os from distutils.spawn import find_executable
from glob import glob
from os.path import exists from os.path import exists
import fractions import fractions
import logging
import math import math
import os
import re import re
import shutil import shutil
import subprocess import subprocess
import tempfile import tempfile
import time import time
from distutils.spawn import find_executable
from glob import glob
import numpy as np import numpy as np
import ox import ox
import ox.image import ox.image
from ox.utils import json from ox.utils import json
from django.conf import settings from django.conf import settings
from PIL import Image from PIL import Image, ImageOps
from .chop import Chop, make_keyframe_index from .chop import Chop, make_keyframe_index
logger = logging.getLogger('pandora.' + __name__)
img_extension = 'jpg' img_extension = 'jpg'
MAX_DISTANCE = math.sqrt(3 * pow(255, 2)) MAX_DISTANCE = math.sqrt(3 * pow(255, 2))
@ -57,14 +61,15 @@ def supported_formats():
stdout = stdout.decode('utf-8') stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8') stderr = stderr.decode('utf-8')
version = stderr.split('\n')[0].split(' ')[2] version = stderr.split('\n')[0].split(' ')[2]
mp4 = 'libx264' in stdout and bool(re.compile('DEA.L. aac').findall(stdout))
return { return {
'version': version.split('.'), 'version': version.split('.'),
'ogg': 'libtheora' in stdout and 'libvorbis' in stdout, 'ogg': 'libtheora' in stdout and 'libvorbis' in stdout,
'webm': 'libvpx' in stdout and 'libvorbis' in stdout, 'webm': 'libvpx' in stdout and 'libvorbis' in stdout,
'vp8': 'libvpx' in stdout and 'libvorbis' in stdout, 'vp8': 'libvpx' in stdout and 'libvorbis' in stdout,
'vp9': 'libvpx-vp9' in stdout and 'libopus' in stdout, 'vp9': 'libvpx-vp9' in stdout and 'libopus' in stdout,
'mp4': 'libx264' in stdout and bool(re.compile('DEA.L. aac').findall(stdout)), 'mp4': mp4,
'h264': 'libx264' in stdout and bool(re.compile('DEA.L. aac').findall(stdout)), 'h264': mp4,
} }
def stream(video, target, profile, info, audio_track=0, flags={}): def stream(video, target, profile, info, audio_track=0, flags={}):
@ -155,7 +160,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
else: else:
height = 96 height = 96
if settings.FFMPEG_SUPPORTS_VP9: if settings.USE_VP9 and settings.FFMPEG_SUPPORTS_VP9:
audio_codec = 'libopus' audio_codec = 'libopus'
video_codec = 'libvpx-vp9' video_codec = 'libvpx-vp9'
@ -218,7 +223,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
bitrate = height*width*fps*bpp/1000 bitrate = height*width*fps*bpp/1000
video_settings = trim + [ video_settings = trim + [
'-vb', '%dk' % bitrate, '-b:v', '%dk' % bitrate,
'-aspect', aspect, '-aspect', aspect,
# '-vf', 'yadif', # '-vf', 'yadif',
'-max_muxing_queue_size', '512', '-max_muxing_queue_size', '512',
@ -246,6 +251,8 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
'-level', '4.0', '-level', '4.0',
'-pix_fmt', 'yuv420p', '-pix_fmt', 'yuv420p',
] ]
if info['video'][0].get("force_framerate"):
video_settings += ['-r:v', str(fps)]
video_settings += ['-map', '0:%s,0:0' % info['video'][0]['id']] video_settings += ['-map', '0:%s,0:0' % info['video'][0]['id']]
audio_only = False audio_only = False
else: else:
@ -285,7 +292,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
ac = min(ac, audiochannels) ac = min(ac, audiochannels)
audio_settings += ['-ac', str(ac)] audio_settings += ['-ac', str(ac)]
if audiobitrate: if audiobitrate:
audio_settings += ['-ab', audiobitrate] audio_settings += ['-b:a', audiobitrate]
if format == 'mp4': if format == 'mp4':
audio_settings += ['-c:a', 'aac', '-strict', '-2'] audio_settings += ['-c:a', 'aac', '-strict', '-2']
elif audio_codec == 'libopus': elif audio_codec == 'libopus':
@ -318,14 +325,15 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
pass1_post = post[:] pass1_post = post[:]
pass1_post[-1] = '/dev/null' pass1_post[-1] = '/dev/null'
if format == 'webm': if format == 'webm':
pass1_post = ['-speed', '4'] + pass1_post if video_codec != 'libvpx-vp9':
pass1_post = ['-speed', '4'] + pass1_post
post = ['-speed', '1'] + post post = ['-speed', '1'] + post
cmds.append(base + ['-an', '-pass', '1', '-passlogfile', '%s.log' % target] cmds.append(base + ['-pass', '1', '-passlogfile', '%s.log' % target]
+ video_settings + pass1_post) + video_settings + ['-an'] + pass1_post)
cmds.append(base + ['-pass', '2', '-passlogfile', '%s.log' % target] cmds.append(base + ['-pass', '2', '-passlogfile', '%s.log' % target]
+ audio_settings + video_settings + post) + video_settings + audio_settings + post)
else: else:
cmds.append(base + audio_settings + video_settings + post) cmds.append(base + video_settings + audio_settings + post)
if settings.FFMPEG_DEBUG: if settings.FFMPEG_DEBUG:
print('\n'.join([' '.join(cmd) for cmd in cmds])) print('\n'.join([' '.join(cmd) for cmd in cmds]))
@ -433,10 +441,15 @@ def frame_direct(video, target, position):
r = run_command(cmd) r = run_command(cmd)
return r == 0 return r == 0
def open_image_rgb(image_source):
source = Image.open(image_source)
source = ImageOps.exif_transpose(source)
source = source.convert('RGB')
return source
def resize_image(image_source, image_output, width=None, size=None): def resize_image(image_source, image_output, width=None, size=None):
if exists(image_source): if exists(image_source):
source = Image.open(image_source).convert('RGB') source = open_image_rgb(image_source)
source_width = source.size[0] source_width = source.size[0]
source_height = source.size[1] source_height = source.size[1]
if size: if size:
@ -457,7 +470,7 @@ def resize_image(image_source, image_output, width=None, size=None):
height = max(height, 1) height = max(height, 1)
if width < source_width: if width < source_width:
resize_method = Image.ANTIALIAS resize_method = Image.LANCZOS
else: else:
resize_method = Image.BICUBIC resize_method = Image.BICUBIC
output = source.resize((width, height), resize_method) output = source.resize((width, height), resize_method)
@ -471,7 +484,7 @@ def timeline(video, prefix, modes=None, size=None):
size = [64, 16] size = [64, 16]
if isinstance(video, str): if isinstance(video, str):
video = [video] video = [video]
cmd = ['../bin/oxtimelines', cmd = [os.path.normpath(os.path.join(settings.BASE_DIR, '../bin/oxtimelines')),
'-s', ','.join(map(str, reversed(sorted(size)))), '-s', ','.join(map(str, reversed(sorted(size)))),
'-m', ','.join(modes), '-m', ','.join(modes),
'-o', prefix, '-o', prefix,
@ -603,7 +616,7 @@ def timeline_strip(item, cuts, info, prefix):
print(frame, 'cut', c, 'frame', s, frame, 'width', widths[s], box) print(frame, 'cut', c, 'frame', s, frame, 'width', widths[s], box)
# FIXME: why does this have to be frame+1? # FIXME: why does this have to be frame+1?
frame_image = Image.open(item.frame((frame+1)/fps)) frame_image = Image.open(item.frame((frame+1)/fps))
frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.ANTIALIAS) frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.LANCZOS)
for x_ in range(widths[s]): for x_ in range(widths[s]):
line_image.append(frame_image.crop((x_, 0, x_ + 1, timeline_height))) line_image.append(frame_image.crop((x_, 0, x_ + 1, timeline_height)))
frame += widths[s] frame += widths[s]
@ -731,19 +744,24 @@ def remux_stream(src, dst):
cmd = [ cmd = [
settings.FFMPEG, settings.FFMPEG,
'-nostats', '-loglevel', 'error', '-nostats', '-loglevel', 'error',
'-map_metadata', '-1', '-sn',
'-i', src, '-i', src,
'-map_metadata', '-1', '-sn',
] + video + [ ] + video + [
] + audio + [ ] + audio + [
'-movflags', '+faststart', '-movflags', '+faststart',
dst dst
] ]
print(cmd)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=open('/dev/null', 'w'), stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w'), stderr=subprocess.STDOUT,
close_fds=True) close_fds=True)
p.wait() stdout, stderr = p.communicate()
return True, None if stderr:
logger.error("failed to remux %s %s", cmd, stderr)
return False, stderr
else:
return True, None
def ffprobe(path, *args): def ffprobe(path, *args):

View file

@ -0,0 +1,100 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('archive', '0005_auto_20180804_1554'),
]
operations = [
migrations.AlterField(
model_name='file',
name='extension',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='file',
name='info',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='file',
name='language',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='part',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='part_title',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='path',
field=models.CharField(default='', max_length=2048),
),
migrations.AlterField(
model_name='file',
name='sort_path',
field=models.CharField(default='', max_length=2048),
),
migrations.AlterField(
model_name='file',
name='type',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='file',
name='version',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='frame',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='instance',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='stream',
name='error',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='stream',
name='format',
field=models.CharField(default='webm', max_length=255),
),
migrations.AlterField(
model_name='stream',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='stream',
name='info',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='volume',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -0,0 +1,17 @@
# Generated by Django 4.2.3 on 2023-08-18 12:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archive', '0006_alter_file_extension_alter_file_id_alter_file_info_and_more'),
]
operations = [
migrations.AddIndex(
model_name='stream',
index=models.Index(fields=['file', 'source', 'available'], name='archive_str_file_id_69a542_idx'),
),
]

View file

@ -151,8 +151,10 @@ class File(models.Model):
self.sampleate = 0 self.sampleate = 0
self.channels = 0 self.channels = 0
if self.framerate: if self.framerate and self.duration > 0:
self.pixels = int(self.width * self.height * float(utils.parse_decimal(self.framerate)) * self.duration) self.pixels = int(self.width * self.height * float(utils.parse_decimal(self.framerate)) * self.duration)
else:
self.pixels = 0
def get_path_info(self): def get_path_info(self):
data = {} data = {}
@ -181,6 +183,13 @@ class File(models.Model):
for type in ox.movie.EXTENSIONS: for type in ox.movie.EXTENSIONS:
if data['extension'] in ox.movie.EXTENSIONS[type]: if data['extension'] in ox.movie.EXTENSIONS[type]:
data['type'] = type data['type'] = type
if data['extension'] == 'ogg' and self.info.get('video'):
data['type'] = 'video'
if data['type'] == 'unknown':
if self.info.get('video'):
data['type'] = 'video'
elif self.info.get('audio'):
data['type'] = 'audio'
if 'part' in data and isinstance(data['part'], int): if 'part' in data and isinstance(data['part'], int):
data['part'] = str(data['part']) data['part'] = str(data['part'])
return data return data
@ -268,7 +277,7 @@ class File(models.Model):
if self.type not in ('audio', 'video'): if self.type not in ('audio', 'video'):
self.duration = None self.duration = None
else: elif self.id:
duration = sum([s.info.get('duration', 0) duration = sum([s.info.get('duration', 0)
for s in self.streams.filter(source=None)]) for s in self.streams.filter(source=None)])
if duration: if duration:
@ -276,7 +285,7 @@ class File(models.Model):
if self.is_subtitle: if self.is_subtitle:
self.available = self.data and True or False self.available = self.data and True or False
else: elif self.id:
self.available = not self.uploading and \ self.available = not self.uploading and \
self.streams.filter(source=None, available=True).count() self.streams.filter(source=None, available=True).count()
super(File, self).save(*args, **kwargs) super(File, self).save(*args, **kwargs)
@ -365,8 +374,8 @@ class File(models.Model):
self.info.update(stream.info) self.info.update(stream.info)
self.parse_info() self.parse_info()
self.save() self.save()
if stream.info.get('video'): #if stream.info.get('video'):
extract.make_keyframe_index(stream.media.path) # extract.make_keyframe_index(stream.media.path)
return True, stream.media.size return True, stream.media.size
return save_chunk(stream, stream.media, chunk, offset, name, done_cb) return save_chunk(stream, stream.media, chunk, offset, name, done_cb)
return False, 0 return False, 0
@ -395,7 +404,7 @@ class File(models.Model):
config = settings.CONFIG['video'] config = settings.CONFIG['video']
height = self.info['video'][0]['height'] if self.info.get('video') else None height = self.info['video'][0]['height'] if self.info.get('video') else None
max_resolution = max(config['resolutions']) max_resolution = max(config['resolutions'])
if height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'): if height and height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'):
vcodec = self.get_codec('video') vcodec = self.get_codec('video')
acodec = self.get_codec('audio') acodec = self.get_codec('audio')
if vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS: if vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:
@ -406,7 +415,7 @@ class File(models.Model):
config = settings.CONFIG['video'] config = settings.CONFIG['video']
height = self.info['video'][0]['height'] if self.info.get('video') else None height = self.info['video'][0]['height'] if self.info.get('video') else None
max_resolution = max(config['resolutions']) max_resolution = max(config['resolutions'])
if height <= max_resolution and config['formats'][0] == self.extension: if height and height <= max_resolution and config['formats'][0] == self.extension:
vcodec = self.get_codec('video') vcodec = self.get_codec('video')
acodec = self.get_codec('audio') acodec = self.get_codec('audio')
if self.extension in ['mp4', 'm4v'] and vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS: if self.extension in ['mp4', 'm4v'] and vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:
@ -725,6 +734,9 @@ class Stream(models.Model):
class Meta: class Meta:
unique_together = ("file", "resolution", "format") unique_together = ("file", "resolution", "format")
indexes = [
models.Index(fields=['file', 'source', 'available'])
]
file = models.ForeignKey(File, related_name='streams', on_delete=models.CASCADE) file = models.ForeignKey(File, related_name='streams', on_delete=models.CASCADE)
resolution = models.IntegerField(default=96) resolution = models.IntegerField(default=96)
@ -804,9 +816,15 @@ class Stream(models.Model):
shutil.move(self.file.data.path, target) shutil.move(self.file.data.path, target)
self.file.data.name = '' self.file.data.name = ''
self.file.save() self.file.save()
self.available = True
self.save()
done = True
elif self.file.can_remux(): elif self.file.can_remux():
ok, error = extract.remux_stream(media, target) ok, error = extract.remux_stream(media, target)
done = True if ok:
self.available = True
self.save()
done = True
if not done: if not done:
ok, error = extract.stream(media, target, self.name(), info, flags=self.flags) ok, error = extract.stream(media, target, self.name(), info, flags=self.flags)
@ -814,7 +832,7 @@ class Stream(models.Model):
# get current version from db and update # get current version from db and update
try: try:
self.refresh_from_db() self.refresh_from_db()
except archive.models.DoesNotExist: except Stream.DoesNotExist:
pass pass
else: else:
self.update_status(ok, error) self.update_status(ok, error)

View file

@ -1,11 +1,9 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from datetime import datetime from datetime import datetime
from time import time from time import time, monotonic
import celery.task.control
import kombu.five
from app.celery import app
from .models import File from .models import File
@ -18,7 +16,7 @@ def parse_job(job):
'file': f.oshash 'file': f.oshash
} }
if job['time_start']: if job['time_start']:
start_time = datetime.fromtimestamp(time() - (kombu.five.monotonic() - job['time_start'])) start_time = datetime.fromtimestamp(time() - (monotonic() - job['time_start']))
r.update({ r.update({
'started': start_time, 'started': start_time,
'running': (datetime.now() - start_time).total_seconds() 'running': (datetime.now() - start_time).total_seconds()
@ -30,7 +28,7 @@ def parse_job(job):
def status(): def status():
status = [] status = []
encoding_jobs = ('archive.tasks.extract_stream', 'archive.tasks.process_stream') encoding_jobs = ('archive.tasks.extract_stream', 'archive.tasks.process_stream')
c = celery.task.control.inspect() c = app.control.inspect()
for job in c.active(safe=True).get('celery@pandora-encoding', []): for job in c.active(safe=True).get('celery@pandora-encoding', []):
if job['name'] in encoding_jobs: if job['name'] in encoding_jobs:
status.append(parse_job(job)) status.append(parse_job(job))
@ -67,7 +65,7 @@ def fill_queue():
def get_celery_worker_status(): def get_celery_worker_status():
ERROR_KEY = "ERROR" ERROR_KEY = "ERROR"
try: try:
insp = celery.task.control.inspect() insp = app.control.inspect()
d = insp.stats() d = insp.stats()
if not d: if not d:
d = {ERROR_KEY: 'No running Celery workers were found.'} d = {ERROR_KEY: 'No running Celery workers were found.'}

View file

@ -2,13 +2,14 @@
from glob import glob from glob import glob
from celery.task import task
from django.conf import settings from django.conf import settings
from django.db import transaction
from django.db.models import Q from django.db.models import Q
from item.models import Item from item.models import Item
from item.tasks import update_poster, update_timeline from item.tasks import update_poster, update_timeline
from taskqueue.models import Task from taskqueue.models import Task
from app.celery import app
from . import models from . import models
from . import extract from . import extract
@ -68,7 +69,7 @@ def update_or_create_instance(volume, f):
instance.file.item.update_wanted() instance.file.item.update_wanted()
return instance return instance
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_files(user, volume, files): def update_files(user, volume, files):
user = models.User.objects.get(username=user) user = models.User.objects.get(username=user)
volume, created = models.Volume.objects.get_or_create(user=user, name=volume) volume, created = models.Volume.objects.get_or_create(user=user, name=volume)
@ -100,7 +101,7 @@ def update_files(user, volume, files):
Task.start(i, user) Task.start(i, user)
update_timeline.delay(i.public_id) update_timeline.delay(i.public_id)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_info(user, info): def update_info(user, info):
user = models.User.objects.get(username=user) user = models.User.objects.get(username=user)
files = models.File.objects.filter(oshash__in=list(info)) files = models.File.objects.filter(oshash__in=list(info))
@ -114,7 +115,7 @@ def update_info(user, info):
Task.start(i, user) Task.start(i, user)
update_timeline.delay(i.public_id) update_timeline.delay(i.public_id)
@task(queue="encoding") @app.task(queue="encoding")
def process_stream(fileId): def process_stream(fileId):
''' '''
process uploaded stream process uploaded stream
@ -140,7 +141,7 @@ def process_stream(fileId):
Task.finish(file.item) Task.finish(file.item)
return True return True
@task(queue="encoding") @app.task(queue="encoding")
def extract_stream(fileId): def extract_stream(fileId):
''' '''
extract stream from direct upload extract stream from direct upload
@ -169,7 +170,7 @@ def extract_stream(fileId):
models.File.objects.filter(id=fileId).update(encoding=False) models.File.objects.filter(id=fileId).update(encoding=False)
Task.finish(file.item) Task.finish(file.item)
@task(queue="encoding") @app.task(queue="encoding")
def extract_derivatives(fileId, rebuild=False): def extract_derivatives(fileId, rebuild=False):
file = models.File.objects.get(id=fileId) file = models.File.objects.get(id=fileId)
streams = file.streams.filter(source=None) streams = file.streams.filter(source=None)
@ -177,7 +178,7 @@ def extract_derivatives(fileId, rebuild=False):
streams[0].extract_derivatives(rebuild) streams[0].extract_derivatives(rebuild)
return True return True
@task(queue="encoding") @app.task(queue="encoding")
def update_stream(id): def update_stream(id):
s = models.Stream.objects.get(pk=id) s = models.Stream.objects.get(pk=id)
if not glob("%s*" % s.timeline_prefix): if not glob("%s*" % s.timeline_prefix):
@ -199,11 +200,11 @@ def update_stream(id):
c.update_calculated_values() c.update_calculated_values()
c.save() c.save()
@task(queue="encoding") @app.task(queue="encoding")
def download_media(item_id, url, referer=None): def download_media(item_id, url, referer=None):
return external.download(item_id, url, referer) return external.download(item_id, url, referer)
@task(queue='default') @app.task(queue='default')
def move_media(data, user): def move_media(data, user):
from changelog.models import add_changelog from changelog.models import add_changelog
from item.models import get_item, Item, ItemSort from item.models import get_item, Item, ItemSort
@ -248,7 +249,8 @@ def move_media(data, user):
if old_item and old_item.files.count() == 0 and i.files.count() == len(data['ids']): if old_item and old_item.files.count() == 0 and i.files.count() == len(data['ids']):
for a in old_item.annotations.all().order_by('id'): for a in old_item.annotations.all().order_by('id'):
a.item = i a.item = i
a.set_public_id() with transaction.atomic():
a.set_public_id()
Annotation.objects.filter(id=a.id).update(item=i, public_id=a.public_id) Annotation.objects.filter(id=a.id).update(item=i, public_id=a.public_id)
old_item.clips.all().update(item=i, sort=i.sort) old_item.clips.all().update(item=i, sort=i.sort)

View file

@ -103,7 +103,7 @@ def update(request, data):
file__available=False, file__available=False,
file__wanted=True)] file__wanted=True)]
if list(filter(lambda l: l['id'] == 'subtitles', settings.CONFIG['layers'])): if utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True):
qs = files.filter( qs = files.filter(
file__is_subtitle=True, file__is_subtitle=True,
file__available=False file__available=False

View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ChangelogConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'changelog'

View file

@ -0,0 +1,35 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('changelog', '0002_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='changelog',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='changelog',
name='value',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='log',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='log',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

7
pandora/clip/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ClipConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'clip'

View file

@ -17,6 +17,7 @@ keymap = {
'place': 'annotations__places__id', 'place': 'annotations__places__id',
'text': 'findvalue', 'text': 'findvalue',
'annotations': 'findvalue', 'annotations': 'findvalue',
'layer': 'annotations__layer',
'user': 'annotations__user__username', 'user': 'annotations__user__username',
} }
case_insensitive_keys = ('annotations__user__username', ) case_insensitive_keys = ('annotations__user__username', )

View file

@ -0,0 +1,18 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clip', '0003_auto_20160219_1805'),
]
operations = [
migrations.AlterField(
model_name='clip',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -8,6 +8,7 @@ import ox
from archive import extract from archive import extract
from . import managers from . import managers
from .utils import add_cuts
def get_layers(item, interval=None, user=None): def get_layers(item, interval=None, user=None):
@ -59,9 +60,7 @@ class MetaClip(object):
self.hue = self.saturation = self.lightness = 0 self.hue = self.saturation = self.lightness = 0
self.volume = 0 self.volume = 0
def save(self, *args, **kwargs): def update_findvalue(self):
if self.duration != self.end - self.start:
self.update_calculated_values()
if not self.aspect_ratio and self.item: if not self.aspect_ratio and self.item:
streams = self.item.streams() streams = self.item.streams()
if streams: if streams:
@ -89,6 +88,11 @@ class MetaClip(object):
self.findvalue = '\n'.join(list(filter(None, [a.findvalue for a in anns]))) self.findvalue = '\n'.join(list(filter(None, [a.findvalue for a in anns])))
for l in [k['id'] for k in settings.CONFIG['layers']]: for l in [k['id'] for k in settings.CONFIG['layers']]:
setattr(self, l, l in anns_by_layer and bool(len(anns_by_layer[l]))) setattr(self, l, l in anns_by_layer and bool(len(anns_by_layer[l])))
def save(self, *args, **kwargs):
if self.duration != self.end - self.start:
self.update_calculated_values()
self.update_findvalue()
models.Model.save(self, *args, **kwargs) models.Model.save(self, *args, **kwargs)
clip_keys = ('id', 'in', 'out', 'position', 'created', 'modified', clip_keys = ('id', 'in', 'out', 'position', 'created', 'modified',
@ -111,8 +115,7 @@ class MetaClip(object):
del j[key] del j[key]
#needed here to make item find with clips work #needed here to make item find with clips work
if 'annotations' in keys: if 'annotations' in keys:
#annotations = self.annotations.filter(layer__in=settings.CONFIG['clipLayers']) annotations = self.annotations.all().exclude(value='')
annotations = self.annotations.all()
if qs: if qs:
for q in qs: for q in qs:
annotations = annotations.filter(q) annotations = annotations.filter(q)
@ -150,12 +153,12 @@ class MetaClip(object):
data['annotation'] = qs[0].public_id data['annotation'] = qs[0].public_id
data['parts'] = self.item.cache['parts'] data['parts'] = self.item.cache['parts']
data['durations'] = self.item.cache['durations'] data['durations'] = self.item.cache['durations']
for key in ('title', 'director', 'year', 'videoRatio'): for key in settings.CONFIG['itemTitleKeys'] + ['videoRatio']:
value = self.item.cache.get(key) value = self.item.cache.get(key)
if value: if value:
data[key] = value data[key] = value
data['duration'] = data['out'] - data['in'] data['duration'] = data['out'] - data['in']
data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end]) add_cuts(data, self.item, self.start, self.end)
data['layers'] = self.get_layers(user) data['layers'] = self.get_layers(user)
data['streams'] = [s.file.oshash for s in self.item.streams()] data['streams'] = [s.file.oshash for s in self.item.streams()]
return data return data
@ -186,6 +189,7 @@ class MetaClip(object):
def __str__(self): def __str__(self):
return self.public_id return self.public_id
class Meta: class Meta:
unique_together = ("item", "start", "end") unique_together = ("item", "start", "end")

22
pandora/clip/utils.py Normal file
View file

@ -0,0 +1,22 @@
def add_cuts(data, item, start, end):
cuts = []
last = False
outer = []
first = 0
for cut in item.get('cuts', []):
if cut > start and cut < end:
if not cuts:
outer.append(first)
cuts.append(cut)
last = True
elif cut <= start:
first = cut
elif cut >= end:
if not len(outer):
outer.append(first)
if len(outer) == 1:
outer.append(cut)
data['cuts'] = tuple(cuts)
data['outerCuts'] = tuple(outer)

View file

@ -1009,7 +1009,7 @@
{ {
"id": "tags", "id": "tags",
"title": "Tags", "title": "Tags",
"canAddAnnotations": {"member": true, "staff": true, "admin": true}, "canAddAnnotations": {"member": true, "friend": true, "staff": true, "admin": true},
"item": "Tag", "item": "Tag",
"autocomplete": true, "autocomplete": true,
"overlap": true, "overlap": true,
@ -1399,10 +1399,8 @@
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"torrent": false,
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
// fixme: this should be named "ratio" or "defaultRatio", // fixme: this should be named "ratio" or "defaultRatio",
// as it also applies to clip lists (on load) // as it also applies to clip lists (on load)

View file

@ -73,13 +73,14 @@
"canSeeAccessed": {"researcher": true, "staff": true, "admin": true}, "canSeeAccessed": {"researcher": true, "staff": true, "admin": true},
"canSeeAllTasks": {"staff": true, "admin": true}, "canSeeAllTasks": {"staff": true, "admin": true},
"canSeeDebugMenu": {"researcher": true, "staff": true, "admin": true}, "canSeeDebugMenu": {"researcher": true, "staff": true, "admin": true},
"canSeeExtraItemViews": {"researcher": true, "staff": true, "admin": true},
"canSeeMedia": {"researcher": true, "staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3}, "canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3},
"canSeeExtraItemViews": {"researcher": true, "staff": true, "admin": true},
"canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3}, "canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3},
"canSeeMedia": {"researcher": true, "staff": true, "admin": true},
"canSeeSize": {"researcher": true, "staff": true, "admin": true}, "canSeeSize": {"researcher": true, "staff": true, "admin": true},
"canSeeSoftwareVersion": {"researcher": true, "staff": true, "admin": true}, "canSeeSoftwareVersion": {"researcher": true, "staff": true, "admin": true},
"canSendMail": {"staff": true, "admin": true} "canSendMail": {"staff": true, "admin": true},
"canShare": {"staff": true, "admin": true}
}, },
/* /*
"clipKeys" are the properties that clips can be sorted by (the values are "clipKeys" are the properties that clips can be sorted by (the values are
@ -312,6 +313,14 @@
"autocomplete": true, "autocomplete": true,
"columnWidth": 128 "columnWidth": 128
}, },
{
"id": "fulltext",
"operator": "+",
"title": "Fulltext",
"type": "text",
"fulltext": true,
"find": true
},
{ {
"id": "created", "id": "created",
"operator": "-", "operator": "-",
@ -1494,6 +1503,7 @@
"hasEvents": true, "hasEvents": true,
"hasPlaces": true, "hasPlaces": true,
"item": "Keyword", "item": "Keyword",
"autocomplete": true,
"overlap": true, "overlap": true,
"type": "string" "type": "string"
}, },
@ -1875,10 +1885,8 @@
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"torrent": false,
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
"previewRatio": 1.375, "previewRatio": 1.375,
"resolutions": [240, 480] "resolutions": [240, 480]

View file

@ -71,13 +71,14 @@
"canSeeAccessed": {"staff": true, "admin": true}, "canSeeAccessed": {"staff": true, "admin": true},
"canSeeAllTasks": {"staff": true, "admin": true}, "canSeeAllTasks": {"staff": true, "admin": true},
"canSeeDebugMenu": {"staff": true, "admin": true}, "canSeeDebugMenu": {"staff": true, "admin": true},
"canSeeExtraItemViews": {"staff": true, "admin": true},
"canSeeMedia": {"staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4}, "canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeExtraItemViews": {"staff": true, "admin": true},
"canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4}, "canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeMedia": {"staff": true, "admin": true},
"canSeeSize": {"staff": true, "admin": true}, "canSeeSize": {"staff": true, "admin": true},
"canSeeSoftwareVersion": {"staff": true, "admin": true}, "canSeeSoftwareVersion": {"staff": true, "admin": true},
"canSendMail": {"staff": true, "admin": true} "canSendMail": {"staff": true, "admin": true},
"canShare": {"staff": true, "admin": true}
}, },
/* /*
"clipKeys" are the properties that clips can be sorted by (the values are "clipKeys" are the properties that clips can be sorted by (the values are
@ -246,6 +247,28 @@
"filter": true, "filter": true,
"find": true "find": true
}, },
{
"id": "source",
"title": "Source",
"type": "string",
"autocomplete": true,
"description": true,
"columnWidth": 180,
"filter": true,
"find": true,
"sort": true
},
{
"id": "project",
"title": "Project",
"type": "string",
"autocomplete": true,
"description": true,
"columnWidth": 120,
"filter": true,
"find": true,
"sort": true
},
{ {
"id": "id", "id": "id",
"operator": "+", "operator": "+",
@ -291,6 +314,24 @@
"sort": true, "sort": true,
"columnWidth": 256 "columnWidth": 256
}, },
{
"id": "content",
"operator": "+",
"title": "Content",
"type": "text",
"find": true,
"sort": true,
"columnWidth": 256
},
{
"id": "translation",
"operator": "+",
"title": "Translation",
"type": "text",
"find": true,
"sort": true,
"columnWidth": 256
},
{ {
"id": "matches", "id": "matches",
"operator": "-", "operator": "-",
@ -310,6 +351,20 @@
"autocomplete": true, "autocomplete": true,
"columnWidth": 128 "columnWidth": 128
}, },
{
"id": "notes",
"title": "Notes",
"type": "text",
"capability": "canEditMetadata"
},
{
"id": "fulltext",
"operator": "+",
"title": "Fulltext",
"type": "text",
"fulltext": true,
"find": true
},
{ {
"id": "created", "id": "created",
"operator": "-", "operator": "-",
@ -545,7 +600,6 @@
"title": "Director", "title": "Director",
"type": ["string"], "type": ["string"],
"autocomplete": true, "autocomplete": true,
"columnRequired": true,
"columnWidth": 180, "columnWidth": 180,
"sort": true, "sort": true,
"sortType": "person" "sortType": "person"
@ -564,7 +618,6 @@
"title": "Featuring", "title": "Featuring",
"type": ["string"], "type": ["string"],
"autocomplete": true, "autocomplete": true,
"columnRequired": true,
"columnWidth": 180, "columnWidth": 180,
"filter": true, "filter": true,
"sort": true, "sort": true,
@ -620,7 +673,7 @@
{ {
"id": "annotations", "id": "annotations",
"title": "Annotations", "title": "Annotations",
"type": "string", // fixme: not the best type for this magic key "type": "text", // fixme: not the best type for this magic key
"find": true "find": true
}, },
{ {
@ -658,7 +711,7 @@
}, },
{ {
"id": "numberofannotations", "id": "numberofannotations",
"title": "Annotations", "title": "Number of Annotations",
"type": "integer", "type": "integer",
"columnWidth": 60, "columnWidth": 60,
"sort": true "sort": true
@ -794,12 +847,16 @@
"id": "user", "id": "user",
"title": "User", "title": "User",
"type": "string", "type": "string",
"columnWidth": 90,
"capability": "canSeeMedia", "capability": "canSeeMedia",
"sort": true,
"find": true "find": true
}, },
{ {
"id": "groups", "id": "groups",
"title": "Group", "title": "Group",
"columnWidth": 90,
"sort": true,
"type": ["string"] "type": ["string"]
}, },
{ {
@ -1332,10 +1389,8 @@
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"torrent": true,
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
"previewRatio": 1.3333333333, "previewRatio": 1.3333333333,
//supported resolutions are //supported resolutions are

View file

@ -29,7 +29,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"text": Text shown on mouseover "text": Text shown on mouseover
*/ */
"cantPlay": { "cantPlay": {
"icon": "noCopyright", "icon": "NoCopyright",
"link": "", "link": "",
"text": "" "text": ""
}, },
@ -67,7 +67,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"canManageEntities": {"member": true, "staff": true, "admin": true}, "canManageEntities": {"member": true, "staff": true, "admin": true},
"canManageHome": {"staff": true, "admin": true}, "canManageHome": {"staff": true, "admin": true},
"canManagePlacesAndEvents": {"member": true, "staff": true, "admin": true}, "canManagePlacesAndEvents": {"member": true, "staff": true, "admin": true},
"canManageTitlesAndNames": {"member": true, "staff": true, "admin": true}, "canManageTitlesAndNames": {"member": false, "staff": true, "admin": true},
"canManageTranslations": {"admin": true}, "canManageTranslations": {"admin": true},
"canManageUsers": {"staff": true, "admin": true}, "canManageUsers": {"staff": true, "admin": true},
"canPlayClips": {"guest": 1, "member": 1, "staff": 4, "admin": 4}, "canPlayClips": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
@ -102,8 +102,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
], ],
/* /*
"clipLayers" is the ordered list of public layers that will appear as the "clipLayers" is the ordered list of public layers that will appear as the
text of clips (in grid view, below the icon). Excluding a layer from this text of clips (in grid view, below the icon).
list means it will not be included in find annotations.
*/ */
"clipLayers": ["publicnotes", "keywords", "subtitles"], "clipLayers": ["publicnotes", "keywords", "subtitles"],
/* /*
@ -351,11 +350,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"type": "enum", "type": "enum",
"columnWidth": 90, "columnWidth": 90,
"format": {"type": "ColorLevel", "args": [ "format": {"type": "ColorLevel", "args": [
["Public", "Out of Copyright", "Under Copyright", "Private"] ["Public", "Restricted", "Private"]
]}, ]},
"sort": true, "sort": true,
"sortOperator": "+", "sortOperator": "+",
"values": ["Public", "Out of Copyright", "Under Copyright", "Private", "Unknown"] "values": ["Public", "Restricted", "Private", "Unknown"]
} }
], ],
/* /*
@ -753,6 +752,13 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"capability": "canSeeMedia", "capability": "canSeeMedia",
"find": true "find": true
}, },
{
"id": "filename",
"title": "Filename",
"type": ["string"],
"capability": "canSeeMedia",
"find": true
},
{ {
"id": "created", "id": "created",
"title": "Date Created", "title": "Date Created",
@ -1159,6 +1165,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"findDocuments": {"conditions": [], "operator": "&"}, "findDocuments": {"conditions": [], "operator": "&"},
"followPlayer": true, "followPlayer": true,
"help": "", "help": "",
"hidden": {
"collections": [],
"edits": [],
"lists": []
},
"icons": "posters", "icons": "posters",
"infoIconSize": 256, "infoIconSize": 256,
"item": "", "item": "",
@ -1267,13 +1278,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"downloadFormat": "webm", "downloadFormat": "webm",
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
"previewRatio": 1.3333333333, "previewRatio": 1.3333333333,
"resolutions": [240, 480], "resolutions": [240, 480]
"torrent": false
} }
} }

7
pandora/document/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class DocumentConfig(AppConfig):
    """Django application configuration for the ``document`` app."""
    # Use 64-bit integer primary keys for models that don't declare one.
    default_auto_field = "django.db.models.BigAutoField"
    name = 'document'

View file

@ -1,14 +1,37 @@
import logging
import os
import subprocess import subprocess
import tempfile
from django.conf import settings from django.conf import settings
def extract_text(pdf): logger = logging.getLogger('pandora.' + __name__)
cmd = ['pdftotext', pdf, '-']
def extract_text(pdf, page=None):
if page is not None:
page = str(page)
cmd = ['pdftotext', '-f', page, '-l', page, pdf, '-']
else:
cmd = ['pdftotext', pdf, '-']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
stdout = stdout.decode() stdout = stdout.decode().strip()
return stdout.strip() if not stdout:
if page:
# split page from pdf and ocr
fd, page_pdf = tempfile.mkstemp('.pdf')
cmd = ['pdfseparate', '-f', page, '-l', page, pdf, page_pdf]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
text = ocr_image(page_pdf)
os.unlink(page_pdf)
os.close(fd)
return text
else:
return ocr_image(pdf)
return stdout
def ocr_image(path): def ocr_image(path):
cmd = ['tesseract', path, '-', 'txt'] cmd = ['tesseract', path, '-', 'txt']
@ -43,9 +66,11 @@ class FulltextMixin:
if self.has_fulltext_key(): if self.has_fulltext_key():
from elasticsearch.exceptions import NotFoundError from elasticsearch.exceptions import NotFoundError
try: try:
res = self.elasticsearch().delete(index=self._ES_INDEX, doc_type='document', id=self.id) res = self.elasticsearch().delete(index=self._ES_INDEX, id=self.id)
except NotFoundError: except NotFoundError:
pass pass
except:
logger.error('failed to delete fulltext document', exc_info=True)
def update_fulltext(self): def update_fulltext(self):
if self.has_fulltext_key(): if self.has_fulltext_key():
@ -54,7 +79,7 @@ class FulltextMixin:
doc = { doc = {
'text': text.lower() 'text': text.lower()
} }
res = self.elasticsearch().index(index=self._ES_INDEX, doc_type='document', id=self.id, body=doc) res = self.elasticsearch().index(index=self._ES_INDEX, id=self.id, body=doc)
@classmethod @classmethod
def find_fulltext(cls, query): def find_fulltext(cls, query):
@ -95,3 +120,69 @@ class FulltextMixin:
ids += [int(r['_id']) for r in res['hits']['hits']] ids += [int(r['_id']) for r in res['hits']['hits']]
from_ += len(res['hits']['hits']) from_ += len(res['hits']['hits'])
return ids return ids
    def highlight_page(self, page, query, size):
        """Render one page of this PDF as a JPEG with matches of `query` highlighted.

        page:  1-based page number (int or str)
        query: text to search for on the page
        size:  target pixel size used when scaling the rendered page down
        Returns an open NamedTemporaryFile handle to the JPEG; the file is
        removed when the handle is closed / garbage collected.
        """
        import pypdfium2 as pdfium
        from PIL import Image
        from PIL import ImageDraw
        pdfpath = self.file.path
        pagenumber = int(page) - 1
        jpg = tempfile.NamedTemporaryFile(suffix='.jpg')
        output = jpg.name
        # Highlight style: yellow at 45% opacity.
        TINT_COLOR = (255, 255, 0)
        TRANSPARENCY = .45
        OPACITY = int(255 * TRANSPARENCY)
        # Render at 150 dpi (PDF user space is 72 units per inch).
        scale = 150/72
        pdf = pdfium.PdfDocument(pdfpath)
        page = pdf[pagenumber]
        bitmap = page.render(scale=scale, rotation=0)
        img = bitmap.to_pil().convert('RGBA')
        # Fully transparent overlay; rectangles are drawn onto it and then
        # alpha-composited over the page image.
        overlay = Image.new('RGBA', img.size, TINT_COLOR+(0,))
        draw = ImageDraw.Draw(overlay)
        textpage = page.get_textpage()
        search = textpage.search(query)
        result = search.get_next()
        while result:
            pos, steps = result
            # NOTE(review): result appears to be (start char index, char count);
            # the +1 widens the match by one character — confirm against the
            # pypdfium2 search API.
            steps += 1
            while steps:
                box = textpage.get_charbox(pos)
                box = [b*scale for b in box]
                # PDF coordinates use a bottom-left origin; flip y for PIL.
                tl = (box[0], img.size[1] - box[3])
                br = (box[2], img.size[1] - box[1])
                draw.rectangle((tl, br), fill=TINT_COLOR+(OPACITY,))
                pos += 1
                steps -= 1
            result = search.get_next()
        img = Image.alpha_composite(img, overlay)
        img = img.convert("RGB")
        aspect = img.size[0] / img.size[1]
        resize_method = Image.LANCZOS
        if img.size[0] >= img.size[1]:
            width = size
            height = int(size / aspect)
        else:
            # NOTE(review): for portrait pages aspect < 1, so size / aspect
            # yields a width larger than `size`; possibly `size * aspect` was
            # intended — confirm.
            width = int(size / aspect)
            height = size
        img = img.resize((width, height), resize_method)
        img.save(output, quality=72)
        return jpg
class FulltextPageMixin(FulltextMixin):
    """Fulltext indexing for a single page of a document (the Page model).

    Pages are indexed in their own Elasticsearch index so search can
    resolve hits to a specific page.
    """
    _ES_INDEX = "document-page-index"

    def extract_fulltext(self):
        """Return the plain text of this page, or '' when unavailable.

        A Page has no `extension` of its own, so the parent document's
        extension decides how the text is obtained (the original code read
        `self.extension`, which does not exist on Page and would raise
        AttributeError for non-pdf documents).
        """
        if self.document.file:
            if self.document.extension == 'pdf':
                # Extract only this page's text from the PDF.
                return extract_text(self.document.file.path, self.page)
            elif self.document.extension in ('png', 'jpg'):
                return ocr_image(self.document.file.path)
            elif self.document.extension == 'html':
                # FIXME: is there a nice way to split that into pages
                return self.data.get('text', '')
        return ''

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from datetime import datetime
import unicodedata import unicodedata
from django.db.models import Q, Manager from django.db.models import Q, Manager
@ -14,6 +15,7 @@ from documentcollection.models import Collection
from item import utils from item import utils
from user.models import Group from user.models import Group
from .pages import PageManager
keymap = { keymap = {
'item': 'items__public_id', 'item': 'items__public_id',
@ -61,7 +63,7 @@ def parseCondition(condition, user, item=None, owner=None):
def buildCondition(k, op, v, user, exclude=False, owner=None): def buildCondition(k, op, v, user, exclude=False, owner=None):
import entity.models import entity.models
from . import models from .. import models
# fixme: frontend should never call with list # fixme: frontend should never call with list
if k == 'list': if k == 'list':
@ -297,5 +299,8 @@ class DocumentManager(Manager):
q |= Q(groups__in=user.groups.all()) q |= Q(groups__in=user.groups.all())
rendered_q |= Q(groups__in=user.groups.all()) rendered_q |= Q(groups__in=user.groups.all())
qs = qs.filter(q) qs = qs.filter(q)
max_level = len(settings.CONFIG['documentRightsLevels'])
qs = qs.filter(rightslevel__lte=max_level)
return qs return qs

View file

@ -0,0 +1,302 @@
# -*- coding: utf-8 -*-
from datetime import datetime
import unicodedata
from six import string_types
from django.db.models import Q, Manager
from django.conf import settings
import ox
from oxdjango.query import QuerySet
import entity.managers
from oxdjango.managers import get_operator
from documentcollection.models import Collection
from item import utils
from user.models import Group
keymap = {
'item': 'items__public_id',
}
default_key = 'title'
def get_key_type(k):
    """Return the normalized find/sort type for a documentKeys entry id.

    Unknown keys default to 'string'; list-valued type declarations use
    their first element; several presentation types collapse to 'string'.
    """
    config = utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'}
    declared = config.get('type')
    if isinstance(declared, list):
        declared = declared[0]
    aliases = {
        'title': 'string',
        'person': 'string',
        'text': 'string',
        'year': 'string',
        'length': 'string',
        'layer': 'string',
        'list': 'list',
    }
    return aliases.get(declared, declared)
def parseCondition(condition, user, item=None, owner=None):
    """Translate one find-condition dict into a Q expression.

    Operators prefixed with '!' negate the condition (exclude=True).
    When filtering within an item, 'description' also matches the
    per-item description property.
    """
    key = condition.get('key', default_key)
    key = keymap.get(key, key) or default_key
    if item and key == 'description':
        per_item = dict(condition, key='items__itemproperties__description')
        return parseCondition(condition, user) | parseCondition(per_item, user)
    value = condition['value']
    op = condition.get('operator') or '='
    negated = op.startswith('!')
    if negated:
        op = op[1:]
    return buildCondition(key, op, value, user, negated, owner=owner)
def buildCondition(k, op, v, user, exclude=False, owner=None):
    """Build a Q object for a single (key, operator, value) page condition.

    k:       condition key (already mapped through keymap)
    op:      comparison operator string ('=', '==', '^', '&', ...)
    v:       condition value
    user:    requesting user (visibility checks for collections/groups)
    exclude: negate the resulting condition
    owner:   owner used for '$my' group queries (defaults to user)

    Special keys (document, rightslevel, groups, oshash, items__public_id,
    entity, collection, fulltext keys) are handled explicitly; other keys
    dispatch on their configured type.
    """
    import entity.models
    from .. import models
    # fixme: frontend should never call with list
    if k == 'list':
        print('fixme: frontend should never call with list', k, op, v)
        k = 'collection'
    key_type = get_key_type(k)
    key_config = (utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'})
    facet_keys = models.Document.facet_keys
    if k == 'document':
        # Match by document id; public ids are base-conversion encoded (ox.fromAZ).
        k = 'document__id'
        if op == '&' and isinstance(v, list):
            v = [ox.fromAZ(id_) for id_ in v]
            k += get_operator(op)
        else:
            v = ox.fromAZ(v)
        q = Q(**{k: v})
        if exclude:
            q = ~Q(document__id__in=models.Document.objects.filter(q))
        return q
    elif k == 'rightslevel':
        q = Q(document__rightslevel=v)
        if exclude:
            q = ~Q(document__rightslevel=v)
        return q
    elif k == 'groups':
        # '$my' matches the owner's (or requesting user's) own groups.
        if op == '==' and v == '$my':
            if not owner:
                owner = user
            groups = owner.groups.all()
        else:
            key = 'name' + get_operator(op)
            groups = Group.objects.filter(**{key: v})
        if not groups.count():
            # No matching group: match nothing.
            return Q(id=0)
        q = Q(document__groups__in=groups)
        if exclude:
            q = ~q
        return q
    elif k in ('oshash', 'items__public_id'):
        q = Q(**{k: v})
        if exclude:
            q = ~Q(id__in=models.Document.objects.filter(q))
        return q
    elif isinstance(v, bool):
        # Boolean values query the model field directly (handled at the end).
        key = k
    elif k == 'entity':
        entity_key, entity_v = entity.managers.namePredicate(op, v)
        key = 'id__in'
        v = entity.models.DocumentProperties.objects.filter(**{
            'entity__' + entity_key: entity_v
        }).values_list('document_id', flat=True)
    elif k == 'collection':
        # Collection values have the form "username:collectionname".
        q = Q(id=0)
        l = v.split(":", 1)
        if len(l) >= 2:
            lqs = list(Collection.objects.filter(name=l[1], user__username=l[0]))
            if len(lqs) == 1 and lqs[0].accessible(user):
                l = lqs[0]
                if l.query.get('static', False) is False:
                    # Smart collection: expand its stored query.
                    data = l.query
                    q = parseConditions(data.get('conditions', []),
                                        data.get('operator', '&'),
                                        user, owner=l.user)
                else:
                    # Static collection: match its explicit document set.
                    q = Q(id__in=l.documents.all())
            else:
                q = Q(id=0)
        return q
    elif key_config.get('fulltext'):
        # Fulltext keys are resolved via the Elasticsearch page index.
        qs = models.Page.find_fulltext_ids(v)
        q = Q(id__in=qs)
        if exclude:
            q = ~Q(id__in=qs)
        return q
    elif key_type == 'boolean':
        q = Q(**{'find__key': k, 'find__value': v})
        if exclude:
            q = ~Q(id__in=models.Document.objects.filter(q))
        return q
    elif key_type == "string":
        # NOTE(review): in_find is constant True here, so the else branch
        # below is dead until a facet key flips it off.
        in_find = True
        if in_find:
            value_key = 'find__value'
        else:
            value_key = k
        if isinstance(v, string_types):
            # Normalize for accent/case-insensitive matching.
            v = unicodedata.normalize('NFKD', v).lower()
        if k in facet_keys:
            # Facet keys match via the facets table instead of find.
            in_find = False
            facet_value = 'facets__value' + get_operator(op, 'istr')
            v = models.Document.objects.filter(**{'facets__key': k, facet_value: v})
            value_key = 'id__in'
        else:
            value_key = value_key + get_operator(op)
        k = str(k)
        value_key = str(value_key)
        if k == '*':
            # Wildcard key: search all find values and all facet values.
            q = Q(**{'find__value' + get_operator(op): v}) | \
                Q(**{'facets__value' + get_operator(op, 'istr'): v})
        elif in_find:
            q = Q(**{'find__key': k, value_key: v})
        else:
            q = Q(**{value_key: v})
        if exclude:
            q = ~Q(id__in=models.Document.objects.filter(q))
        return q
    elif key_type == 'date':
        def parse_date(d):
            # Pad partial dates ("1970", "1970-01") with month/day = 1.
            while len(d) < 3:
                d.append(1)
            return datetime(*[int(i) for i in d])
        #using sort here since find only contains strings
        v = parse_date(v.split('-'))
        vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
        vk = str(vk)
        q = Q(**{vk: v})
        if exclude:
            q = ~q
        return q
    else:  # integer, float, list, time
        #use sort table here
        if key_type == 'time':
            v = int(utils.parse_time(v))
        vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
        vk = str(vk)
        q = Q(**{vk: v})
        if exclude:
            q = ~q
        return q
    # Fallthrough for the bool/entity branches above, which set `key`
    # instead of returning directly.
    key = str(key)
    q = Q(**{key: v})
    if exclude:
        q = ~q
    return q
def parseConditions(conditions, operator, user, item=None, owner=None):
    """Combine a list of find conditions into a single Q expression.

    conditions: list of condition dicts; a dict containing 'conditions'
                is a nested group with its own operator, e.g.
                [{value: "war"},
                 {key: "year", value: "1970-1980", operator: "!="},
                 {key: "country", value: "f", operator: "^"}]
    operator:   '&' or '|' joining the top-level conditions
    Returns None when there is nothing to filter on.
    """
    parts = []
    for cond in conditions:
        if 'conditions' in cond:
            nested = parseConditions(cond['conditions'],
                                     cond.get('operator', '&'), user, item, owner=owner)
            if nested:
                parts.append(nested)
        else:
            parts.append(parseCondition(cond, user, item, owner=owner))
    if not parts:
        return None
    combined = parts[0]
    for part in parts[1:]:
        combined = combined | part if operator == '|' else combined & part
    return combined
class PageManager(Manager):
    """Manager for document pages with query parsing and rights filtering."""

    def get_query_set(self):
        # Legacy (pre-Django-1.6) method name; returns the custom oxdjango QuerySet.
        return QuerySet(self.model)

    def find(self, data, user, item=None):
        '''
        Return the pages matching data['query'], restricted to what `user`
        is allowed to see.

        query: {
            conditions: [
                {
                    value: "war"
                }
                {
                    key: "year",
                    value: "1970-1980,
                    operator: "!="
                },
                {
                    key: "country",
                    value: "f",
                    operator: "^"
                }
            ],
            operator: "&"
        }
        '''
        #join query with operator
        qs = self.get_query_set()
        query = data.get('query', {})
        conditions = parseConditions(query.get('conditions', []),
                                     query.get('operator', '&'),
                                     user, item)
        if conditions:
            qs = qs.filter(conditions)
        qs = qs.distinct()
        # Anonymous users can only see pages of sufficiently public documents.
        if not user or user.is_anonymous:
            level = 'guest'
            allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
            qs = qs.filter(document__rightslevel__lte=allowed_level)
            rendered_q = Q(rendered=True)
        # Authenticated users see public documents, their own, and their groups'.
        else:
            level = user.profile.get_level()
            allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
            q = Q(document__rightslevel__lte=allowed_level) | Q(document__user=user)
            rendered_q = Q(rendered=True) | Q(document__user=user)
            if user.groups.count():
                q |= Q(document__groups__in=user.groups.all())
                rendered_q |= Q(document__groups__in=user.groups.all())
            qs = qs.filter(q)
        # NOTE(review): rendered_q is built in both branches but never applied
        # in this method — confirm whether a rendered filter is missing.
        return qs

View file

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2020-05-13 00:01
from __future__ import unicode_literals
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import document.fulltext
import oxdjango.fields
class Migration(migrations.Migration):
    """Add the document.Page model: one row per page of a Document,
    linked via a `document` foreign key (related_name='pages_set')."""

    dependencies = [
        ('document', '0011_jsonfield'),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('page', models.IntegerField(default=1)),
                ('data', oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder)),
            ],
            # Page mixes in FulltextPageMixin for per-page fulltext indexing.
            bases=(models.Model, document.fulltext.FulltextPageMixin),
        ),
        # The FK is added separately so the model creation stays self-contained.
        migrations.AddField(
            model_name='page',
            name='document',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages_set', to='document.Document'),
        ),
    ]

View file

@ -0,0 +1,55 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated: switch implicit AutoField primary keys to
    BigAutoField (Django 4.x DEFAULT_AUTO_FIELD) and give
    ItemProperties.description an explicit empty-string default."""

    dependencies = [
        ('document', '0012_auto_20200513_0001'),
    ]

    operations = [
        migrations.AlterField(
            model_name='access',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='document',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='document',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='facet',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='find',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='itemproperties',
            name='description',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='itemproperties',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='page',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -6,11 +6,12 @@ import os
import re import re
import unicodedata import unicodedata
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models, transaction from django.db import models, transaction
from django.db.models import Q, Sum, Max from django.db.models import Q, Sum, Max
from django.contrib.auth import get_user_model
from django.db.models.signals import pre_delete from django.db.models.signals import pre_delete
from django.conf import settings from django.utils import datetime_safe
from oxdjango.fields import JSONField from oxdjango.fields import JSONField
from PIL import Image from PIL import Image
@ -21,7 +22,7 @@ from oxdjango.sortmodel import get_sort_field
from person.models import get_name_sort from person.models import get_name_sort
from item.models import Item from item.models import Item
from annotation.models import Annotation from annotation.models import Annotation
from archive.extract import resize_image from archive.extract import resize_image, open_image_rgb
from archive.chunk import save_chunk from archive.chunk import save_chunk
from user.models import Group from user.models import Group
from user.utils import update_groups from user.utils import update_groups
@ -29,7 +30,7 @@ from user.utils import update_groups
from . import managers from . import managers
from . import utils from . import utils
from . import tasks from . import tasks
from .fulltext import FulltextMixin from .fulltext import FulltextMixin, FulltextPageMixin
User = get_user_model() User = get_user_model()
@ -79,7 +80,7 @@ class Document(models.Model, FulltextMixin):
current_values = [] current_values = []
for k in settings.CONFIG['documentKeys']: for k in settings.CONFIG['documentKeys']:
if k.get('sortType') == 'person': if k.get('sortType') == 'person':
current_values += self.get(k['id'], []) current_values += self.get_value(k['id'], [])
if not isinstance(current_values, list): if not isinstance(current_values, list):
if not current_values: if not current_values:
current_values = [] current_values = []
@ -327,6 +328,9 @@ class Document(models.Model, FulltextMixin):
def editable(self, user, item=None): def editable(self, user, item=None):
if not user or user.is_anonymous: if not user or user.is_anonymous:
return False return False
max_level = len(settings.CONFIG['rightsLevels'])
if self.rightslevel > max_level:
return False
if self.user == user or \ if self.user == user or \
self.groups.filter(id__in=user.groups.all()).count() > 0 or \ self.groups.filter(id__in=user.groups.all()).count() > 0 or \
user.is_staff or \ user.is_staff or \
@ -346,6 +350,8 @@ class Document(models.Model, FulltextMixin):
groups = data.pop('groups') groups = data.pop('groups')
update_groups(self, groups) update_groups(self, groups)
for key in data: for key in data:
if key == "id":
continue
k = list(filter(lambda i: i['id'] == key, settings.CONFIG['documentKeys'])) k = list(filter(lambda i: i['id'] == key, settings.CONFIG['documentKeys']))
ktype = k and k[0].get('type') or '' ktype = k and k[0].get('type') or ''
if key == 'text' and self.extension == 'html': if key == 'text' and self.extension == 'html':
@ -546,10 +552,10 @@ class Document(models.Model, FulltextMixin):
if len(crop) == 4: if len(crop) == 4:
path = os.path.join(folder, '%dp%d,%s.jpg' % (1024, page, ','.join(map(str, crop)))) path = os.path.join(folder, '%dp%d,%s.jpg' % (1024, page, ','.join(map(str, crop))))
if not os.path.exists(path): if not os.path.exists(path):
img = Image.open(src).crop(crop) img = open_image_rgb(src).crop(crop)
img.save(path) img.save(path)
else: else:
img = Image.open(path) img = open_image_rgb(path)
src = path src = path
if size < max(img.size): if size < max(img.size):
path = os.path.join(folder, '%dp%d,%s.jpg' % (size, page, ','.join(map(str, crop)))) path = os.path.join(folder, '%dp%d,%s.jpg' % (size, page, ','.join(map(str, crop))))
@ -562,10 +568,10 @@ class Document(models.Model, FulltextMixin):
if len(crop) == 4: if len(crop) == 4:
path = os.path.join(folder, '%s.jpg' % ','.join(map(str, crop))) path = os.path.join(folder, '%s.jpg' % ','.join(map(str, crop)))
if not os.path.exists(path): if not os.path.exists(path):
img = Image.open(src).crop(crop) img = open_image_rgb(src).convert('RGB').crop(crop)
img.save(path) img.save(path)
else: else:
img = Image.open(path) img = open_image_rgb(path)
src = path src = path
if size < max(img.size): if size < max(img.size):
path = os.path.join(folder, '%sp%s.jpg' % (size, ','.join(map(str, crop)))) path = os.path.join(folder, '%sp%s.jpg' % (size, ','.join(map(str, crop))))
@ -574,7 +580,7 @@ class Document(models.Model, FulltextMixin):
if os.path.exists(src) and not os.path.exists(path): if os.path.exists(src) and not os.path.exists(path):
image_size = max(self.width, self.height) image_size = max(self.width, self.height)
if image_size == -1: if image_size == -1:
image_size = max(*Image.open(src).size) image_size = max(*open_image_rgb(src).size)
if size > image_size: if size > image_size:
path = src path = src
else: else:
@ -586,6 +592,11 @@ class Document(models.Model, FulltextMixin):
image = os.path.join(os.path.dirname(pdf), '1024p%d.jpg' % page) image = os.path.join(os.path.dirname(pdf), '1024p%d.jpg' % page)
utils.extract_pdfpage(pdf, image, page) utils.extract_pdfpage(pdf, image, page)
def create_pages(self):
for page in range(self.pages):
page += 1
p, c = Page.objects.get_or_create(document=self, page=page)
def get_info(self): def get_info(self):
if self.extension == 'pdf': if self.extension == 'pdf':
self.thumbnail(1024) self.thumbnail(1024)
@ -595,7 +606,7 @@ class Document(models.Model, FulltextMixin):
self.pages = utils.pdfpages(self.file.path) self.pages = utils.pdfpages(self.file.path)
elif self.width == -1: elif self.width == -1:
self.pages = -1 self.pages = -1
self.width, self.height = Image.open(self.file.path).size self.width, self.height = open_image_rgb(self.file.path).size
def get_ratio(self): def get_ratio(self):
if self.extension == 'pdf': if self.extension == 'pdf':
@ -702,6 +713,41 @@ class ItemProperties(models.Model):
super(ItemProperties, self).save(*args, **kwargs) super(ItemProperties, self).save(*args, **kwargs)
class Page(models.Model, FulltextPageMixin):
    """A single page of a Document, with its own fulltext index entry."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    document = models.ForeignKey(Document, related_name='pages_set', on_delete=models.CASCADE)
    # 1-based page number within the document
    page = models.IntegerField(default=1)
    data = JSONField(default=dict, editable=False)

    objects = managers.PageManager()

    def __str__(self):
        return u"%s:%s" % (self.document, self.page)

    def json(self, keys=None, user=None):
        """Serialize this page for the API.

        keys: optional list restricting the returned keys; any key that is
              not a page key ('document', 'page', 'id', 'fulltext') is
              delegated to the parent document's json().
        user: passed through to Document.json for rights-aware fields.
        """
        data = {}
        # Public document id is the base-conversion encoded primary key.
        data['document'] = ox.toAZ(self.document.id)
        data['page'] = self.page
        data['id'] = '{document}/{page}'.format(**data)
        document_keys = []
        if keys:
            # Drop page-level keys that were not requested.
            for key in list(data):
                if key not in keys:
                    del data[key]
            for key in keys:
                if 'fulltext' in key:
                    # Fulltext is extracted on demand, not stored on the page.
                    data['fulltext'] = self.extract_fulltext()
                elif key in ('document', 'page', 'id'):
                    pass
                else:
                    document_keys.append(key)
            if document_keys:
                data.update(self.document.json(document_keys, user))
        return data
class Access(models.Model): class Access(models.Model):
class Meta: class Meta:
unique_together = ("document", "user") unique_together = ("document", "user")

View file

@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
import os
import re
from glob import glob
import unicodedata
import ox
from ox.utils import json
from oxdjango.api import actions
from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
from django import forms
from django.db.models import Count, Sum
from django.conf import settings
from item import utils
from item.models import Item
from itemlist.models import List
from entity.models import Entity
from archive.chunk import process_chunk
from changelog.models import add_changelog
from . import models
from . import tasks
def parse_query(data, user):
    """Build the internal query dict (range/sort defaults + qs) from request data."""
    query = {
        'range': [0, 100],
        'sort': [{'key': 'page', 'operator': '+'}, {'key': 'title', 'operator': '+'}],
    }
    # request values override the defaults for the whitelisted keys
    query.update({
        key: data[key]
        for key in ('keys', 'group', 'file', 'range', 'position', 'positions', 'sort')
        if key in data
    })
    query['qs'] = models.Page.objects.find(data, user)
    return query
def _order_query(qs, sort):
prefix = 'document__sort__'
order_by = []
for e in sort:
operator = e['operator']
if operator != '-':
operator = ''
key = {
'index': 'document__items__itemproperties__index',
'position': 'id',
'name': 'title',
}.get(e['key'], e['key'])
if key == 'resolution':
order_by.append('%swidth' % operator)
order_by.append('%sheight' % operator)
else:
if '__' not in key and key not in ('created', 'modified', 'page'):
key = "%s%s" % (prefix, key)
order = '%s%s' % (operator, key)
order_by.append(order)
if order_by:
qs = qs.order_by(*order_by, nulls_last=True)
qs = qs.distinct()
return qs
def _order_by_group(query):
prefix = 'document__sort__'
if 'sort' in query:
op = '-' if query['sort'][0]['operator'] == '-' else ''
if len(query['sort']) == 1 and query['sort'][0]['key'] == 'items':
order_by = op + prefix + 'items'
if query['group'] == "year":
secondary = op + prefix + 'sortvalue'
order_by = (order_by, secondary)
elif query['group'] != "keyword":
order_by = (order_by, prefix + 'sortvalue')
else:
order_by = (order_by, 'value')
else:
order_by = op + prefix + 'sortvalue'
order_by = (order_by, prefix + 'items')
else:
order_by = ('-' + prefix + 'sortvalue', prefix + 'items')
return order_by
def findPages(request, data):
    '''
    Finds documents pages for a given query
    takes {
        query: object, // query object, see `find`
        sort: [object], // list of sort objects, see `find`
        range: [int, int], // range of results, per current sort order
        keys: [string] // list of keys to return
    }
    returns {
        items: [{ // list of pages
            id: string
            page: int
        }]
    }
    '''
    query = parse_query(data, request.user)
    # apply the requested sort order to the page queryset
    qs = _order_query(query['qs'], query['sort'])
    response = json_response()
    if 'group' in query:
        # grouped request: return facet values with item counts, not pages
        response['data']['items'] = []
        items = 'items'  # name of the annotated count column used below
        document_qs = query['qs']
        order_by = _order_by_group(query)
        # facet rows of the requested key, restricted to matching documents
        qs = models.Facet.objects.filter(key=query['group']).filter(document__id__in=document_qs)
        qs = qs.values('value').annotate(items=Count('id')).order_by(*order_by)
        if 'positions' in query:
            response['data']['positions'] = {}
            ids = [j['value'] for j in qs]
            response['data']['positions'] = utils.get_positions(ids, query['positions'])
        elif 'range' in data:
            qs = qs[query['range'][0]:query['range'][1]]
            response['data']['items'] = [{'name': i['value'], 'items': i[items]} for i in qs]
        else:
            # neither positions nor range: just report the number of groups
            response['data']['items'] = qs.count()
    elif 'keys' in data:
        # plain listing: serialize the requested keys for the selected range
        qs = qs[query['range'][0]:query['range'][1]]
        response['data']['items'] = [l.json(data['keys'], request.user) for l in qs]
    elif 'position' in data:
        #FIXME: actually implement position requests
        response['data']['position'] = 0
    elif 'positions' in data:
        ids = list(qs.values_list('id', flat=True))
        response['data']['positions'] = utils.get_positions(ids, query['positions'], decode_id=True)
    else:
        # default: total result count
        response['data']['items'] = qs.count()
    return render_to_json_response(response)


actions.register(findPages)

View file

@ -1,21 +1,26 @@
import ox import ox
from celery.task import task from app.celery import app
@task(queue="encoding") @app.task(queue="encoding")
def extract_fulltext(id): def extract_fulltext(id):
from . import models from . import models
d = models.Document.objects.get(id=id) d = models.Document.objects.get(id=id)
d.update_fulltext() d.update_fulltext()
d.create_pages()
for page in d.pages_set.all():
page.update_fulltext()
@task(queue='default') @app.task(queue='default')
def bulk_edit(data, username): def bulk_edit(data, username):
from django.db import transaction from django.db import transaction
from . import models from . import models
from item.models import Item from item.models import Item
user = models.User.objects.get(username=username) user = models.User.objects.get(username=username)
item = 'item' in data and Item.objects.get(public_id=data['item']) or None item = 'item' in data and Item.objects.get(public_id=data['item']) or None
documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, data['id'])) ids = data['id']
del data['id']
documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, ids))
for document in documents: for document in documents:
if document.editable(user, item): if document.editable(user, item):
with transaction.atomic(): with transaction.atomic():

View file

@ -12,8 +12,10 @@ from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
from django import forms from django import forms
from django.db.models import Count, Sum
from django.conf import settings from django.conf import settings
from django.db.models import Count, Sum
from django.http import HttpResponse
from django.shortcuts import render
from item import utils from item import utils
from item.models import Item from item.models import Item
@ -24,6 +26,7 @@ from changelog.models import add_changelog
from . import models from . import models
from . import tasks from . import tasks
from . import page_views
def get_document_or_404_json(request, id): def get_document_or_404_json(request, id):
response = {'status': {'code': 404, response = {'status': {'code': 404,
@ -380,8 +383,12 @@ def file(request, id, name=None):
def thumbnail(request, id, size=256, page=None): def thumbnail(request, id, size=256, page=None):
size = int(size) size = int(size)
document = get_document_or_404_json(request, id) document = get_document_or_404_json(request, id)
if "q" in request.GET and page:
img = document.highlight_page(page, request.GET["q"], size)
return HttpResponse(img, content_type="image/jpeg")
return HttpFileResponse(document.thumbnail(size, page=page)) return HttpFileResponse(document.thumbnail(size, page=page))
@login_required_json @login_required_json
def upload(request): def upload(request):
if 'id' in request.GET: if 'id' in request.GET:
@ -506,3 +513,37 @@ def autocompleteDocuments(request, data):
response['data']['items'] = [i['value'] for i in qs] response['data']['items'] = [i['value'] for i in qs]
return render_to_json_response(response) return render_to_json_response(response)
actions.register(autocompleteDocuments) actions.register(autocompleteDocuments)
def document(request, fragment):
    """
    Render the metadata/preview page for a document URL.

    `fragment` is the path below /documents/, e.g. "ABC", "ABC/3" (a page
    number) or "ABC/x0,y0,x1,y1" (a crop rectangle); title, description and
    preview image are only exposed if the user may access the document.
    """
    context = {}
    parts = fragment.split('/')
    # FIXME: parse collection urls and return the right metadata for those
    doc_id = parts[0]  # renamed from `id` to avoid shadowing the builtin
    page = None
    crop = None
    if len(parts) == 2:
        rect = parts[1].split(',')
        if len(rect) == 1:
            page = rect[0]
        else:
            crop = rect
    try:
        document = models.Document.objects.filter(id=ox.fromAZ(doc_id)).first()
    except Exception:
        # ox.fromAZ raises on malformed ids; treat them as "not found".
        # (was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt)
        document = None
    if document and document.access(request.user):
        context['title'] = document.data['title']
        if document.data.get('description'):
            context['description'] = document.data['description']
        public_id = ox.toAZ(document.id)
        preview = '/documents/%s/512p.jpg' % public_id
        if page:
            preview = '/documents/%s/512p%s.jpg' % (public_id, page)
        if crop:
            preview = '/documents/%s/512p%s.jpg' % (public_id, ','.join(crop))
        context['preview'] = request.build_absolute_uri(preview)
        context['url'] = request.build_absolute_uri('/documents/' + fragment)
    context['settings'] = settings
    return render(request, "document.html", context)

View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class DocumentcollectionConfig(AppConfig):
    """Django app config for the documentcollection app (64-bit auto PKs)."""
    default_auto_field = "django.db.models.BigAutoField"
    name = 'documentcollection'

View file

@ -0,0 +1,61 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import documentcollection.models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 4.2): align documentcollection fields with the
    new defaults — BigAutoField ids, DjangoJSONEncoder-backed JSONFields and
    callable defaults instead of lambdas."""

    dependencies = [
        ('documentcollection', '0004_jsonfield'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collection',
            name='description',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='collection',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='poster_frames',
            field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='collection',
            name='query',
            field=oxdjango.fields.JSONField(default=documentcollection.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='collection',
            name='sort',
            field=oxdjango.fields.JSONField(default=documentcollection.models.get_collectionsort, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='collection',
            name='status',
            field=models.CharField(default='private', max_length=20),
        ),
        migrations.AlterField(
            model_name='collection',
            name='type',
            field=models.CharField(default='static', max_length=255),
        ),
        migrations.AlterField(
            model_name='collectiondocument',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='position',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -34,6 +34,9 @@ def get_collectionview():
def get_collectionsort(): def get_collectionsort():
return tuple(settings.CONFIG['user']['ui']['collectionSort']) return tuple(settings.CONFIG['user']['ui']['collectionSort'])
def default_query():
return {"static": True}
class Collection(models.Model): class Collection(models.Model):
class Meta: class Meta:
@ -46,7 +49,7 @@ class Collection(models.Model):
name = models.CharField(max_length=255) name = models.CharField(max_length=255)
status = models.CharField(max_length=20, default='private') status = models.CharField(max_length=20, default='private')
_status = ['private', 'public', 'featured'] _status = ['private', 'public', 'featured']
query = JSONField(default=lambda: {"static": True}, editable=False) query = JSONField(default=default_query, editable=False)
type = models.CharField(max_length=255, default='static') type = models.CharField(max_length=255, default='static')
description = models.TextField(default='') description = models.TextField(default='')

View file

@ -86,6 +86,11 @@ def findCollections(request, data):
for x in data.get('query', {}).get('conditions', []) for x in data.get('query', {}).get('conditions', [])
) )
is_personal = request.user.is_authenticated and any(
(x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
for x in data.get('query', {}).get('conditions', [])
)
if is_section_request: if is_section_request:
qs = query['qs'] qs = query['qs']
if not is_featured and not request.user.is_anonymous: if not is_featured and not request.user.is_anonymous:
@ -94,6 +99,9 @@ def findCollections(request, data):
else: else:
qs = _order_query(query['qs'], query['sort']) qs = _order_query(query['qs'], query['sort'])
if is_personal and request.user.profile.ui.get('hidden', {}).get('collections'):
qs = qs.exclude(name__in=request.user.profile.ui['hidden']['collections'])
response = json_response() response = json_response()
if 'keys' in data: if 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]] qs = qs[query['range'][0]:query['range'][1]]
@ -238,7 +246,7 @@ def addCollection(request, data):
'type' and 'view'. 'type' and 'view'.
see: editCollection, findCollections, getCollection, removeCollection, sortCollections see: editCollection, findCollections, getCollection, removeCollection, sortCollections
''' '''
data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip() data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
name = data['name'] name = data['name']
if not name: if not name:
name = "Untitled" name = "Untitled"

7
pandora/edit/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class EditConfig(AppConfig):
    """Django app config for the edit app (64-bit auto PKs)."""
    default_auto_field = "django.db.models.BigAutoField"
    name = 'edit'

View file

@ -0,0 +1,41 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import edit.models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 4.2): BigAutoField ids and
    DjangoJSONEncoder-backed JSONFields for the edit app."""

    dependencies = [
        ('edit', '0005_jsonfield'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clip',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='edit',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='edit',
            name='poster_frames',
            field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='edit',
            name='query',
            field=oxdjango.fields.JSONField(default=edit.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='position',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -13,6 +13,7 @@ from django.conf import settings
from django.db import models, transaction from django.db import models, transaction
from django.db.models import Max from django.db.models import Max
from django.contrib.auth import get_user_model from django.contrib.auth import get_user_model
from django.core.cache import cache
from oxdjango.fields import JSONField from oxdjango.fields import JSONField
@ -24,6 +25,7 @@ import clip.models
from archive import extract from archive import extract
from user.utils import update_groups from user.utils import update_groups
from user.models import Group from user.models import Group
from clip.utils import add_cuts
from . import managers from . import managers
@ -33,6 +35,9 @@ User = get_user_model()
def get_path(f, x): return f.path(x) def get_path(f, x): return f.path(x)
def get_icon_path(f, x): return get_path(f, 'icon.jpg') def get_icon_path(f, x): return get_path(f, 'icon.jpg')
def default_query():
return {"static": True}
class Edit(models.Model): class Edit(models.Model):
class Meta: class Meta:
@ -51,7 +56,7 @@ class Edit(models.Model):
description = models.TextField(default='') description = models.TextField(default='')
rightslevel = models.IntegerField(db_index=True, default=0) rightslevel = models.IntegerField(db_index=True, default=0)
query = JSONField(default=lambda: {"static": True}, editable=False) query = JSONField(default=default_query, editable=False)
type = models.CharField(max_length=255, default='static') type = models.CharField(max_length=255, default='static')
icon = models.ImageField(default=None, blank=True, null=True, upload_to=get_icon_path) icon = models.ImageField(default=None, blank=True, null=True, upload_to=get_icon_path)
@ -93,6 +98,8 @@ class Edit(models.Model):
# dont add clip if in/out are invalid # dont add clip if in/out are invalid
if not c.annotation: if not c.annotation:
duration = c.item.sort.duration duration = c.item.sort.duration
if c.start is None or c.end is None:
return False
if c.start > c.end \ if c.start > c.end \
or round(c.start, 3) >= round(duration, 3) \ or round(c.start, 3) >= round(duration, 3) \
or round(c.end, 3) > round(duration, 3): or round(c.end, 3) > round(duration, 3):
@ -507,7 +514,7 @@ class Clip(models.Model):
if value: if value:
data[key] = value data[key] = value
data['duration'] = data['out'] - data['in'] data['duration'] = data['out'] - data['in']
data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end]) add_cuts(data, self.item, self.start, self.end)
data['layers'] = self.get_layers(user) data['layers'] = self.get_layers(user)
data['streams'] = [s.file.oshash for s in self.item.streams()] data['streams'] = [s.file.oshash for s in self.item.streams()]
return data return data

View file

@ -3,14 +3,16 @@
import os import os
import re import re
import ox from oxdjango.api import actions
from oxdjango.decorators import login_required_json from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
import ox
from django.conf import settings
from django.db import transaction from django.db import transaction
from django.db.models import Max from django.db.models import Max
from oxdjango.http import HttpFileResponse from django.db.models import Sum
from oxdjango.api import actions
from django.conf import settings
from item import utils from item import utils
from changelog.models import add_changelog from changelog.models import add_changelog
@ -190,7 +192,7 @@ def _order_clips(edit, sort):
'in': 'start', 'in': 'start',
'out': 'end', 'out': 'end',
'text': 'sortvalue', 'text': 'sortvalue',
'volume': 'sortvolume', 'volume': 'volume' if edit.type == 'smart' else 'sortvolume',
'item__sort__item': 'item__sort__public_id', 'item__sort__item': 'item__sort__public_id',
}.get(key, key) }.get(key, key)
order = '%s%s' % (operator, key) order = '%s%s' % (operator, key)
@ -260,7 +262,7 @@ def addEdit(request, data):
} }
see: editEdit, findEdit, getEdit, removeEdit, sortEdits see: editEdit, findEdit, getEdit, removeEdit, sortEdits
''' '''
data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip() data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
name = data['name'] name = data['name']
if not name: if not name:
name = "Untitled" name = "Untitled"
@ -412,6 +414,11 @@ def findEdits(request, data):
is_featured = any(filter(is_featured_condition, data.get('query', {}).get('conditions', []))) is_featured = any(filter(is_featured_condition, data.get('query', {}).get('conditions', [])))
is_personal = request.user.is_authenticated and any(
(x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
for x in data.get('query', {}).get('conditions', [])
)
if is_section_request: if is_section_request:
qs = query['qs'] qs = query['qs']
if not is_featured and not request.user.is_anonymous: if not is_featured and not request.user.is_anonymous:
@ -420,6 +427,9 @@ def findEdits(request, data):
else: else:
qs = _order_query(query['qs'], query['sort']) qs = _order_query(query['qs'], query['sort'])
if is_personal and request.user.profile.ui.get('hidden', {}).get('edits'):
qs = qs.exclude(name__in=request.user.profile.ui['hidden']['edits'])
response = json_response() response = json_response()
if 'keys' in data: if 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]] qs = qs[query['range'][0]:query['range'][1]]

3
pandora/encoding.conf.in Normal file
View file

@ -0,0 +1,3 @@
LOGLEVEL=info
MAX_TASKS_PER_CHILD=500
CONCURRENCY=1

6
pandora/entity/apps.py Normal file
View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class EntityConfig(AppConfig):
    """Django app config for the entity app (64-bit auto PKs)."""
    default_auto_field = "django.db.models.BigAutoField"
    name = 'entity'

View file

@ -0,0 +1,50 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 4.2): BigAutoField ids,
    DjangoJSONEncoder-backed JSONFields and TextField defaults for entity."""

    dependencies = [
        ('entity', '0006_auto_20180918_0903'),
    ]

    operations = [
        migrations.AlterField(
            model_name='documentproperties',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='documentproperties',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='entity',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='entity',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='entity',
            name='name_find',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='find',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='link',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

6
pandora/event/apps.py Normal file
View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class EventConfig(AppConfig):
    """Django app config for the event app (64-bit auto PKs)."""
    default_auto_field = "django.db.models.BigAutoField"
    name = 'event'

View file

@ -0,0 +1,43 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 4.2): BigAutoField id and explicit CharField /
    TextField defaults for the event model."""

    dependencies = [
        ('event', '0003_auto_20160304_1644'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='duration',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='event',
            name='end',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='event',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='event',
            name='name_find',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='event',
            name='start',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='event',
            name='type',
            field=models.CharField(default='', max_length=255),
        ),
    ]

View file

@ -1,20 +1,26 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from celery.task import task from app.celery import app
from .models import Event from .models import Event
''' '''
@periodic_task(run_every=crontab(hour=7, minute=30), queue='encoding') from celery.schedules import crontab
@app.task(ignore_results=True, queue='encoding')
def update_all_matches(**kwargs): def update_all_matches(**kwargs):
ids = [e['id'] for e in Event.objects.all().values('id')] ids = [e['id'] for e in Event.objects.all().values('id')]
for i in ids: for i in ids:
e = Event.objects.get(pk=i) e = Event.objects.get(pk=i)
e.update_matches() e.update_matches()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(crontab(hour=7, minute=30), update_all_matches.s())
''' '''
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_matches(eventId): def update_matches(eventId):
event = Event.objects.get(pk=eventId) event = Event.objects.get(pk=eventId)
event.update_matches() event.update_matches()

View file

@ -2,4 +2,5 @@ from django.apps import AppConfig
class HomeConfig(AppConfig): class HomeConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'home' name = 'home'

View file

@ -0,0 +1,30 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 4.2): BigAutoField id, JSONField encoder and
    index default for the home app's item model."""

    dependencies = [
        ('home', '0002_jsonfield'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='item',
            name='index',
            field=models.IntegerField(default=-1),
        ),
    ]

6
pandora/item/apps.py Normal file
View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class ItemConfig(AppConfig):
    """Django app config for the item app (64-bit auto PKs)."""
    default_auto_field = "django.db.models.BigAutoField"
    name = 'item'

View file

@ -33,7 +33,7 @@ def parseCondition(condition, user, owner=None):
k = {'id': 'public_id'}.get(k, k) k = {'id': 'public_id'}.get(k, k)
if not k: if not k:
k = '*' k = '*'
v = condition['value'] v = condition.get('value', '')
op = condition.get('operator') op = condition.get('operator')
if not op: if not op:
op = '=' op = '='
@ -62,6 +62,9 @@ def parseCondition(condition, user, owner=None):
if k == 'list': if k == 'list':
key_type = '' key_type = ''
if k in ('width', 'height'):
key_type = 'integer'
if k == 'groups': if k == 'groups':
if op == '==' and v == '$my': if op == '==' and v == '$my':
if not owner: if not owner:
@ -86,8 +89,11 @@ def parseCondition(condition, user, owner=None):
elif k == 'rendered': elif k == 'rendered':
return Q(rendered=v) return Q(rendered=v)
elif k == 'resolution': elif k == 'resolution':
q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \ if isinstance(v, list) and len(v) == 2:
& parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user) q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \
& parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user)
else:
q = Q(id=0)
if exclude: if exclude:
q = ~q q = ~q
return q return q
@ -318,6 +324,8 @@ class ItemManager(Manager):
q |= Q(groups__in=user.groups.all()) q |= Q(groups__in=user.groups.all())
rendered_q |= Q(groups__in=user.groups.all()) rendered_q |= Q(groups__in=user.groups.all())
qs = qs.filter(q) qs = qs.filter(q)
max_level = len(settings.CONFIG['rightsLevels'])
qs = qs.filter(level__lte=max_level)
if settings.CONFIG.get('itemRequiresVideo') and level != 'admin': if settings.CONFIG.get('itemRequiresVideo') and level != 'admin':
qs = qs.filter(rendered_q) qs = qs.filter(rendered_q)
return qs return qs

View file

@ -71,7 +71,7 @@ class Migration(migrations.Migration):
('poster_width', models.IntegerField(default=0)), ('poster_width', models.IntegerField(default=0)),
('poster_frame', models.FloatField(default=-1)), ('poster_frame', models.FloatField(default=-1)),
('icon', models.ImageField(blank=True, default=None, upload_to=item.models.get_icon_path)), ('icon', models.ImageField(blank=True, default=None, upload_to=item.models.get_icon_path)),
('torrent', models.FileField(blank=True, default=None, max_length=1000, upload_to=item.models.get_torrent_path)), ('torrent', models.FileField(blank=True, default=None, max_length=1000)),
('stream_info', oxdjango.fields.DictField(default={}, editable=False)), ('stream_info', oxdjango.fields.DictField(default={}, editable=False)),
('stream_aspect', models.FloatField(default=1.3333333333333333)), ('stream_aspect', models.FloatField(default=1.3333333333333333)),
], ],

View file

@ -0,0 +1,19 @@
# Generated by Django 3.0.10 on 2023-07-10 08:52
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated: drop the obsolete torrent FileField from Item
    (torrent support was removed from the model)."""

    dependencies = [
        ('item', '0004_json_cache'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='item',
            name='torrent',
        ),
    ]

View file

@ -0,0 +1,65 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 4.2): BigAutoField ids and
    DjangoJSONEncoder-backed JSONFields across the item app's models."""

    dependencies = [
        ('item', '0005_auto_20230710_0852'),
    ]

    operations = [
        migrations.AlterField(
            model_name='access',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='annotationsequence',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='description',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='facet',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='item',
            name='cache',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='external_data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='item',
            name='stream_info',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='itemfind',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -43,7 +43,7 @@ from user.utils import update_groups
from user.models import Group from user.models import Group
import archive.models import archive.models
logger = logging.getLogger(__name__) logger = logging.getLogger('pandora.' + __name__)
User = get_user_model() User = get_user_model()
@ -157,9 +157,6 @@ def get_icon_path(f, x):
def get_poster_path(f, x): def get_poster_path(f, x):
return get_path(f, 'poster.jpg') return get_path(f, 'poster.jpg')
def get_torrent_path(f, x):
return get_path(f, 'torrent.torrent')
class Item(models.Model): class Item(models.Model):
created = models.DateTimeField(auto_now_add=True) created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True) modified = models.DateTimeField(auto_now=True)
@ -185,7 +182,6 @@ class Item(models.Model):
icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path) icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path)
torrent = models.FileField(default=None, blank=True, max_length=1000, upload_to=get_torrent_path)
stream_info = JSONField(default=dict, editable=False) stream_info = JSONField(default=dict, editable=False)
# stream related fields # stream related fields
@ -233,6 +229,9 @@ class Item(models.Model):
def editable(self, user): def editable(self, user):
if user.is_anonymous: if user.is_anonymous:
return False return False
max_level = len(settings.CONFIG['rightsLevels'])
if self.level > max_level:
return False
if user.profile.capability('canEditMetadata') or \ if user.profile.capability('canEditMetadata') or \
user.is_staff or \ user.is_staff or \
self.user == user or \ self.user == user or \
@ -240,7 +239,7 @@ class Item(models.Model):
return True return True
return False return False
def edit(self, data): def edit(self, data, is_task=False):
data = data.copy() data = data.copy()
# FIXME: how to map the keys to the right place to write them to? # FIXME: how to map the keys to the right place to write them to?
if 'id' in data: if 'id' in data:
@ -257,11 +256,12 @@ class Item(models.Model):
description = data.pop(key) description = data.pop(key)
if isinstance(description, dict): if isinstance(description, dict):
for value in description: for value in description:
value = ox.sanitize_html(value)
d, created = Description.objects.get_or_create(key=k, value=value) d, created = Description.objects.get_or_create(key=k, value=value)
d.description = ox.sanitize_html(description[value]) d.description = ox.sanitize_html(description[value])
d.save() d.save()
else: else:
value = data.get(k, self.get(k, '')) value = ox.sanitize_html(data.get(k, self.get(k, '')))
if not description: if not description:
description = '' description = ''
d, created = Description.objects.get_or_create(key=k, value=value) d, created = Description.objects.get_or_create(key=k, value=value)
@ -296,7 +296,10 @@ class Item(models.Model):
self.data[key] = ox.escape_html(data[key]) self.data[key] = ox.escape_html(data[key])
p = self.save() p = self.save()
if not settings.USE_IMDB and list(filter(lambda k: k in self.poster_keys, data)): if not settings.USE_IMDB and list(filter(lambda k: k in self.poster_keys, data)):
p = tasks.update_poster.delay(self.public_id) if is_task:
tasks.update_poster(self.public_id)
else:
p = tasks.update_poster.delay(self.public_id)
return p return p
def update_external(self): def update_external(self):
@ -475,7 +478,8 @@ class Item(models.Model):
for a in self.annotations.all().order_by('id'): for a in self.annotations.all().order_by('id'):
a.item = other a.item = other
a.set_public_id() with transaction.atomic():
a.set_public_id()
Annotation.objects.filter(id=a.id).update(item=other, public_id=a.public_id) Annotation.objects.filter(id=a.id).update(item=other, public_id=a.public_id)
try: try:
other_sort = other.sort other_sort = other.sort
@ -519,6 +523,7 @@ class Item(models.Model):
cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True) cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True)
p.wait() p.wait()
os.unlink(tmp_output_txt) os.unlink(tmp_output_txt)
os.close(fd)
return True return True
else: else:
return None return None
@ -636,11 +641,11 @@ class Item(models.Model):
if self.poster_height: if self.poster_height:
i['posterRatio'] = self.poster_width / self.poster_height i['posterRatio'] = self.poster_width / self.poster_height
if keys and 'source' in keys: if keys and 'hasSource' in keys:
i['source'] = self.streams().exclude(file__data='').exists() i['hasSource'] = self.streams().exclude(file__data='').exists()
streams = self.streams() streams = self.streams()
i['durations'] = [s.duration for s in streams] i['durations'] = [s[0] for s in streams.values_list('duration')]
i['duration'] = sum(i['durations']) i['duration'] = sum(i['durations'])
i['audioTracks'] = self.audio_tracks() i['audioTracks'] = self.audio_tracks()
if not i['audioTracks']: if not i['audioTracks']:
@ -696,10 +701,12 @@ class Item(models.Model):
else: else:
values = self.get(key) values = self.get(key)
if values: if values:
values = [ox.sanitize_html(value) for value in values]
for d in Description.objects.filter(key=key, value__in=values): for d in Description.objects.filter(key=key, value__in=values):
i['%sdescription' % key][d.value] = d.description i['%sdescription' % key][d.value] = d.description
else: else:
qs = Description.objects.filter(key=key, value=self.get(key, '')) value = ox.sanitize_html(self.get(key, ''))
qs = Description.objects.filter(key=key, value=value)
i['%sdescription' % key] = '' if qs.count() == 0 else qs[0].description i['%sdescription' % key] = '' if qs.count() == 0 else qs[0].description
if keys: if keys:
info = {} info = {}
@ -1019,10 +1026,14 @@ class Item(models.Model):
set_value(s, name, value) set_value(s, name, value)
elif sort_type == 'person': elif sort_type == 'person':
value = sortNames(self.get(source, [])) value = sortNames(self.get(source, []))
if value is None:
value = ''
value = utils.sort_string(value)[:955] value = utils.sort_string(value)[:955]
set_value(s, name, value) set_value(s, name, value)
elif sort_type == 'string': elif sort_type == 'string':
value = self.get(source, '') value = self.get(source, '')
if value is None:
value = ''
if isinstance(value, list): if isinstance(value, list):
value = ','.join([str(v) for v in value]) value = ','.join([str(v) for v in value])
value = utils.sort_string(value)[:955] value = utils.sort_string(value)[:955]
@ -1198,7 +1209,7 @@ class Item(models.Model):
if not r: if not r:
return False return False
path = video.name path = video.name
duration = sum(item.cache['durations']) duration = sum(self.item.cache['durations'])
else: else:
path = stream.media.path path = stream.media.path
duration = stream.info['duration'] duration = stream.info['duration']
@ -1294,90 +1305,6 @@ class Item(models.Model):
self.files.filter(selected=True).update(selected=False) self.files.filter(selected=True).update(selected=False)
self.save() self.save()
def get_torrent(self, request):
if self.torrent:
self.torrent.seek(0)
data = ox.torrent.bdecode(self.torrent.read())
url = request.build_absolute_uri("%s/torrent/" % self.get_absolute_url())
if url.startswith('https://'):
url = 'http' + url[5:]
data['url-list'] = ['%s%s' % (url, u.split('torrent/')[1]) for u in data['url-list']]
return ox.torrent.bencode(data)
def make_torrent(self):
if not settings.CONFIG['video'].get('torrent'):
return
streams = self.streams()
if streams.count() == 0:
return
base = self.path('torrent')
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
if not isinstance(base, bytes):
base = base.encode('utf-8')
if os.path.exists(base):
shutil.rmtree(base)
ox.makedirs(base)
filename = utils.safe_filename(ox.decode_html(self.get('title')))
base = self.path('torrent/%s' % filename)
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
size = 0
duration = 0.0
if streams.count() == 1:
v = streams[0]
media_path = v.media.path
extension = media_path.split('.')[-1]
url = "%s/torrent/%s.%s" % (self.get_absolute_url(),
quote(filename.encode('utf-8')),
extension)
video = "%s.%s" % (base, extension)
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size = v.media.size
duration = v.duration
else:
url = "%s/torrent/" % self.get_absolute_url()
part = 1
ox.makedirs(base)
for v in streams:
media_path = v.media.path
extension = media_path.split('.')[-1]
video = "%s/%s.Part %d.%s" % (base, filename, part, extension)
part += 1
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size += v.media.size
duration += v.duration
video = base
torrent = '%s.torrent' % base
url = "http://%s%s" % (settings.CONFIG['site']['url'], url)
meta = {
'filesystem_encoding': 'utf-8',
'target': torrent,
'url-list': url,
}
if duration:
meta['playtime'] = ox.format_duration(duration*1000)[:-4]
# slightly bigger torrent file but better for streaming
piece_size_pow2 = 15 # 1 mbps -> 32KB pieces
if size / duration >= 1000000:
piece_size_pow2 = 16 # 2 mbps -> 64KB pieces
meta['piece_size_pow2'] = piece_size_pow2
ox.torrent.create_torrent(video, settings.TRACKER_URL, meta)
self.torrent.name = torrent[len(settings.MEDIA_ROOT)+1:]
self.save()
def audio_tracks(self): def audio_tracks(self):
tracks = [f['language'] tracks = [f['language']
for f in self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True)).values('language') for f in self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True)).values('language')
@ -1385,11 +1312,10 @@ class Item(models.Model):
return sorted(set(tracks)) return sorted(set(tracks))
def streams(self, track=None): def streams(self, track=None):
files = self.files.filter(selected=True).filter(Q(is_audio=True) | Q(is_video=True))
qs = archive.models.Stream.objects.filter( qs = archive.models.Stream.objects.filter(
source=None, available=True, file__item=self, file__selected=True file__in=files, source=None, available=True
).filter( ).select_related()
Q(file__is_audio=True) | Q(file__is_video=True)
)
if not track: if not track:
tracks = self.audio_tracks() tracks = self.audio_tracks()
if len(tracks) > 1: if len(tracks) > 1:
@ -1428,7 +1354,6 @@ class Item(models.Model):
self.select_frame() self.select_frame()
self.make_poster() self.make_poster()
self.make_icon() self.make_icon()
self.make_torrent()
self.rendered = streams.count() > 0 self.rendered = streams.count() > 0
self.save() self.save()
if self.rendered: if self.rendered:
@ -1614,8 +1539,15 @@ class Item(models.Model):
cmd += ['-l', timeline] cmd += ['-l', timeline]
if frame: if frame:
cmd += ['-f', frame] cmd += ['-f', frame]
p = subprocess.Popen(cmd, close_fds=True) if settings.ITEM_ICON_DATA:
p.wait() cmd += '-d', '-'
data = self.json()
data = utils.normalize_dict('NFC', data)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, close_fds=True)
p.communicate(json.dumps(data, default=to_json).encode('utf-8'))
else:
p = subprocess.Popen(cmd, close_fds=True)
p.wait()
# remove cached versions # remove cached versions
icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon)) icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon))
for f in glob(icon.replace('.jpg', '*.jpg')): for f in glob(icon.replace('.jpg', '*.jpg')):
@ -1627,11 +1559,13 @@ class Item(models.Model):
return icon return icon
def add_empty_clips(self): def add_empty_clips(self):
if not settings.EMPTY_CLIPS:
return
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True) subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
if not subtitles: if not subtitles:
return return
# otherwise add empty 5 seconds annotation every minute # otherwise add empty 5 seconds annotation every minute
duration = sum([s.duration for s in self.streams()]) duration = sum([s[0] for s in self.streams().values_list('duration')])
layer = subtitles['id'] layer = subtitles['id']
# FIXME: allow annotations from no user instead? # FIXME: allow annotations from no user instead?
user = User.objects.all().order_by('id')[0] user = User.objects.all().order_by('id')[0]
@ -1880,6 +1814,8 @@ class Description(models.Model):
value = models.CharField(max_length=1000, db_index=True) value = models.CharField(max_length=1000, db_index=True)
description = models.TextField() description = models.TextField()
def __str__(self):
return "%s=%s" % (self.key, self.value)
class AnnotationSequence(models.Model): class AnnotationSequence(models.Model):
item = models.OneToOneField('Item', related_name='_annotation_sequence', on_delete=models.CASCADE) item = models.OneToOneField('Item', related_name='_annotation_sequence', on_delete=models.CASCADE)
@ -1895,13 +1831,12 @@ class AnnotationSequence(models.Model):
@classmethod @classmethod
def nextid(cls, item): def nextid(cls, item):
with transaction.atomic(): s, created = cls.objects.get_or_create(item=item)
s, created = cls.objects.get_or_create(item=item) if created:
if created: nextid = s.value
nextid = s.value else:
else: cursor = connection.cursor()
cursor = connection.cursor() sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id)
sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id) cursor.execute(sql)
cursor.execute(sql) nextid = cursor.fetchone()[0]
nextid = cursor.fetchone()[0]
return "%s/%s" % (item.public_id, ox.toAZ(nextid)) return "%s/%s" % (item.public_id, ox.toAZ(nextid))

View file

@ -24,10 +24,6 @@ urls = [
re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<format>webm|ogv|mp4)$', views.video), re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<format>webm|ogv|mp4)$', views.video),
re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<track>.+)\.(?P<format>webm|ogv|mp4)$', views.video), re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<track>.+)\.(?P<format>webm|ogv|mp4)$', views.video),
#torrent
re_path(r'^(?P<id>[A-Z0-9].*)/torrent$', views.torrent),
re_path(r'^(?P<id>[A-Z0-9].*)/torrent/(?P<filename>.*?)$', views.torrent),
#export #export
re_path(r'^(?P<id>[A-Z0-9].*)/json$', views.item_json), re_path(r'^(?P<id>[A-Z0-9].*)/json$', views.item_json),
re_path(r'^(?P<id>[A-Z0-9].*)/xml$', views.item_xml), re_path(r'^(?P<id>[A-Z0-9].*)/xml$', views.item_xml),

View file

@ -2,31 +2,35 @@
from datetime import timedelta, datetime from datetime import timedelta, datetime
from urllib.parse import quote from urllib.parse import quote
import xml.etree.ElementTree as ET
import gzip import gzip
import os import os
import random import random
import logging import logging
from celery.task import task, periodic_task from app.celery import app
from celery.schedules import crontab
from django.conf import settings from django.conf import settings
from django.db import connection, transaction from django.db import connection, transaction
from django.db.models import Q from django.db.models import Q
from ox.utils import ET
from app.utils import limit_rate from app.utils import limit_rate
from taskqueue.models import Task from taskqueue.models import Task
logger = logging.getLogger(__name__) logger = logging.getLogger('pandora.' + __name__)
@app.task(queue='encoding')
@periodic_task(run_every=timedelta(days=1), queue='encoding')
def cronjob(**kwargs): def cronjob(**kwargs):
if limit_rate('item.tasks.cronjob', 8 * 60 * 60): if limit_rate('item.tasks.cronjob', 8 * 60 * 60):
update_random_sort() update_random_sort()
update_random_clip_sort() update_random_clip_sort()
clear_cache.delay() clear_cache.delay()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(timedelta(days=1), cronjob.s())
def update_random_sort(): def update_random_sort():
from . import models from . import models
if list(filter(lambda f: f['id'] == 'random', settings.CONFIG['itemKeys'])): if list(filter(lambda f: f['id'] == 'random', settings.CONFIG['itemKeys'])):
@ -54,7 +58,7 @@ def update_random_clip_sort():
cursor.execute(row) cursor.execute(row)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_clips(public_id): def update_clips(public_id):
from . import models from . import models
try: try:
@ -63,7 +67,7 @@ def update_clips(public_id):
return return
item.clips.all().update(user=item.user.id) item.clips.all().update(user=item.user.id)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_poster(public_id): def update_poster(public_id):
from . import models from . import models
try: try:
@ -81,7 +85,7 @@ def update_poster(public_id):
icon=item.icon.name icon=item.icon.name
) )
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_file_paths(public_id): def update_file_paths(public_id):
from . import models from . import models
try: try:
@ -90,7 +94,7 @@ def update_file_paths(public_id):
return return
item.update_file_paths() item.update_file_paths()
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_external(public_id): def update_external(public_id):
from . import models from . import models
try: try:
@ -99,7 +103,7 @@ def update_external(public_id):
return return
item.update_external() item.update_external()
@task(queue="encoding") @app.task(queue="encoding")
def update_timeline(public_id): def update_timeline(public_id):
from . import models from . import models
try: try:
@ -109,7 +113,7 @@ def update_timeline(public_id):
item.update_timeline(async_=False) item.update_timeline(async_=False)
Task.finish(item) Task.finish(item)
@task(queue="encoding") @app.task(queue="encoding")
def rebuild_timeline(public_id): def rebuild_timeline(public_id):
from . import models from . import models
i = models.Item.objects.get(public_id=public_id) i = models.Item.objects.get(public_id=public_id)
@ -117,7 +121,7 @@ def rebuild_timeline(public_id):
s.make_timeline() s.make_timeline()
i.update_timeline(async_=False) i.update_timeline(async_=False)
@task(queue="encoding") @app.task(queue="encoding")
def load_subtitles(public_id): def load_subtitles(public_id):
from . import models from . import models
try: try:
@ -130,7 +134,7 @@ def load_subtitles(public_id):
item.update_facets() item.update_facets()
@task(queue="encoding") @app.task(queue="encoding")
def extract_clip(public_id, in_, out, resolution, format, track=None): def extract_clip(public_id, in_, out, resolution, format, track=None):
from . import models from . import models
try: try:
@ -142,7 +146,7 @@ def extract_clip(public_id, in_, out, resolution, format, track=None):
return False return False
@task(queue="encoding") @app.task(queue="encoding")
def clear_cache(days=60): def clear_cache(days=60):
import subprocess import subprocess
path = os.path.join(settings.MEDIA_ROOT, 'media') path = os.path.join(settings.MEDIA_ROOT, 'media')
@ -156,7 +160,7 @@ def clear_cache(days=60):
subprocess.check_output(cmd) subprocess.check_output(cmd)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_sitemap(base_url): def update_sitemap(base_url):
from . import models from . import models
sitemap = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'sitemap.xml.gz')) sitemap = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'sitemap.xml.gz'))
@ -356,7 +360,7 @@ def update_sitemap(base_url):
f.write(data) f.write(data)
@task(queue='default') @app.task(queue='default')
def bulk_edit(data, username): def bulk_edit(data, username):
from django.db import transaction from django.db import transaction
from . import models from . import models
@ -367,5 +371,5 @@ def bulk_edit(data, username):
if item.editable(user): if item.editable(user):
with transaction.atomic(): with transaction.atomic():
item.refresh_from_db() item.refresh_from_db()
response = edit_item(user, item, data) response = edit_item(user, item, data, is_task=True)
return {} return {}

View file

@ -71,7 +71,7 @@ def join_tiles(source_paths, durations, target_path):
if not w or large_tile_i < large_tile_n - 1: if not w or large_tile_i < large_tile_n - 1:
w = 60 w = 60
data['target_images']['large'] = data['target_images']['large'].resize( data['target_images']['large'] = data['target_images']['large'].resize(
(w, small_tile_h), Image.ANTIALIAS (w, small_tile_h), Image.LANCZOS
) )
if data['target_images']['small']: if data['target_images']['small']:
data['target_images']['small'].paste( data['target_images']['small'].paste(
@ -90,7 +90,7 @@ def join_tiles(source_paths, durations, target_path):
if data['full_tile_widths'][0]: if data['full_tile_widths'][0]:
resized = data['target_images']['large'].resize(( resized = data['target_images']['large'].resize((
data['full_tile_widths'][0], large_tile_h data['full_tile_widths'][0], large_tile_h
), Image.ANTIALIAS) ), Image.LANCZOS)
data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0)) data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0))
data['full_tile_offset'] += data['full_tile_widths'][0] data['full_tile_offset'] += data['full_tile_widths'][0]
data['full_tile_widths'] = data['full_tile_widths'][1:] data['full_tile_widths'] = data['full_tile_widths'][1:]
@ -196,7 +196,7 @@ def join_tiles(source_paths, durations, target_path):
#print(image_file) #print(image_file)
image_file = '%stimeline%s%dp.jpg' % (target_path, full_tile_mode, small_tile_h) image_file = '%stimeline%s%dp.jpg' % (target_path, full_tile_mode, small_tile_h)
data['target_images']['full'].resize( data['target_images']['full'].resize(
(full_tile_w, small_tile_h), Image.ANTIALIAS (full_tile_w, small_tile_h), Image.LANCZOS
).save(image_file) ).save(image_file)
#print(image_file) #print(image_file)

View file

@ -61,7 +61,7 @@ def sort_title(title):
title = sort_string(title) title = sort_string(title)
#title #title
title = re.sub('[\'!¿¡,\.;\-"\:\*\[\]]', '', title) title = re.sub(r'[\'!¿¡,\.;\-"\:\*\[\]]', '', title)
return title.strip() return title.strip()
def get_positions(ids, pos, decode_id=False): def get_positions(ids, pos, decode_id=False):

View file

@ -16,12 +16,14 @@ from wsgiref.util import FileWrapper
from django.conf import settings from django.conf import settings
from ox.utils import json, ET from ox.utils import json, ET
from oxdjango.decorators import login_required_json
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
from oxdjango.http import HttpFileResponse
import ox import ox
from oxdjango.api import actions
from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
import oxdjango
from . import models from . import models
from . import utils from . import utils
from . import tasks from . import tasks
@ -32,7 +34,6 @@ from clip.models import Clip
from user.models import has_capability from user.models import has_capability
from changelog.models import add_changelog from changelog.models import add_changelog
from oxdjango.api import actions
def _order_query(qs, sort, prefix='sort__'): def _order_query(qs, sort, prefix='sort__'):
@ -308,7 +309,7 @@ def find(request, data):
responsive UI: First leave out `keys` to get totals as fast as possible, responsive UI: First leave out `keys` to get totals as fast as possible,
then pass `positions` to get the positions of previously selected items, then pass `positions` to get the positions of previously selected items,
finally make the query with the `keys` you need and an appropriate `range`. finally make the query with the `keys` you need and an appropriate `range`.
For more examples, see https://wiki.0x2620.org/wiki/pandora/QuerySyntax. For more examples, see https://code.0x2620.org/0x2620/pandora/wiki/QuerySyntax.
see: add, edit, get, lookup, remove, upload see: add, edit, get, lookup, remove, upload
''' '''
if settings.JSON_DEBUG: if settings.JSON_DEBUG:
@ -533,7 +534,7 @@ def get(request, data):
return render_to_json_response(response) return render_to_json_response(response)
actions.register(get) actions.register(get)
def edit_item(user, item, data): def edit_item(user, item, data, is_task=False):
data = data.copy() data = data.copy()
update_clips = False update_clips = False
response = json_response(status=200, text='ok') response = json_response(status=200, text='ok')
@ -558,7 +559,7 @@ def edit_item(user, item, data):
user_groups = set([g.name for g in user.groups.all()]) user_groups = set([g.name for g in user.groups.all()])
other_groups = list(groups - user_groups) other_groups = list(groups - user_groups)
data['groups'] = [g for g in data['groups'] if g in user_groups] + other_groups data['groups'] = [g for g in data['groups'] if g in user_groups] + other_groups
r = item.edit(data) r = item.edit(data, is_task=is_task)
if r: if r:
r.wait() r.wait()
if update_clips: if update_clips:
@ -595,7 +596,7 @@ def add(request, data):
if p: if p:
p.wait() p.wait()
else: else:
i.make_poster() item.make_poster()
del data['title'] del data['title']
if data: if data:
response = edit_item(request.user, item, data) response = edit_item(request.user, item, data)
@ -948,9 +949,11 @@ def timeline(request, id, size, position=-1, format='jpg', mode=None):
if not item.access(request.user): if not item.access(request.user):
return HttpResponseForbidden() return HttpResponseForbidden()
modes = [t['id'] for t in settings.CONFIG['timelines']]
if not mode: if not mode:
mode = 'antialias' mode = 'antialias'
modes = [t['id'] for t in settings.CONFIG['timelines']] if mode not in modes:
mode = modes[0]
if mode not in modes: if mode not in modes:
raise Http404 raise Http404
modes.pop(modes.index(mode)) modes.pop(modes.index(mode))
@ -1044,27 +1047,6 @@ def download(request, id, resolution=None, format='webm', part=None):
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8')) response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
return response return response
def torrent(request, id, filename=None):
item = get_object_or_404(models.Item, public_id=id)
if not item.access(request.user):
return HttpResponseForbidden()
if not item.torrent:
raise Http404
if not filename or filename.endswith('.torrent'):
response = HttpResponse(item.get_torrent(request),
content_type='application/x-bittorrent')
filename = utils.safe_filename("%s.torrent" % item.get('title'))
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
return response
while filename.startswith('/'):
filename = filename[1:]
filename = filename.replace('/../', '/')
filename = item.path('torrent/%s' % filename)
filename = os.path.abspath(os.path.join(settings.MEDIA_ROOT, filename))
response = HttpFileResponse(filename)
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % \
quote(os.path.basename(filename.encode('utf-8')))
return response
def video(request, id, resolution, format, index=None, track=None): def video(request, id, resolution, format, index=None, track=None):
resolution = int(resolution) resolution = int(resolution)
@ -1286,12 +1268,6 @@ def atom_xml(request):
el.text = "1:1" el.text = "1:1"
if has_capability(request.user, 'canDownloadVideo'): if has_capability(request.user, 'canDownloadVideo'):
if item.torrent:
el = ET.SubElement(entry, "link")
el.attrib['rel'] = 'enclosure'
el.attrib['type'] = 'application/x-bittorrent'
el.attrib['href'] = '%s/torrent/' % page_link
el.attrib['length'] = '%s' % ox.get_torrent_size(item.torrent.path)
# FIXME: loop over streams # FIXME: loop over streams
# for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])): # for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])):
for s in item.streams().filter(source=None): for s in item.streams().filter(source=None):
@ -1314,12 +1290,15 @@ def atom_xml(request):
'application/atom+xml' 'application/atom+xml'
) )
def oembed(request): def oembed(request):
format = request.GET.get('format', 'json') format = request.GET.get('format', 'json')
maxwidth = int(request.GET.get('maxwidth', 640)) maxwidth = int(request.GET.get('maxwidth', 640))
maxheight = int(request.GET.get('maxheight', 480)) maxheight = int(request.GET.get('maxheight', 480))
url = request.GET['url'] url = request.GET.get('url')
if not url:
raise Http404
parts = urlparse(url).path.split('/') parts = urlparse(url).path.split('/')
if len(parts) < 2: if len(parts) < 2:
raise Http404 raise Http404

7
pandora/itemlist/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ItemListConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'itemlist'

View file

@ -0,0 +1,61 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import itemlist.models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('itemlist', '0003_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='list',
name='description',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='list',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='list',
name='poster_frames',
field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='list',
name='query',
field=oxdjango.fields.JSONField(default=itemlist.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='list',
name='sort',
field=oxdjango.fields.JSONField(default=itemlist.models.get_listsort, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='list',
name='status',
field=models.CharField(default='private', max_length=20),
),
migrations.AlterField(
model_name='list',
name='type',
field=models.CharField(default='static', max_length=255),
),
migrations.AlterField(
model_name='listitem',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='position',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -26,6 +26,9 @@ def get_icon_path(f, x): return get_path(f, 'icon.jpg')
def get_listview(): return settings.CONFIG['user']['ui']['listView'] def get_listview(): return settings.CONFIG['user']['ui']['listView']
def get_listsort(): return tuple(settings.CONFIG['user']['ui']['listSort']) def get_listsort(): return tuple(settings.CONFIG['user']['ui']['listSort'])
def default_query():
return {"static": True}
class List(models.Model): class List(models.Model):
class Meta: class Meta:
@ -38,7 +41,7 @@ class List(models.Model):
name = models.CharField(max_length=255) name = models.CharField(max_length=255)
status = models.CharField(max_length=20, default='private') status = models.CharField(max_length=20, default='private')
_status = ['private', 'public', 'featured'] _status = ['private', 'public', 'featured']
query = JSONField(default=lambda: {"static": True}, editable=False) query = JSONField(default=default_query, editable=False)
type = models.CharField(max_length=255, default='static') type = models.CharField(max_length=255, default='static')
description = models.TextField(default='') description = models.TextField(default='')

View file

@ -84,6 +84,11 @@ def findLists(request, data):
for x in data.get('query', {}).get('conditions', []) for x in data.get('query', {}).get('conditions', [])
) )
is_personal = request.user.is_authenticated and any(
(x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
for x in data.get('query', {}).get('conditions', [])
)
if is_section_request: if is_section_request:
qs = query['qs'] qs = query['qs']
if not is_featured and not request.user.is_anonymous: if not is_featured and not request.user.is_anonymous:
@ -92,6 +97,9 @@ def findLists(request, data):
else: else:
qs = _order_query(query['qs'], query['sort']) qs = _order_query(query['qs'], query['sort'])
if is_personal and request.user.profile.ui.get('hidden', {}).get('lists'):
qs = qs.exclude(name__in=request.user.profile.ui['hidden']['lists'])
response = json_response() response = json_response()
if 'keys' in data: if 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]] qs = qs[query['range'][0]:query['range'][1]]
@ -234,7 +242,7 @@ def addList(request, data):
'type' and 'view'. 'type' and 'view'.
see: editList, findLists, getList, removeList, sortLists see: editList, findLists, getList, removeList, sortLists
''' '''
data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip() data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
name = data['name'] name = data['name']
if not name: if not name:
name = "Untitled" name = "Untitled"
@ -412,7 +420,10 @@ def sortLists(request, data):
models.Position.objects.filter(section=section, list=l).exclude(id=pos.id).delete() models.Position.objects.filter(section=section, list=l).exclude(id=pos.id).delete()
else: else:
for i in ids: for i in ids:
l = get_list_or_404_json(i) try:
l = get_list_or_404_json(i)
except:
continue
pos, created = models.Position.objects.get_or_create(list=l, pos, created = models.Position.objects.get_or_create(list=l,
user=request.user, section=section) user=request.user, section=section)
if pos.position != position: if pos.position != position:

7
pandora/log/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class LogConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'log'

View file

@ -0,0 +1,23 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('log', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='log',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='log',
name='url',
field=models.CharField(default='', max_length=1000),
),
]

View file

@ -2,10 +2,15 @@
from datetime import timedelta, datetime from datetime import timedelta, datetime
from celery.task import periodic_task from app.celery import app
from celery.schedules import crontab
from . import models from . import models
@app.task(queue='encoding')
def cronjob(**kwargs):
    """Delete Log rows whose ``modified`` timestamp is older than 30 days.

    Runs on the ``encoding`` Celery queue; scheduled daily by
    ``setup_periodic_tasks`` below.
    """
    cutoff = datetime.now() - timedelta(days=30)
    models.Log.objects.filter(modified__lt=cutoff).delete()


@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register ``cronjob`` to run once per day once the Celery app is finalized."""
    sender.add_periodic_task(timedelta(days=1), cronjob.s())

View file

@ -65,7 +65,7 @@ actions.register(removeErrorLogs, cache=False)
def parse_query(data, user): def parse_query(data, user):
query = {} query = {}
query['range'] = [0, 100] query['range'] = [0, 100]
query['sort'] = [{'key':'name', 'operator':'+'}] query['sort'] = [{'key': 'modified', 'operator': '-'}]
for key in ('keys', 'group', 'list', 'range', 'sort', 'query'): for key in ('keys', 'group', 'list', 'range', 'sort', 'query'):
if key in data: if key in data:
query[key] = data[key] query[key] = data[key]

View file

@ -10,7 +10,8 @@ def activate_venv(base):
bin_path = os.path.join(base, 'bin') bin_path = os.path.join(base, 'bin')
if bin_path not in old_os_path: if bin_path not in old_os_path:
os.environ['PATH'] = os.path.join(base, 'bin') + os.pathsep + old_os_path os.environ['PATH'] = os.path.join(base, 'bin') + os.pathsep + old_os_path
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages') version = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
site_packages = os.path.join(base, 'lib', 'python%s' % version, 'site-packages')
prev_sys_path = list(sys.path) prev_sys_path = list(sys.path)
import site import site
site.addsitedir(site_packages) site.addsitedir(site_packages)

View file

3
pandora/mobile/admin.py Normal file
View file

@ -0,0 +1,3 @@
from django.contrib import admin
# Register your models here.

6
pandora/mobile/apps.py Normal file
View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class MobileConfig(AppConfig):
    """Application configuration for the ``mobile`` Django app."""

    name = 'mobile'
    # 64-bit auto-incrementing primary keys for this app's models.
    default_auto_field = "django.db.models.BigAutoField"

View file

3
pandora/mobile/models.py Normal file
View file

@ -0,0 +1,3 @@
from django.db import models
# Create your models here.

3
pandora/mobile/tests.py Normal file
View file

@ -0,0 +1,3 @@
from django.test import TestCase
# Create your tests here.

Some files were not shown because too many files have changed in this diff Show more