Compare commits

..

1 commit

Author SHA1 Message Date
j
5b048a710d celery version hotfix 2020-09-28 14:43:16 +02:00
637 changed files with 85255 additions and 158406 deletions

2
.gitignore vendored
View file

@ -36,5 +36,3 @@ pandora/gunicorn_config.py
.DS_Store
.env
overlay/
pandora/encoding.conf
pandora/tasks.conf

View file

@ -1,4 +1,4 @@
FROM code.0x2620.org/0x2620/pandora-base:latest
FROM 0x2620/pandora-base:latest
LABEL maintainer="0x2620@0x2620.org"

View file

@ -7,7 +7,7 @@
We recommend to run pan.do/ra inside of LXD or LXC or dedicated VM or server.
You will need at least 2GB of free disk space
pan.do/ra is known to work with Debian/12 (bookworm) and Ubuntu 20.04,
pan.do/ra is known to work with Ubuntu 18.04, 20.04 and Debian/10 (buster),
other distributions might also work, let us know if it works for you.
Use the following commands as root to install pan.do/ra and all dependencies:
@ -16,7 +16,7 @@
cd /root
curl -sL https://pan.do/ra-install > pandora_install.sh
chmod +x pandora_install.sh
export BRANCH=master # change to 'stable' to get the latest release (sometimes outdated)
export BRANCH=stable # change to 'master' to get current development version
./pandora_install.sh 2>&1 | tee pandora_install.log
```
@ -50,9 +50,4 @@ export BRANCH=master # change to 'stable' to get the latest release (sometimes o
More info at
https://code.0x2620.org/0x2620/pandora/wiki/Customization
## Update
To update your existing installation run
pandoractl update

37
ctl
View file

@ -17,7 +17,7 @@ if [ "$action" = "init" ]; then
SUDO=""
PANDORA_USER=`ls -l update.py | cut -f3 -d" "`
if [ `whoami` != $PANDORA_USER ]; then
SUDO="sudo -E -H -u $PANDORA_USER"
SUDO="sudo -H -u $PANDORA_USER"
fi
$SUDO python3 -m venv --system-site-packages .
branch=`cat .git/HEAD | sed 's@/@\n@g' | tail -n1`
@ -27,30 +27,25 @@ if [ "$action" = "init" ]; then
$SUDO bin/python3 -m pip install -U --ignore-installed "pip<9"
fi
if [ ! -d static/oxjs ]; then
$SUDO git clone -b $branch https://code.0x2620.org/0x2620/oxjs.git static/oxjs
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxjs.git static/oxjs
fi
$SUDO mkdir -p src
if [ ! -d src/oxtimelines ]; then
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxtimelines.git src/oxtimelines
fi
for package in oxtimelines python-ox; do
cd ${BASE}
if [ ! -d src/${package} ]; then
$SUDO git clone -b $branch https://code.0x2620.org/0x2620/${package}.git src/${package}
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/${package}.git src/${package}
fi
cd ${BASE}/src/${package}
$SUDO ${BASE}/bin/pip install -e .
$SUDO ${BASE}/bin/python setup.py develop
done
cd ${BASE}
$SUDO ./bin/pip install -r requirements.txt
for template in gunicorn_config.py encoding.conf tasks.conf; do
if [ ! -e pandora/$template ]; then
$SUDO cp pandora/${template}.in pandora/$template
fi
done
exit 0
fi
if [ "$action" = "version" ]; then
git rev-list HEAD --count
if [ ! -e pandora/gunicorn_config.py ]; then
$SUDO cp pandora/gunicorn_config.py.in pandora/gunicorn_config.py
fi
exit 0
fi
@ -67,10 +62,11 @@ if [ ! -z $cmd ]; then
SUDO=""
PANDORA_USER=`ls -l update.py | cut -f3 -d" "`
if [ `whoami` != $PANDORA_USER ]; then
SUDO="sudo -E -H -u $PANDORA_USER"
SUDO="sudo -H -u $PANDORA_USER"
fi
shift
exec $SUDO "$BASE/$cmd" $@
$SUDO "$BASE/$cmd" $@
exit $?
fi
if [ `whoami` != 'root' ]; then
@ -78,15 +74,10 @@ if [ `whoami` != 'root' ]; then
exit 1
fi
if [ "$action" = "install" ]; then
cd "`dirname "$self"`"
cd "`dirname "$0"`"
BASE=`pwd`
if [ -x /bin/systemctl ]; then
if [ -d /etc/systemd/system/ ]; then
for template in gunicorn_config.py encoding.conf tasks.conf; do
if [ ! -e pandora/$template ]; then
$SUDO cp pandora/${template}.in pandora/$template
fi
done
for service in $SERVICES; do
if [ -e /lib/systemd/system/${service}.service ]; then
rm -f /lib/systemd/system/${service}.service \

View file

@ -15,6 +15,7 @@ services:
- "127.0.0.1:2620:80"
networks:
- backend
- default
links:
- pandora
- websocketd
@ -27,7 +28,7 @@ services:
restart: unless-stopped
db:
image: postgres:15
image: postgres:latest
networks:
- backend
env_file: .env

View file

@ -1,4 +1,4 @@
FROM debian:12
FROM debian:buster
LABEL maintainer="0x2620@0x2620.org"

View file

@ -1,17 +1,9 @@
#!/bin/bash
UBUNTU_CODENAME=bionic
if [ -e /etc/os-release ]; then
. /etc/os-release
fi
if [ -z "$UBUNTU_CODENAME" ]; then
UBUNTU_CODENAME=bionic
fi
if [ "$VERSION_CODENAME" = "bullseye" ]; then
UBUNTU_CODENAME=focal
fi
if [ "$VERSION_CODENAME" = "bookworm" ]; then
UBUNTU_CODENAME=lunar
fi
export DEBIAN_FRONTEND=noninteractive
echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99languages
@ -38,8 +30,6 @@ apt-get update -qq
apt-get install -y \
netcat-openbsd \
sudo \
rsync \
iproute2 \
vim \
wget \
pwgen \
@ -52,23 +42,22 @@ apt-get install -y \
python3-numpy \
python3-psycopg2 \
python3-pyinotify \
python3-simplejson \
python3-lxml \
python3-cssselect \
python3-html5lib \
python3-ox \
python3-elasticsearch \
oxframe \
ffmpeg \
mkvtoolnix \
gpac \
imagemagick \
poppler-utils \
youtube-dl \
ipython3 \
tesseract-ocr \
tesseract-ocr-eng \
postfix \
postgresql-client
apt-get install -y --no-install-recommends youtube-dl rtmpdump
apt-get clean
rm -f /install.sh

View file

@ -11,7 +11,7 @@ else
proxy=
fi
docker build $proxy -t code.0x2620.org/0x2620/pandora-base base
docker build -t code.0x2620.org/0x2620/pandora-nginx nginx
docker build $proxy -t 0x2620/pandora-base base
docker build -t 0x2620/pandora-nginx nginx
cd ..
docker build -t code.0x2620.org/0x2620/pandora .
docker build -t 0x2620/pandora .

View file

@ -6,9 +6,7 @@ user=pandora
export LANG=en_US.UTF-8
mkdir -p /run/pandora
chown -R ${user}:${user} /run/pandora
update="/usr/bin/sudo -u $user -E -H /srv/pandora/update.py"
chown -R ${user}.${user} /run/pandora
# pan.do/ra services
if [ "$action" = "pandora" ]; then
@ -28,12 +26,12 @@ if [ "$action" = "pandora" ]; then
/overlay/install.py
echo "Initializing database..."
echo "CREATE EXTENSION pg_trgm;" | /srv/pandora/pandora/manage.py dbshell || true
echo "CREATE EXTENSION pg_trgm;" | /srv/pandora/pandora/manage.py dbshell
/srv/pandora/pandora/manage.py init_db
$update db
/srv/pandora/update.py db
echo "Generating static files..."
chown -R ${user}:${user} /srv/pandora/
$update static
/srv/pandora/update.py static
chown -R ${user}.${user} /srv/pandora/
touch /srv/pandora/initialized
fi
/srv/pandora_base/docker/wait-for db 5432
@ -46,53 +44,54 @@ if [ "$action" = "encoding" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672
name=pandora-encoding-$(hostname)
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/celery \
-A app worker \
-Q encoding -n ${name} \
--pidfile /run/pandora/encoding.pid \
--max-tasks-per-child 500 \
-c 1 \
-l INFO
/srv/pandora/bin/python \
/srv/pandora/pandora/manage.py \
celery worker \
-c 1 \
-Q encoding -n $name \
-l INFO
fi
if [ "$action" = "tasks" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672
name=pandora-default-$(hostname)
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/celery \
-A app worker \
-Q default,celery -n ${name} \
--pidfile /run/pandora/tasks.pid \
--max-tasks-per-child 1000 \
/srv/pandora/bin/python \
/srv/pandora/pandora/manage.py \
celery worker \
-Q default,celery -n $name \
--maxtasksperchild 1000 \
-l INFO
fi
if [ "$action" = "cron" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/celery \
-A app beat \
-s /run/pandora/celerybeat-schedule \
/srv/pandora/bin/python \
/srv/pandora/pandora/manage.py \
celerybeat -s /run/pandora/celerybeat-schedule \
--pidfile /run/pandora/cron.pid \
-l INFO
fi
if [ "$action" = "websocketd" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/python \
/srv/pandora/pandora/manage.py websocketd
fi
# pan.do/ra management and update
if [ "$action" = "ctl" ]; then
if [ "$action" = "manage.py" ]; then
shift
exec /srv/pandora/ctl "$@"
exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/pandora/manage.py "$@"
fi
if [ "$action" = "update.py" ]; then
shift
exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/update.py "$@"
fi
if [ "$action" = "bash" ]; then
shift
@ -103,9 +102,9 @@ fi
# pan.do/ra setup hooks
if [ "$action" = "docker-compose.yml" ]; then
cat /srv/pandora_base/docker-compose.yml | \
sed "s#build: \.#image: code.0x2620.org/0x2620/pandora:latest#g" | \
sed "s#build: \.#image: 0x2620/pandora:latest#g" | \
sed "s#\./overlay:#.:#g" | \
sed "s#build: docker/nginx#image: code.0x2620.org/0x2620/pandora-nginx:latest#g"
sed "s#build: docker/nginx#image: 0x2620/pandora-nginx:latest#g"
exit
fi
if [ "$action" = ".env" ]; then
@ -131,5 +130,5 @@ echo " docker run 0x2620/pandora setup | sh"
echo
echo adjust created files to match your needs and run:
echo
echo " docker compose up"
echo " docker-compose up"
echo

View file

@ -56,9 +56,13 @@ cp /srv/pandora/docker/entrypoint.sh /entrypoint.sh
mv /srv/pandora/ /srv/pandora_base/
mkdir /pandora
ln -s /pandora /srv/pandora
cat > /usr/local/bin/pandoractl << EOF
cat > /usr/local/bin/update.py << EOF
#!/bin/sh
exec /srv/pandora/ctl \$@
exec /srv/pandora/update.py \$@
EOF
chmod +x /usr/local/bin/pandoractl
cat > /usr/local/bin/manage.py << EOF
#!/bin/sh
exec /srv/pandora/pandora/manage.py \$@
EOF
chmod +x /usr/local/bin/manage.py /usr/local/bin/update.py

View file

@ -1,12 +0,0 @@
#!/bin/bash
# push new version of pan.do/ra to code.0x2620.org
set -e
cd /tmp
git clone https://code.0x2620.org/0x2620/pandora
cd pandora
./docker/build.sh
docker push code.0x2620.org/0x2620/pandora-base:latest
docker push code.0x2620.org/0x2620/pandora-nginx:latest
docker push code.0x2620.org/0x2620/pandora:latest

View file

@ -1,18 +1,18 @@
#!/bin/sh
docker run --rm code.0x2620.org/0x2620/pandora docker-compose.yml > docker-compose.yml
docker run 0x2620/pandora docker-compose.yml > docker-compose.yml
if [ ! -e .env ]; then
docker run --rm code.0x2620.org/0x2620/pandora .env > .env
docker run 0x2620/pandora .env > .env
echo .env >> .gitignore
fi
if [ ! -e config.jsonc ]; then
docker run --rm code.0x2620.org/0x2620/pandora config.jsonc > config.jsonc
docker run 0x2620/pandora config.jsonc > config.jsonc
fi
cat > README.md << EOF
pan.do/ra docker instance
this folder was created with
docker run --rm code.0x2620.org/0x2620/pandora setup | sh
docker run 0x2620/pandora setup | sh
To start pan.do/ra adjust the files in this folder:
@ -22,14 +22,11 @@ To start pan.do/ra adjust the files in this folder:
and to get started run this:
docker compose up -d
docker-compose up -d
To update pan.do/ra run:
docker compose run --rm pandora ctl update
docker-compose run pandora update.py
To run pan.do/ra manage shell:
docker compose run --rm pandora ctl manage shell
EOF
touch __init__.py

View file

@ -1,5 +1,5 @@
#!/bin/sh
TIMEOUT=180
TIMEOUT=60
TARGET="$1"
for i in `seq $TIMEOUT` ; do

View file

@ -17,7 +17,6 @@ server {
#server_name pandora.YOURDOMAIN.COM;
listen 80 default;
listen [::]:80 default;
access_log /var/log/nginx/pandora.access.log;
error_log /var/log/nginx/pandora.error.log;

View file

@ -1 +0,0 @@
pandora ALL=(ALL:ALL) NOPASSWD:/usr/local/bin/pandoractl

View file

@ -11,7 +11,7 @@ PIDFile=/run/pandora/cron.pid
WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \
-A app beat \
--scheduler django_celery_beat.schedulers:DatabaseScheduler \
-s /run/pandora/celerybeat-schedule \
--pidfile /run/pandora/cron.pid \
-l INFO
ExecReload=/bin/kill -HUP $MAINPID

View file

@ -7,16 +7,14 @@ Type=simple
Restart=always
User=pandora
Group=pandora
EnvironmentFile=/srv/pandora/pandora/encoding.conf
PIDFile=/run/pandora/encoding.pid
WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \
-A app worker \
-Q encoding -n pandora-encoding \
--pidfile /run/pandora/encoding.pid \
-c $CONCURRENCY \
--max-tasks-per-child $MAX_TASKS_PER_CHILD \
-l $LOGLEVEL
--maxtasksperchild 500 \
-l INFO
ExecReload=/bin/kill -TERM $MAINPID
[Install]

View file

@ -7,16 +7,14 @@ Type=simple
Restart=always
User=pandora
Group=pandora
EnvironmentFile=/srv/pandora/pandora/tasks.conf
PIDFile=/run/pandora/tasks.pid
WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \
-A app worker \
-Q default,celery -n pandora-default \
--pidfile /run/pandora/tasks.pid \
-c $CONCURRENCY \
--max-tasks-per-child $MAX_TASKS_PER_CHILD \
-l $LOGLEVEL
--maxtasksperchild 1000 \
-l INFO
ExecReload=/bin/kill -TERM $MAINPID
[Install]

View file

@ -1,6 +0,0 @@
from django.apps import AppConfig
class AnnotationConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'annotation'

View file

@ -27,7 +27,6 @@ class Command(BaseCommand):
parser.add_argument('username', help='username')
parser.add_argument('item', help='item')
parser.add_argument('layer', help='layer')
parser.add_argument('language', help='language', default="")
parser.add_argument('filename', help='filename.srt')
def handle(self, *args, **options):
@ -35,7 +34,6 @@ class Command(BaseCommand):
public_id = options['item']
layer_id = options['layer']
filename = options['filename']
language = options.get("language")
user = User.objects.get(username=username)
item = Item.objects.get(public_id=public_id)
@ -49,9 +47,6 @@ class Command(BaseCommand):
for i in range(len(annotations)-1):
if annotations[i]['out'] == annotations[i+1]['in']:
annotations[i]['out'] = annotations[i]['out'] - 0.001
if language:
for annotation in annotations:
annotation["value"] = '<span lang="%s">%s</span>' % (language, annotation["value"])
tasks.add_annotations.delay({
'item': item.public_id,
'layer': layer_id,

View file

@ -1,18 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('annotation', '0003_auto_20160219_1537'),
]
operations = [
migrations.AlterField(
model_name='annotation',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -163,25 +163,28 @@ class Annotation(models.Model):
self.sortvalue = None
self.languages = None
if not self.clip or self.start != self.clip.start or self.end != self.clip.end:
self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
with transaction.atomic():
if not self.clip or self.start != self.clip.start or self.end != self.clip.end:
self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
if set_public_id:
self.set_public_id()
super(Annotation, self).save(*args, **kwargs)
if self.clip:
self.clip.update_findvalue()
setattr(self.clip, self.layer, True)
self.clip.save(update_fields=[self.layer, 'sortvalue', 'findvalue'])
if self.clip:
Clip.objects.filter(**{
'id': self.clip.id,
self.layer: False
}).update(**{self.layer: True})
# update clip.findvalue
self.clip.save()
# update matches in bulk if called from load_subtitles
if not delay_matches:
self.update_matches()
self.update_documents()
self.update_translations()
# update matches in bulk if called from load_subtitles
if not delay_matches:
self.update_matches()
self.update_documents()
self.update_translations()
def update_matches(self):
from place.models import Place
@ -264,10 +267,7 @@ class Annotation(models.Model):
from translation.models import Translation
layer = self.get_layer()
if layer.get('translate'):
for lang in settings.CONFIG['languages']:
if lang == settings.CONFIG['language']:
continue
Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT})
Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT})
def delete(self, *args, **kwargs):
with transaction.atomic():

View file

@ -5,12 +5,12 @@ from django.contrib.auth import get_user_model
from django.db import transaction
import ox
from app.celery import app
from celery.task import task
from .models import Annotation
@app.task(ignore_results=False, queue='default')
@task(ignore_results=False, queue='default')
def add_annotations(data):
from item.models import Item
from entity.models import Entity
@ -51,7 +51,7 @@ def add_annotations(data):
annotation.item.update_facets()
return True
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_item(id, force=False):
from item.models import Item
from clip.models import Clip
@ -72,7 +72,7 @@ def update_item(id, force=False):
a.item.save()
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_annotations(layers, value):
items = {}

View file

@ -180,10 +180,10 @@ def addAnnotation(request, data):
text='invalid data'))
item = get_object_or_404_json(Item, public_id=data['item'])
layer_id = data['layer']
layer = get_by_id(settings.CONFIG['layers'], layer_id)
if layer['canAddAnnotations'].get(request.user.profile.get_level()) or item.editable(request.user):
if layer['canAddAnnotations'].get(request.user.profile.get_level()):
if layer['type'] == 'entity':
try:
value = Entity.get_by_name(ox.decode_html(data['value']), layer['entity']).get_id()
@ -241,7 +241,8 @@ def addAnnotations(request, data):
layer_id = data['layer']
layer = get_by_id(settings.CONFIG['layers'], layer_id)
if item.editable(request.user):
if item.editable(request.user) \
and layer['canAddAnnotations'].get(request.user.profile.get_level()):
response = json_response()
data['user'] = request.user.username
t = add_annotations.delay(data)

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class AppConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'app'

View file

@ -6,8 +6,16 @@ root_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
root_dir = os.path.dirname(root_dir)
os.chdir(root_dir)
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
app = Celery('pandora', broker_connection_retry_on_startup=True)
app = Celery('pandora')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()

View file

@ -24,6 +24,9 @@ User = get_user_model()
_win = (sys.platform == "win32")
RUN_RELOADER = True
NOTIFIER = None
def get_version():
git_dir = join(dirname(dirname(dirname(__file__))), '.git')
if exists(git_dir):
@ -133,13 +136,7 @@ def load_config(init=False):
added = []
for key in sorted(d):
if key not in c:
if key not in (
'hidden',
'find',
'findDocuments',
'videoPoints',
):
added.append("\"%s\": %s," % (key, json.dumps(d[key])))
added.append("\"%s\": %s," % (key, json.dumps(d[key])))
c[key] = d[key]
if added:
sys.stderr.write("adding default %s:\n\t" % section)
@ -260,6 +257,46 @@ check the README for further details.
except:
pass
def reloader_thread():
global NOTIFIER
settings.RELOADER_RUNNING=True
_config_mtime = 0
try:
import pyinotify
INOTIFY = True
except:
INOTIFY = False
if INOTIFY:
def add_watch():
name = os.path.realpath(settings.SITE_CONFIG)
wm.add_watch(name, pyinotify.IN_CLOSE_WRITE, reload_config)
def reload_config(event):
load_config()
add_watch()
wm = pyinotify.WatchManager()
add_watch()
notifier = pyinotify.Notifier(wm)
NOTIFIER = notifier
notifier.loop()
else:
while RUN_RELOADER:
try:
stat = os.stat(settings.SITE_CONFIG)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if mtime > _config_mtime:
load_config()
_config_mtime = mtime
time.sleep(10)
except:
#sys.stderr.write("reloading config failed\n")
pass
def update_static():
oxjs_build = os.path.join(settings.STATIC_ROOT, 'oxjs/tools/build/build.py')
if os.path.exists(oxjs_build):
@ -327,11 +364,7 @@ def update_static():
#locale
for f in sorted(glob(os.path.join(settings.STATIC_ROOT, 'json/locale.pandora.*.json'))):
with open(f) as fd:
try:
locale = json.load(fd)
except:
print("failed to parse %s" % f)
raise
locale = json.load(fd)
site_locale = f.replace('locale.pandora', 'locale.' + settings.CONFIG['site']['id'])
locale_file = f.replace('locale.pandora', 'locale')
print('write', locale_file)
@ -374,4 +407,17 @@ def update_geoip(force=False):
print('failed to download GeoLite2-City.mmdb')
def init():
load_config(True)
if not settings.RELOADER_RUNNING:
load_config(True)
if settings.RELOAD_CONFIG:
thread.start_new_thread(reloader_thread, ())
def shutdown():
if settings.RELOADER_RUNNING:
RUN_RELOADER = False
settings.RELOADER_RUNNING = False
if NOTIFIER:
NOTIFIER.stop()

View file

@ -11,8 +11,6 @@ def run(cmd):
stdout, stderr = p.communicate()
if p.returncode != 0:
print('failed to run:', cmd)
print(stdout)
print(stderr)
sys.exit(1)

View file

@ -1,23 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='page',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='settings',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -1,34 +0,0 @@
import unicodedata
from django.contrib.auth import get_user_model
import mozilla_django_oidc.auth
from user.utils import prepare_user
User = get_user_model()
class OIDCAuthenticationBackend(mozilla_django_oidc.auth.OIDCAuthenticationBackend):
def create_user(self, claims):
user = super(OIDCAuthenticationBackend, self).create_user(claims)
username = claims.get("preferred_username")
n = 1
if username and username != user.username:
uname = username
while User.objects.filter(username=uname).exclude(id=user.id).exists():
n += 1
uname = '%s (%s)' % (username, n)
user.username = uname
user.save()
prepare_user(user)
return user
def update_user(self, user, claims):
print("update user", user, claims)
#user.save()
return user
def generate_username(email):
return unicodedata.normalize('NFKC', email)[:150]

View file

@ -2,16 +2,13 @@
import datetime
from app.celery import app
from celery.task import periodic_task
from celery.schedules import crontab
@app.task(queue='encoding')
@periodic_task(run_every=crontab(hour=6, minute=0), queue='encoding')
def cron(**kwargs):
from django.db import transaction
from django.contrib.sessions.models import Session
Session.objects.filter(expire_date__lt=datetime.datetime.now()).delete()
transaction.commit()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(crontab(hour=6, minute=0), cron.s())

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from datetime import datetime
import base64
import copy
from datetime import datetime
from django.shortcuts import render, redirect
from django.conf import settings
@ -53,18 +53,17 @@ def embed(request, id):
})
def redirect_url(request, url):
try:
url = base64.decodebytes(url.encode()).decode()
except:
pass
if request.META['QUERY_STRING']:
url += "?" + request.META['QUERY_STRING']
if settings.CONFIG['site'].get('sendReferrer', False):
return redirect(url)
else:
return HttpResponse('<script>document.location.href=%s;</script>' % json.dumps(url))
return HttpResponse('<script>document.location.href=%s;</script>'%json.dumps(url))
def opensearch_xml(request):
osd = ET.Element('OpenSearchDescription')
osd.attrib['xmlns'] = "http://a9.com/-/spec/opensearch/1.1/"
osd.attrib['xmlns']="http://a9.com/-/spec/opensearch/1.1/"
e = ET.SubElement(osd, 'ShortName')
e.text = settings.SITENAME
e = ET.SubElement(osd, 'Description')
@ -163,7 +162,7 @@ def init(request, data):
del config['keys']
if 'HTTP_ACCEPT_LANGUAGE' in request.META:
response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0].split(',')[0]
response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0]
if request.META.get('HTTP_X_PREFIX') == 'NO':
config['site']['videoprefix'] = ''
@ -184,7 +183,6 @@ def init(request, data):
except:
pass
config['site']['oidc'] = bool(getattr(settings, 'OIDC_RP_CLIENT_ID', False))
response['data']['site'] = config
response['data']['user'] = init_user(request.user, request)
request.session['last_init'] = str(datetime.now())
@ -247,7 +245,7 @@ def getEmbedDefaults(request, data):
i = qs[0].cache
response['data']['item'] = i['id']
response['data']['itemDuration'] = i['duration']
response['data']['itemRatio'] = i.get('videoRatio', settings.CONFIG['video']['previewRatio'])
response['data']['itemRatio'] = i['videoRatio']
qs = List.objects.exclude(status='private').order_by('name')
if qs.exists():
i = qs[0].json()

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class ArchiveConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'archive'

View file

@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
import json
import logging
import os
import shutil
import subprocess
import shutil
import tempfile
import os
import ox
from django.conf import settings
@ -15,9 +14,6 @@ from item.tasks import load_subtitles
from . import models
logger = logging.getLogger('pandora.' + __name__)
info_keys = [
'title',
'description',
@ -40,14 +36,8 @@ info_key_map = {
'display_id': 'id',
}
YT_DLP = ['yt-dlp']
if settings.YT_DLP_EXTRA:
YT_DLP += settings.YT_DLP_EXTRA
def get_info(url, referer=None):
cmd = YT_DLP + ['-j', '--all-subs', url]
if referer:
cmd += ['--referer', referer]
def get_info(url):
cmd = ['youtube-dl', '-j', '--all-subs', url]
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
@ -67,8 +57,6 @@ def get_info(url, referer=None):
info[-1]['tags'] = []
if 'upload_date' in i and i['upload_date']:
info[-1]['date'] = '-'.join([i['upload_date'][:4], i['upload_date'][4:6], i['upload_date'][6:]])
if 'referer' not in info[-1]:
info[-1]['referer'] = url
return info
def add_subtitles(item, media, tmp):
@ -96,18 +84,9 @@ def add_subtitles(item, media, tmp):
sub.selected = True
sub.save()
def load_formats(url):
cmd = YT_DLP + ['-q', url, '-j', '-F']
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
formats = stdout.decode().strip().split('\n')[-1]
return json.loads(formats)
def download(item_id, url, referer=None):
def download(item_id, url):
item = Item.objects.get(public_id=item_id)
info = get_info(url, referer)
info = get_info(url)
if not len(info):
return '%s contains no videos' % url
media = info[0]
@ -116,19 +95,13 @@ def download(item_id, url, referer=None):
if isinstance(tmp, bytes):
tmp = tmp.decode('utf-8')
os.chdir(tmp)
cmd = YT_DLP + ['-q', media['url']]
if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
cmd += ['-o', '%(title)80s.%(ext)s']
if settings.CONFIG['video'].get('reuseUpload', False):
cmd = ['youtube-dl', '-q', media['url']]
if settings.CONFIG['video'].get('reuseUload', False):
max_resolution = max(settings.CONFIG['video']['resolutions'])
format = settings.CONFIG['video']['formats'][0]
if format == 'mp4':
cmd += [
'-f', 'bestvideo[height<=%s][ext=mp4]+bestaudio[ext=m4a]' % max_resolution,
'-f', 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio',
'--merge-output-format', 'mp4'
]
elif format == 'webm':
@ -138,50 +111,6 @@ def download(item_id, url, referer=None):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if stderr and b'Requested format is not available.' in stderr:
formats = load_formats(url)
has_audio = bool([fmt for fmt in formats['formats'] if fmt['resolution'] == 'audio only'])
has_video = bool([fmt for fmt in formats['formats'] if 'x' in fmt['resolution']])
cmd = [
'yt-dlp', '-q', url,
'-o', '%(title)80s.%(ext)s'
]
if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
if has_video and not has_audio:
cmd += [
'-f', 'bestvideo[height<=%s][ext=mp4]' % max_resolution,
]
elif not has_video and has_audio:
cmd += [
'bestaudio[ext=m4a]'
]
else:
cmd = []
if cmd:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if stderr and b'Requested format is not available.' in stderr:
cmd = [
'yt-dlp', '-q', url,
'-o', '%(title)80s.%(ext)s'
]
if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
if cmd:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if stdout or stderr:
logger.error("import failed:\n%s\n%s\n%s", " ".join(cmd), stdout.decode(), stderr.decode())
parts = list(os.listdir(tmp))
if parts:
part = 1
@ -209,7 +138,6 @@ def download(item_id, url, referer=None):
f.extract_stream()
status = True
else:
logger.error("failed to import %s file already exists %s", url, oshash)
status = 'file exists'
if len(parts) == 1:
add_subtitles(f.item, media, tmp)

View file

@ -1,30 +1,26 @@
# -*- coding: utf-8 -*-
from distutils.spawn import find_executable
from glob import glob
from os.path import exists
import fractions
import logging
import math
import os
import re
import shutil
from os.path import exists
import fractions
import subprocess
import tempfile
import time
import math
import shutil
from distutils.spawn import find_executable
from glob import glob
import numpy as np
import ox
import ox.image
from ox.utils import json
from django.conf import settings
from PIL import Image, ImageOps
from PIL import Image
from .chop import Chop, make_keyframe_index
logger = logging.getLogger('pandora.' + __name__)
img_extension = 'jpg'
MAX_DISTANCE = math.sqrt(3 * pow(255, 2))
@ -61,15 +57,14 @@ def supported_formats():
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
version = stderr.split('\n')[0].split(' ')[2]
mp4 = 'libx264' in stdout and bool(re.compile('DEA.L. aac').findall(stdout))
return {
'version': version.split('.'),
'ogg': 'libtheora' in stdout and 'libvorbis' in stdout,
'webm': 'libvpx' in stdout and 'libvorbis' in stdout,
'vp8': 'libvpx' in stdout and 'libvorbis' in stdout,
'vp9': 'libvpx-vp9' in stdout and 'libopus' in stdout,
'mp4': mp4,
'h264': mp4,
'mp4': 'libx264' in stdout and 'DEA.L. aac' in stdout,
'h264': 'libx264' in stdout and 'DEA.L. aac' in stdout,
}
def stream(video, target, profile, info, audio_track=0, flags={}):
@ -150,17 +145,10 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
audioquality = -1
audiobitrate = '22k'
audiochannels = 1
elif profile == '0p':
info['video'] = []
audiorate = 48000
audioquality = 6
audiobitrate = None
audiochannels = None
audio_codec = 'libopus'
else:
height = 96
if settings.USE_VP9 and settings.FFMPEG_SUPPORTS_VP9:
if settings.FFMPEG_SUPPORTS_VP9:
audio_codec = 'libopus'
video_codec = 'libvpx-vp9'
@ -223,7 +211,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
bitrate = height*width*fps*bpp/1000
video_settings = trim + [
'-b:v', '%dk' % bitrate,
'-vb', '%dk' % bitrate,
'-aspect', aspect,
# '-vf', 'yadif',
'-max_muxing_queue_size', '512',
@ -251,8 +239,6 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
'-level', '4.0',
'-pix_fmt', 'yuv420p',
]
if info['video'][0].get("force_framerate"):
video_settings += ['-r:v', str(fps)]
video_settings += ['-map', '0:%s,0:0' % info['video'][0]['id']]
audio_only = False
else:
@ -292,7 +278,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
ac = min(ac, audiochannels)
audio_settings += ['-ac', str(ac)]
if audiobitrate:
audio_settings += ['-b:a', audiobitrate]
audio_settings += ['-ab', audiobitrate]
if format == 'mp4':
audio_settings += ['-c:a', 'aac', '-strict', '-2']
elif audio_codec == 'libopus':
@ -325,15 +311,14 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
pass1_post = post[:]
pass1_post[-1] = '/dev/null'
if format == 'webm':
if video_codec != 'libvpx-vp9':
pass1_post = ['-speed', '4'] + pass1_post
pass1_post = ['-speed', '4'] + pass1_post
post = ['-speed', '1'] + post
cmds.append(base + ['-pass', '1', '-passlogfile', '%s.log' % target]
+ video_settings + ['-an'] + pass1_post)
cmds.append(base + ['-an', '-pass', '1', '-passlogfile', '%s.log' % target]
+ video_settings + pass1_post)
cmds.append(base + ['-pass', '2', '-passlogfile', '%s.log' % target]
+ video_settings + audio_settings + post)
+ audio_settings + video_settings + post)
else:
cmds.append(base + video_settings + audio_settings + post)
cmds.append(base + audio_settings + video_settings + post)
if settings.FFMPEG_DEBUG:
print('\n'.join([' '.join(cmd) for cmd in cmds]))
@ -441,15 +426,10 @@ def frame_direct(video, target, position):
r = run_command(cmd)
return r == 0
def open_image_rgb(image_source):
source = Image.open(image_source)
source = ImageOps.exif_transpose(source)
source = source.convert('RGB')
return source
def resize_image(image_source, image_output, width=None, size=None):
if exists(image_source):
source = open_image_rgb(image_source)
source = Image.open(image_source).convert('RGB')
source_width = source.size[0]
source_height = source.size[1]
if size:
@ -470,7 +450,7 @@ def resize_image(image_source, image_output, width=None, size=None):
height = max(height, 1)
if width < source_width:
resize_method = Image.LANCZOS
resize_method = Image.ANTIALIAS
else:
resize_method = Image.BICUBIC
output = source.resize((width, height), resize_method)
@ -484,7 +464,7 @@ def timeline(video, prefix, modes=None, size=None):
size = [64, 16]
if isinstance(video, str):
video = [video]
cmd = [os.path.normpath(os.path.join(settings.BASE_DIR, '../bin/oxtimelines')),
cmd = ['../bin/oxtimelines',
'-s', ','.join(map(str, reversed(sorted(size)))),
'-m', ','.join(modes),
'-o', prefix,
@ -616,7 +596,7 @@ def timeline_strip(item, cuts, info, prefix):
print(frame, 'cut', c, 'frame', s, frame, 'width', widths[s], box)
# FIXME: why does this have to be frame+1?
frame_image = Image.open(item.frame((frame+1)/fps))
frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.LANCZOS)
frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.ANTIALIAS)
for x_ in range(widths[s]):
line_image.append(frame_image.crop((x_, 0, x_ + 1, timeline_height)))
frame += widths[s]
@ -744,24 +724,19 @@ def remux_stream(src, dst):
cmd = [
settings.FFMPEG,
'-nostats', '-loglevel', 'error',
'-i', src,
'-map_metadata', '-1', '-sn',
'-i', src,
] + video + [
] + audio + [
'-movflags', '+faststart',
dst
]
print(cmd)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
close_fds=True)
stdout, stderr = p.communicate()
if stderr:
logger.error("failed to remux %s %s", cmd, stderr)
return False, stderr
else:
return True, None
p.wait()
return True, None
def ffprobe(path, *args):

View file

@ -1,100 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('archive', '0005_auto_20180804_1554'),
]
operations = [
migrations.AlterField(
model_name='file',
name='extension',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='file',
name='info',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='file',
name='language',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='part',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='part_title',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='path',
field=models.CharField(default='', max_length=2048),
),
migrations.AlterField(
model_name='file',
name='sort_path',
field=models.CharField(default='', max_length=2048),
),
migrations.AlterField(
model_name='file',
name='type',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='file',
name='version',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='frame',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='instance',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='stream',
name='error',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='stream',
name='format',
field=models.CharField(default='webm', max_length=255),
),
migrations.AlterField(
model_name='stream',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='stream',
name='info',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='volume',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -1,17 +0,0 @@
# Generated by Django 4.2.3 on 2023-08-18 12:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archive', '0006_alter_file_extension_alter_file_id_alter_file_info_and_more'),
]
operations = [
migrations.AddIndex(
model_name='stream',
index=models.Index(fields=['file', 'source', 'available'], name='archive_str_file_id_69a542_idx'),
),
]

View file

@ -151,10 +151,8 @@ class File(models.Model):
self.sampleate = 0
self.channels = 0
if self.framerate and self.duration > 0:
if self.framerate:
self.pixels = int(self.width * self.height * float(utils.parse_decimal(self.framerate)) * self.duration)
else:
self.pixels = 0
def get_path_info(self):
data = {}
@ -183,13 +181,6 @@ class File(models.Model):
for type in ox.movie.EXTENSIONS:
if data['extension'] in ox.movie.EXTENSIONS[type]:
data['type'] = type
if data['extension'] == 'ogg' and self.info.get('video'):
data['type'] = 'video'
if data['type'] == 'unknown':
if self.info.get('video'):
data['type'] = 'video'
elif self.info.get('audio'):
data['type'] = 'audio'
if 'part' in data and isinstance(data['part'], int):
data['part'] = str(data['part'])
return data
@ -277,7 +268,7 @@ class File(models.Model):
if self.type not in ('audio', 'video'):
self.duration = None
elif self.id:
else:
duration = sum([s.info.get('duration', 0)
for s in self.streams.filter(source=None)])
if duration:
@ -285,7 +276,7 @@ class File(models.Model):
if self.is_subtitle:
self.available = self.data and True or False
elif self.id:
else:
self.available = not self.uploading and \
self.streams.filter(source=None, available=True).count()
super(File, self).save(*args, **kwargs)
@ -345,9 +336,7 @@ class File(models.Model):
def done_cb():
if done:
info = ox.avinfo(self.data.path)
del info['path']
self.info.update(info)
self.info.update(ox.avinfo(self.data.path))
self.parse_info()
# reject invalid uploads
if self.info.get('oshash') != self.oshash:
@ -374,8 +363,8 @@ class File(models.Model):
self.info.update(stream.info)
self.parse_info()
self.save()
#if stream.info.get('video'):
# extract.make_keyframe_index(stream.media.path)
if stream.info.get('video'):
extract.make_keyframe_index(stream.media.path)
return True, stream.media.size
return save_chunk(stream, stream.media, chunk, offset, name, done_cb)
return False, 0
@ -404,7 +393,7 @@ class File(models.Model):
config = settings.CONFIG['video']
height = self.info['video'][0]['height'] if self.info.get('video') else None
max_resolution = max(config['resolutions'])
if height and height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'):
if height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'):
vcodec = self.get_codec('video')
acodec = self.get_codec('audio')
if vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:
@ -415,7 +404,7 @@ class File(models.Model):
config = settings.CONFIG['video']
height = self.info['video'][0]['height'] if self.info.get('video') else None
max_resolution = max(config['resolutions'])
if height and height <= max_resolution and config['formats'][0] == self.extension:
if height <= max_resolution and config['formats'][0] == self.extension:
vcodec = self.get_codec('video')
acodec = self.get_codec('audio')
if self.extension in ['mp4', 'm4v'] and vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:
@ -492,13 +481,6 @@ class File(models.Model):
user.is_staff or \
self.item.user == user or \
self.item.groups.filter(id__in=user.groups.all()).count() > 0
if 'instances' in data and 'filename' in self.info and self.data:
data['instances'].append({
'ignore': False,
'path': self.info['filename'],
'user': self.item.user.username if self.item and self.item.user else 'system',
'volume': 'Direct Upload'
})
if not can_see_media:
if 'instances' in data:
data['instances'] = []
@ -734,9 +716,6 @@ class Stream(models.Model):
class Meta:
unique_together = ("file", "resolution", "format")
indexes = [
models.Index(fields=['file', 'source', 'available'])
]
file = models.ForeignKey(File, related_name='streams', on_delete=models.CASCADE)
resolution = models.IntegerField(default=96)
@ -816,15 +795,9 @@ class Stream(models.Model):
shutil.move(self.file.data.path, target)
self.file.data.name = ''
self.file.save()
self.available = True
self.save()
done = True
elif self.file.can_remux():
ok, error = extract.remux_stream(media, target)
if ok:
self.available = True
self.save()
done = True
done = True
if not done:
ok, error = extract.stream(media, target, self.name(), info, flags=self.flags)
@ -832,7 +805,7 @@ class Stream(models.Model):
# get current version from db and update
try:
self.refresh_from_db()
except Stream.DoesNotExist:
except archive.models.DoesNotExist:
pass
else:
self.update_status(ok, error)

View file

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
from datetime import datetime
from time import time, monotonic
from time import time
import celery.task.control
import kombu.five
from app.celery import app
from .models import File
@ -16,7 +18,7 @@ def parse_job(job):
'file': f.oshash
}
if job['time_start']:
start_time = datetime.fromtimestamp(time() - (monotonic() - job['time_start']))
start_time = datetime.fromtimestamp(time() - (kombu.five.monotonic() - job['time_start']))
r.update({
'started': start_time,
'running': (datetime.now() - start_time).total_seconds()
@ -28,7 +30,7 @@ def parse_job(job):
def status():
status = []
encoding_jobs = ('archive.tasks.extract_stream', 'archive.tasks.process_stream')
c = app.control.inspect()
c = celery.task.control.inspect()
for job in c.active(safe=True).get('celery@pandora-encoding', []):
if job['name'] in encoding_jobs:
status.append(parse_job(job))
@ -65,7 +67,7 @@ def fill_queue():
def get_celery_worker_status():
ERROR_KEY = "ERROR"
try:
insp = app.control.inspect()
insp = celery.task.control.inspect()
d = insp.stats()
if not d:
d = {ERROR_KEY: 'No running Celery workers were found.'}

View file

@ -2,14 +2,13 @@
from glob import glob
from celery.task import task
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from item.models import Item
from item.tasks import update_poster, update_timeline
from taskqueue.models import Task
from app.celery import app
from . import models
from . import extract
@ -69,7 +68,7 @@ def update_or_create_instance(volume, f):
instance.file.item.update_wanted()
return instance
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_files(user, volume, files):
user = models.User.objects.get(username=user)
volume, created = models.Volume.objects.get_or_create(user=user, name=volume)
@ -101,7 +100,7 @@ def update_files(user, volume, files):
Task.start(i, user)
update_timeline.delay(i.public_id)
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_info(user, info):
user = models.User.objects.get(username=user)
files = models.File.objects.filter(oshash__in=list(info))
@ -115,7 +114,7 @@ def update_info(user, info):
Task.start(i, user)
update_timeline.delay(i.public_id)
@app.task(queue="encoding")
@task(queue="encoding")
def process_stream(fileId):
'''
process uploaded stream
@ -141,7 +140,7 @@ def process_stream(fileId):
Task.finish(file.item)
return True
@app.task(queue="encoding")
@task(queue="encoding")
def extract_stream(fileId):
'''
extract stream from direct upload
@ -170,7 +169,7 @@ def extract_stream(fileId):
models.File.objects.filter(id=fileId).update(encoding=False)
Task.finish(file.item)
@app.task(queue="encoding")
@task(queue="encoding")
def extract_derivatives(fileId, rebuild=False):
file = models.File.objects.get(id=fileId)
streams = file.streams.filter(source=None)
@ -178,7 +177,7 @@ def extract_derivatives(fileId, rebuild=False):
streams[0].extract_derivatives(rebuild)
return True
@app.task(queue="encoding")
@task(queue="encoding")
def update_stream(id):
s = models.Stream.objects.get(pk=id)
if not glob("%s*" % s.timeline_prefix):
@ -200,11 +199,11 @@ def update_stream(id):
c.update_calculated_values()
c.save()
@app.task(queue="encoding")
def download_media(item_id, url, referer=None):
return external.download(item_id, url, referer)
@task(queue="encoding")
def download_media(item_id, url):
return external.download(item_id, url)
@app.task(queue='default')
@task(queue='default')
def move_media(data, user):
from changelog.models import add_changelog
from item.models import get_item, Item, ItemSort
@ -249,8 +248,7 @@ def move_media(data, user):
if old_item and old_item.files.count() == 0 and i.files.count() == len(data['ids']):
for a in old_item.annotations.all().order_by('id'):
a.item = i
with transaction.atomic():
a.set_public_id()
a.set_public_id()
Annotation.objects.filter(id=a.id).update(item=i, public_id=a.public_id)
old_item.clips.all().update(item=i, sort=i.sort)

View file

@ -103,7 +103,7 @@ def update(request, data):
file__available=False,
file__wanted=True)]
if utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True):
if list(filter(lambda l: l['id'] == 'subtitles', settings.CONFIG['layers'])):
qs = files.filter(
file__is_subtitle=True,
file__available=False
@ -195,9 +195,7 @@ def addMedia(request, data):
response['data']['item'] = f.item.public_id
response['data']['itemUrl'] = request.build_absolute_uri('/%s' % f.item.public_id)
if not f.available:
changelog_data = data.copy()
changelog_data['oshash'] = oshash
add_changelog(request, changelog_data, f.item.public_id)
add_changelog(request, data, f.item.public_id)
else:
if 'item' in data:
i = Item.objects.get(public_id=data['item'])
@ -222,15 +220,11 @@ def addMedia(request, data):
if 'info' in data and data['info'] and isinstance(data['info'], dict):
f.info = data['info']
f.info['extension'] = extension
if 'filename' in data:
f.info['filename'] = data['filename']
f.parse_info()
f.save()
response['data']['item'] = i.public_id
response['data']['itemUrl'] = request.build_absolute_uri('/%s' % i.public_id)
changelog_data = data.copy()
changelog_data['oshash'] = oshash
add_changelog(request, changelog_data, i.public_id)
add_changelog(request, data, i.public_id)
return render_to_json_response(response)
actions.register(addMedia, cache=False)
@ -745,7 +739,6 @@ def addMediaUrl(request, data):
takes {
url: string, // url
referer: string // optional referer url
item: string // item
}
returns {
@ -758,7 +751,7 @@ def addMediaUrl(request, data):
response = json_response()
i = Item.objects.get(public_id=data['item'])
Task.start(i, request.user)
t = tasks.download_media.delay(data['item'], data['url'], data.get('referer'))
t = tasks.download_media.delay(data['item'], data['url'])
response['data']['taskId'] = t.task_id
add_changelog(request, data, data['item'])
return render_to_json_response(response)

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class ChangelogConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'changelog'

View file

@ -1,35 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('changelog', '0002_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='changelog',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='changelog',
name='value',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='log',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='log',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class ClipConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'clip'

View file

@ -17,7 +17,6 @@ keymap = {
'place': 'annotations__places__id',
'text': 'findvalue',
'annotations': 'findvalue',
'layer': 'annotations__layer',
'user': 'annotations__user__username',
}
case_insensitive_keys = ('annotations__user__username', )

View file

@ -1,18 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clip', '0003_auto_20160219_1805'),
]
operations = [
migrations.AlterField(
model_name='clip',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -8,7 +8,6 @@ import ox
from archive import extract
from . import managers
from .utils import add_cuts
def get_layers(item, interval=None, user=None):
@ -60,7 +59,9 @@ class MetaClip(object):
self.hue = self.saturation = self.lightness = 0
self.volume = 0
def update_findvalue(self):
def save(self, *args, **kwargs):
if self.duration != self.end - self.start:
self.update_calculated_values()
if not self.aspect_ratio and self.item:
streams = self.item.streams()
if streams:
@ -88,11 +89,6 @@ class MetaClip(object):
self.findvalue = '\n'.join(list(filter(None, [a.findvalue for a in anns])))
for l in [k['id'] for k in settings.CONFIG['layers']]:
setattr(self, l, l in anns_by_layer and bool(len(anns_by_layer[l])))
def save(self, *args, **kwargs):
if self.duration != self.end - self.start:
self.update_calculated_values()
self.update_findvalue()
models.Model.save(self, *args, **kwargs)
clip_keys = ('id', 'in', 'out', 'position', 'created', 'modified',
@ -115,7 +111,8 @@ class MetaClip(object):
del j[key]
#needed here to make item find with clips work
if 'annotations' in keys:
annotations = self.annotations.all().exclude(value='')
#annotations = self.annotations.filter(layer__in=settings.CONFIG['clipLayers'])
annotations = self.annotations.all()
if qs:
for q in qs:
annotations = annotations.filter(q)
@ -153,12 +150,12 @@ class MetaClip(object):
data['annotation'] = qs[0].public_id
data['parts'] = self.item.cache['parts']
data['durations'] = self.item.cache['durations']
for key in settings.CONFIG['itemTitleKeys'] + ['videoRatio']:
for key in ('title', 'director', 'year', 'videoRatio'):
value = self.item.cache.get(key)
if value:
data[key] = value
data['duration'] = data['out'] - data['in']
add_cuts(data, self.item, self.start, self.end)
data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end])
data['layers'] = self.get_layers(user)
data['streams'] = [s.file.oshash for s in self.item.streams()]
return data
@ -189,7 +186,6 @@ class MetaClip(object):
def __str__(self):
return self.public_id
class Meta:
unique_together = ("item", "start", "end")

View file

@ -1,22 +0,0 @@
def add_cuts(data, item, start, end):
cuts = []
last = False
outer = []
first = 0
for cut in item.get('cuts', []):
if cut > start and cut < end:
if not cuts:
outer.append(first)
cuts.append(cut)
last = True
elif cut <= start:
first = cut
elif cut >= end:
if not len(outer):
outer.append(first)
if len(outer) == 1:
outer.append(cut)
data['cuts'] = tuple(cuts)
data['outerCuts'] = tuple(outer)

View file

@ -1009,7 +1009,7 @@
{
"id": "tags",
"title": "Tags",
"canAddAnnotations": {"member": true, "friend": true, "staff": true, "admin": true},
"canAddAnnotations": {"member": true, "staff": true, "admin": true},
"item": "Tag",
"autocomplete": true,
"overlap": true,
@ -1399,8 +1399,10 @@
corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/
"video": {
"torrent": false,
"formats": ["webm", "mp4"],
// fixme: this should be named "ratio" or "defaultRatio",
// as it also applies to clip lists (on load)

View file

@ -73,14 +73,13 @@
"canSeeAccessed": {"researcher": true, "staff": true, "admin": true},
"canSeeAllTasks": {"staff": true, "admin": true},
"canSeeDebugMenu": {"researcher": true, "staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3},
"canSeeExtraItemViews": {"researcher": true, "staff": true, "admin": true},
"canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3},
"canSeeMedia": {"researcher": true, "staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3},
"canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3},
"canSeeSize": {"researcher": true, "staff": true, "admin": true},
"canSeeSoftwareVersion": {"researcher": true, "staff": true, "admin": true},
"canSendMail": {"staff": true, "admin": true},
"canShare": {"staff": true, "admin": true}
"canSendMail": {"staff": true, "admin": true}
},
/*
"clipKeys" are the properties that clips can be sorted by (the values are
@ -313,14 +312,6 @@
"autocomplete": true,
"columnWidth": 128
},
{
"id": "fulltext",
"operator": "+",
"title": "Fulltext",
"type": "text",
"fulltext": true,
"find": true
},
{
"id": "created",
"operator": "-",
@ -1503,7 +1494,6 @@
"hasEvents": true,
"hasPlaces": true,
"item": "Keyword",
"autocomplete": true,
"overlap": true,
"type": "string"
},
@ -1885,8 +1875,10 @@
corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/
"video": {
"torrent": false,
"formats": ["webm", "mp4"],
"previewRatio": 1.375,
"resolutions": [240, 480]

View file

@ -71,14 +71,13 @@
"canSeeAccessed": {"staff": true, "admin": true},
"canSeeAllTasks": {"staff": true, "admin": true},
"canSeeDebugMenu": {"staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeExtraItemViews": {"staff": true, "admin": true},
"canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeMedia": {"staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeSize": {"staff": true, "admin": true},
"canSeeSoftwareVersion": {"staff": true, "admin": true},
"canSendMail": {"staff": true, "admin": true},
"canShare": {"staff": true, "admin": true}
"canSendMail": {"staff": true, "admin": true}
},
/*
"clipKeys" are the properties that clips can be sorted by (the values are
@ -247,28 +246,6 @@
"filter": true,
"find": true
},
{
"id": "source",
"title": "Source",
"type": "string",
"autocomplete": true,
"description": true,
"columnWidth": 180,
"filter": true,
"find": true,
"sort": true
},
{
"id": "project",
"title": "Project",
"type": "string",
"autocomplete": true,
"description": true,
"columnWidth": 120,
"filter": true,
"find": true,
"sort": true
},
{
"id": "id",
"operator": "+",
@ -314,24 +291,6 @@
"sort": true,
"columnWidth": 256
},
{
"id": "content",
"operator": "+",
"title": "Content",
"type": "text",
"find": true,
"sort": true,
"columnWidth": 256
},
{
"id": "translation",
"operator": "+",
"title": "Translation",
"type": "text",
"find": true,
"sort": true,
"columnWidth": 256
},
{
"id": "matches",
"operator": "-",
@ -351,20 +310,6 @@
"autocomplete": true,
"columnWidth": 128
},
{
"id": "notes",
"title": "Notes",
"type": "text",
"capability": "canEditMetadata"
},
{
"id": "fulltext",
"operator": "+",
"title": "Fulltext",
"type": "text",
"fulltext": true,
"find": true
},
{
"id": "created",
"operator": "-",
@ -600,6 +545,7 @@
"title": "Director",
"type": ["string"],
"autocomplete": true,
"columnRequired": true,
"columnWidth": 180,
"sort": true,
"sortType": "person"
@ -618,6 +564,7 @@
"title": "Featuring",
"type": ["string"],
"autocomplete": true,
"columnRequired": true,
"columnWidth": 180,
"filter": true,
"sort": true,
@ -673,7 +620,7 @@
{
"id": "annotations",
"title": "Annotations",
"type": "text", // fixme: not the best type for this magic key
"type": "string", // fixme: not the best type for this magic key
"find": true
},
{
@ -711,7 +658,7 @@
},
{
"id": "numberofannotations",
"title": "Number of Annotations",
"title": "Annotations",
"type": "integer",
"columnWidth": 60,
"sort": true
@ -847,16 +794,12 @@
"id": "user",
"title": "User",
"type": "string",
"columnWidth": 90,
"capability": "canSeeMedia",
"sort": true,
"find": true
},
{
"id": "groups",
"title": "Group",
"columnWidth": 90,
"sort": true,
"type": ["string"]
},
{
@ -1389,8 +1332,10 @@
corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/
"video": {
"torrent": true,
"formats": ["webm", "mp4"],
"previewRatio": 1.3333333333,
//supported resolutions are

View file

@ -29,7 +29,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"text": Text shown on mouseover
*/
"cantPlay": {
"icon": "NoCopyright",
"icon": "noCopyright",
"link": "",
"text": ""
},
@ -67,7 +67,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"canManageEntities": {"member": true, "staff": true, "admin": true},
"canManageHome": {"staff": true, "admin": true},
"canManagePlacesAndEvents": {"member": true, "staff": true, "admin": true},
"canManageTitlesAndNames": {"member": false, "staff": true, "admin": true},
"canManageTitlesAndNames": {"member": true, "staff": true, "admin": true},
"canManageTranslations": {"admin": true},
"canManageUsers": {"staff": true, "admin": true},
"canPlayClips": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
@ -102,7 +102,8 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
],
/*
"clipLayers" is the ordered list of public layers that will appear as the
text of clips (in grid view, below the icon).
text of clips (in grid view, below the icon). Excluding a layer from this
list means it will not be included in find annotations.
*/
"clipLayers": ["publicnotes", "keywords", "subtitles"],
/*
@ -350,11 +351,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"type": "enum",
"columnWidth": 90,
"format": {"type": "ColorLevel", "args": [
["Public", "Restricted", "Private"]
["Public", "Out of Copyright", "Under Copyright", "Private"]
]},
"sort": true,
"sortOperator": "+",
"values": ["Public", "Restricted", "Private", "Unknown"]
"values": ["Public", "Out of Copyright", "Under Copyright", "Private", "Unknown"]
}
],
/*
@ -752,13 +753,6 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"capability": "canSeeMedia",
"find": true
},
{
"id": "filename",
"title": "Filename",
"type": ["string"],
"capability": "canSeeMedia",
"find": true
},
{
"id": "created",
"title": "Date Created",
@ -1165,11 +1159,6 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"findDocuments": {"conditions": [], "operator": "&"},
"followPlayer": true,
"help": "",
"hidden": {
"collections": [],
"edits": [],
"lists": []
},
"icons": "posters",
"infoIconSize": 256,
"item": "",
@ -1278,11 +1267,13 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/
"video": {
"downloadFormat": "webm",
"formats": ["webm", "mp4"],
"previewRatio": 1.3333333333,
"resolutions": [240, 480]
"resolutions": [240, 480],
"torrent": false
}
}

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class DocumentConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'document'

View file

@ -1,37 +1,14 @@
import logging
import os
import subprocess
import tempfile
from django.conf import settings
logger = logging.getLogger('pandora.' + __name__)
def extract_text(pdf, page=None):
if page is not None:
page = str(page)
cmd = ['pdftotext', '-f', page, '-l', page, pdf, '-']
else:
cmd = ['pdftotext', pdf, '-']
def extract_text(pdf):
cmd = ['pdftotext', pdf, '-']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout.decode().strip()
if not stdout:
if page:
# split page from pdf and ocr
fd, page_pdf = tempfile.mkstemp('.pdf')
cmd = ['pdfseparate', '-f', page, '-l', page, pdf, page_pdf]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
text = ocr_image(page_pdf)
os.unlink(page_pdf)
os.close(fd)
return text
else:
return ocr_image(pdf)
return stdout
stdout = stdout.decode()
return stdout.strip()
def ocr_image(path):
cmd = ['tesseract', path, '-', 'txt']
@ -66,11 +43,9 @@ class FulltextMixin:
if self.has_fulltext_key():
from elasticsearch.exceptions import NotFoundError
try:
res = self.elasticsearch().delete(index=self._ES_INDEX, id=self.id)
res = self.elasticsearch().delete(index=self._ES_INDEX, doc_type='document', id=self.id)
except NotFoundError:
pass
except:
logger.error('failed to delete fulltext document', exc_info=True)
def update_fulltext(self):
if self.has_fulltext_key():
@ -79,7 +54,7 @@ class FulltextMixin:
doc = {
'text': text.lower()
}
res = self.elasticsearch().index(index=self._ES_INDEX, id=self.id, body=doc)
res = self.elasticsearch().index(index=self._ES_INDEX, doc_type='document', id=self.id, body=doc)
@classmethod
def find_fulltext(cls, query):
@ -120,69 +95,3 @@ class FulltextMixin:
ids += [int(r['_id']) for r in res['hits']['hits']]
from_ += len(res['hits']['hits'])
return ids
def highlight_page(self, page, query, size):
import pypdfium2 as pdfium
from PIL import Image
from PIL import ImageDraw
pdfpath = self.file.path
pagenumber = int(page) - 1
jpg = tempfile.NamedTemporaryFile(suffix='.jpg')
output = jpg.name
TINT_COLOR = (255, 255, 0)
TRANSPARENCY = .45
OPACITY = int(255 * TRANSPARENCY)
scale = 150/72
pdf = pdfium.PdfDocument(pdfpath)
page = pdf[pagenumber]
bitmap = page.render(scale=scale, rotation=0)
img = bitmap.to_pil().convert('RGBA')
overlay = Image.new('RGBA', img.size, TINT_COLOR+(0,))
draw = ImageDraw.Draw(overlay)
textpage = page.get_textpage()
search = textpage.search(query)
result = search.get_next()
while result:
pos, steps = result
steps += 1
while steps:
box = textpage.get_charbox(pos)
box = [b*scale for b in box]
tl = (box[0], img.size[1] - box[3])
br = (box[2], img.size[1] - box[1])
draw.rectangle((tl, br), fill=TINT_COLOR+(OPACITY,))
pos += 1
steps -= 1
result = search.get_next()
img = Image.alpha_composite(img, overlay)
img = img.convert("RGB")
aspect = img.size[0] / img.size[1]
resize_method = Image.LANCZOS
if img.size[0] >= img.size[1]:
width = size
height = int(size / aspect)
else:
width = int(size / aspect)
height = size
img = img.resize((width, height), resize_method)
img.save(output, quality=72)
return jpg
class FulltextPageMixin(FulltextMixin):
_ES_INDEX = "document-page-index"
def extract_fulltext(self):
if self.document.file:
if self.document.extension == 'pdf':
return extract_text(self.document.file.path, self.page)
elif self.extension in ('png', 'jpg'):
return ocr_image(self.document.file.path)
elif self.extension == 'html':
# FIXME: is there a nice way to split that into pages
return self.data.get('text', '')
return ''

View file

@ -5,6 +5,7 @@ from django.db import connection, transaction
from django.db.models import fields
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models

View file

@ -5,6 +5,7 @@ from django.db import connection, transaction
from django.db.models import fields
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch

View file

@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
from datetime import datetime
import unicodedata
from django.db.models import Q, Manager
@ -15,7 +14,6 @@ from documentcollection.models import Collection
from item import utils
from user.models import Group
from .pages import PageManager
keymap = {
'item': 'items__public_id',
@ -63,7 +61,7 @@ def parseCondition(condition, user, item=None, owner=None):
def buildCondition(k, op, v, user, exclude=False, owner=None):
import entity.models
from .. import models
from . import models
# fixme: frontend should never call with list
if k == 'list':
@ -299,8 +297,5 @@ class DocumentManager(Manager):
q |= Q(groups__in=user.groups.all())
rendered_q |= Q(groups__in=user.groups.all())
qs = qs.filter(q)
max_level = len(settings.CONFIG['documentRightsLevels'])
qs = qs.filter(rightslevel__lte=max_level)
return qs

View file

@ -1,302 +0,0 @@
# -*- coding: utf-8 -*-
from datetime import datetime
import unicodedata
from six import string_types
from django.db.models import Q, Manager
from django.conf import settings
import ox
from oxdjango.query import QuerySet
import entity.managers
from oxdjango.managers import get_operator
from documentcollection.models import Collection
from item import utils
from user.models import Group
keymap = {
'item': 'items__public_id',
}
default_key = 'title'
def get_key_type(k):
key_type = (utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'}).get('type')
if isinstance(key_type, list):
key_type = key_type[0]
key_type = {
'title': 'string',
'person': 'string',
'text': 'string',
'year': 'string',
'length': 'string',
'layer': 'string',
'list': 'list',
}.get(key_type, key_type)
return key_type
def parseCondition(condition, user, item=None, owner=None):
'''
'''
k = condition.get('key', default_key)
k = keymap.get(k, k)
if not k:
k = default_key
if item and k == 'description':
item_conditions = condition.copy()
item_conditions['key'] = 'items__itemproperties__description'
return parseCondition(condition, user) | parseCondition(item_conditions, user)
v = condition['value']
op = condition.get('operator')
if not op:
op = '='
if op.startswith('!'):
return buildCondition(k, op[1:], v, user, True, owner=owner)
else:
return buildCondition(k, op, v, user, owner=owner)
def buildCondition(k, op, v, user, exclude=False, owner=None):
import entity.models
from .. import models
# fixme: frontend should never call with list
if k == 'list':
print('fixme: frontend should never call with list', k, op, v)
k = 'collection'
key_type = get_key_type(k)
key_config = (utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'})
facet_keys = models.Document.facet_keys
if k == 'document':
k = 'document__id'
if op == '&' and isinstance(v, list):
v = [ox.fromAZ(id_) for id_ in v]
k += get_operator(op)
else:
v = ox.fromAZ(v)
q = Q(**{k: v})
if exclude:
q = ~Q(document__id__in=models.Document.objects.filter(q))
return q
elif k == 'rightslevel':
q = Q(document__rightslevel=v)
if exclude:
q = ~Q(document__rightslevel=v)
return q
elif k == 'groups':
if op == '==' and v == '$my':
if not owner:
owner = user
groups = owner.groups.all()
else:
key = 'name' + get_operator(op)
groups = Group.objects.filter(**{key: v})
if not groups.count():
return Q(id=0)
q = Q(document__groups__in=groups)
if exclude:
q = ~q
return q
elif k in ('oshash', 'items__public_id'):
q = Q(**{k: v})
if exclude:
q = ~Q(id__in=models.Document.objects.filter(q))
return q
elif isinstance(v, bool):
key = k
elif k == 'entity':
entity_key, entity_v = entity.managers.namePredicate(op, v)
key = 'id__in'
v = entity.models.DocumentProperties.objects.filter(**{
'entity__' + entity_key: entity_v
}).values_list('document_id', flat=True)
elif k == 'collection':
q = Q(id=0)
l = v.split(":", 1)
if len(l) >= 2:
lqs = list(Collection.objects.filter(name=l[1], user__username=l[0]))
if len(lqs) == 1 and lqs[0].accessible(user):
l = lqs[0]
if l.query.get('static', False) is False:
data = l.query
q = parseConditions(data.get('conditions', []),
data.get('operator', '&'),
user, owner=l.user)
else:
q = Q(id__in=l.documents.all())
else:
q = Q(id=0)
return q
elif key_config.get('fulltext'):
qs = models.Page.find_fulltext_ids(v)
q = Q(id__in=qs)
if exclude:
q = ~Q(id__in=qs)
return q
elif key_type == 'boolean':
q = Q(**{'find__key': k, 'find__value': v})
if exclude:
q = ~Q(id__in=models.Document.objects.filter(q))
return q
elif key_type == "string":
in_find = True
if in_find:
value_key = 'find__value'
else:
value_key = k
if isinstance(v, string_types):
v = unicodedata.normalize('NFKD', v).lower()
if k in facet_keys:
in_find = False
facet_value = 'facets__value' + get_operator(op, 'istr')
v = models.Document.objects.filter(**{'facets__key': k, facet_value: v})
value_key = 'id__in'
else:
value_key = value_key + get_operator(op)
k = str(k)
value_key = str(value_key)
if k == '*':
q = Q(**{'find__value' + get_operator(op): v}) | \
Q(**{'facets__value' + get_operator(op, 'istr'): v})
elif in_find:
q = Q(**{'find__key': k, value_key: v})
else:
q = Q(**{value_key: v})
if exclude:
q = ~Q(id__in=models.Document.objects.filter(q))
return q
elif key_type == 'date':
def parse_date(d):
while len(d) < 3:
d.append(1)
return datetime(*[int(i) for i in d])
#using sort here since find only contains strings
v = parse_date(v.split('-'))
vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
vk = str(vk)
q = Q(**{vk: v})
if exclude:
q = ~q
return q
else: # integer, float, list, time
#use sort table here
if key_type == 'time':
v = int(utils.parse_time(v))
vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
vk = str(vk)
q = Q(**{vk: v})
if exclude:
q = ~q
return q
key = str(key)
q = Q(**{key: v})
if exclude:
q = ~q
return q
def parseConditions(conditions, operator, user, item=None, owner=None):
'''
conditions: [
{
value: "war"
}
{
key: "year",
value: "1970-1980,
operator: "!="
},
{
key: "country",
value: "f",
operator: "^"
}
],
operator: "&"
'''
conn = []
for condition in conditions:
if 'conditions' in condition:
q = parseConditions(condition['conditions'],
condition.get('operator', '&'), user, item, owner=owner)
if q:
conn.append(q)
pass
else:
conn.append(parseCondition(condition, user, item, owner=owner))
if conn:
q = conn[0]
for c in conn[1:]:
if operator == '|':
q = q | c
else:
q = q & c
return q
return None
class PageManager(Manager):
def get_query_set(self):
return QuerySet(self.model)
def find(self, data, user, item=None):
'''
query: {
conditions: [
{
value: "war"
}
{
key: "year",
value: "1970-1980,
operator: "!="
},
{
key: "country",
value: "f",
operator: "^"
}
],
operator: "&"
}
'''
#join query with operator
qs = self.get_query_set()
query = data.get('query', {})
conditions = parseConditions(query.get('conditions', []),
query.get('operator', '&'),
user, item)
if conditions:
qs = qs.filter(conditions)
qs = qs.distinct()
#anonymous can only see public items
if not user or user.is_anonymous:
level = 'guest'
allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
qs = qs.filter(document__rightslevel__lte=allowed_level)
rendered_q = Q(rendered=True)
#users can see public items, there own items and items of there groups
else:
level = user.profile.get_level()
allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
q = Q(document__rightslevel__lte=allowed_level) | Q(document__user=user)
rendered_q = Q(rendered=True) | Q(document__user=user)
if user.groups.count():
q |= Q(document__groups__in=user.groups.all())
rendered_q |= Q(document__groups__in=user.groups.all())
qs = qs.filter(q)
return qs

View file

@ -1,35 +0,0 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2020-05-13 00:01
from __future__ import unicode_literals
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import document.fulltext
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('document', '0011_jsonfield'),
]
operations = [
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('page', models.IntegerField(default=1)),
('data', oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder)),
],
bases=(models.Model, document.fulltext.FulltextPageMixin),
),
migrations.AddField(
model_name='page',
name='document',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages_set', to='document.Document'),
),
]

View file

@ -1,55 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('document', '0012_auto_20200513_0001'),
]
operations = [
migrations.AlterField(
model_name='access',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='document',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='document',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='facet',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='find',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='itemproperties',
name='description',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='itemproperties',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='page',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -6,12 +6,11 @@ import os
import re
import unicodedata
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models, transaction
from django.db.models import Q, Sum, Max
from django.contrib.auth import get_user_model
from django.db.models.signals import pre_delete
from django.utils import datetime_safe
from django.conf import settings
from oxdjango.fields import JSONField
from PIL import Image
@ -22,7 +21,7 @@ from oxdjango.sortmodel import get_sort_field
from person.models import get_name_sort
from item.models import Item
from annotation.models import Annotation
from archive.extract import resize_image, open_image_rgb
from archive.extract import resize_image
from archive.chunk import save_chunk
from user.models import Group
from user.utils import update_groups
@ -30,7 +29,7 @@ from user.utils import update_groups
from . import managers
from . import utils
from . import tasks
from .fulltext import FulltextMixin, FulltextPageMixin
from .fulltext import FulltextMixin
User = get_user_model()
@ -80,7 +79,7 @@ class Document(models.Model, FulltextMixin):
current_values = []
for k in settings.CONFIG['documentKeys']:
if k.get('sortType') == 'person':
current_values += self.get_value(k['id'], [])
current_values += self.get(k['id'], [])
if not isinstance(current_values, list):
if not current_values:
current_values = []
@ -328,9 +327,6 @@ class Document(models.Model, FulltextMixin):
def editable(self, user, item=None):
if not user or user.is_anonymous:
return False
max_level = len(settings.CONFIG['rightsLevels'])
if self.rightslevel > max_level:
return False
if self.user == user or \
self.groups.filter(id__in=user.groups.all()).count() > 0 or \
user.is_staff or \
@ -350,8 +346,6 @@ class Document(models.Model, FulltextMixin):
groups = data.pop('groups')
update_groups(self, groups)
for key in data:
if key == "id":
continue
k = list(filter(lambda i: i['id'] == key, settings.CONFIG['documentKeys']))
ktype = k and k[0].get('type') or ''
if key == 'text' and self.extension == 'html':
@ -552,10 +546,10 @@ class Document(models.Model, FulltextMixin):
if len(crop) == 4:
path = os.path.join(folder, '%dp%d,%s.jpg' % (1024, page, ','.join(map(str, crop))))
if not os.path.exists(path):
img = open_image_rgb(src).crop(crop)
img = Image.open(src).crop(crop)
img.save(path)
else:
img = open_image_rgb(path)
img = Image.open(path)
src = path
if size < max(img.size):
path = os.path.join(folder, '%dp%d,%s.jpg' % (size, page, ','.join(map(str, crop))))
@ -568,10 +562,10 @@ class Document(models.Model, FulltextMixin):
if len(crop) == 4:
path = os.path.join(folder, '%s.jpg' % ','.join(map(str, crop)))
if not os.path.exists(path):
img = open_image_rgb(src).convert('RGB').crop(crop)
img = Image.open(src).crop(crop)
img.save(path)
else:
img = open_image_rgb(path)
img = Image.open(path)
src = path
if size < max(img.size):
path = os.path.join(folder, '%sp%s.jpg' % (size, ','.join(map(str, crop))))
@ -580,7 +574,7 @@ class Document(models.Model, FulltextMixin):
if os.path.exists(src) and not os.path.exists(path):
image_size = max(self.width, self.height)
if image_size == -1:
image_size = max(*open_image_rgb(src).size)
image_size = max(*Image.open(src).size)
if size > image_size:
path = src
else:
@ -592,11 +586,6 @@ class Document(models.Model, FulltextMixin):
image = os.path.join(os.path.dirname(pdf), '1024p%d.jpg' % page)
utils.extract_pdfpage(pdf, image, page)
def create_pages(self):
for page in range(self.pages):
page += 1
p, c = Page.objects.get_or_create(document=self, page=page)
def get_info(self):
if self.extension == 'pdf':
self.thumbnail(1024)
@ -606,7 +595,7 @@ class Document(models.Model, FulltextMixin):
self.pages = utils.pdfpages(self.file.path)
elif self.width == -1:
self.pages = -1
self.width, self.height = open_image_rgb(self.file.path).size
self.width, self.height = Image.open(self.file.path).size
def get_ratio(self):
if self.extension == 'pdf':
@ -713,41 +702,6 @@ class ItemProperties(models.Model):
super(ItemProperties, self).save(*args, **kwargs)
class Page(models.Model, FulltextPageMixin):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
document = models.ForeignKey(Document, related_name='pages_set', on_delete=models.CASCADE)
page = models.IntegerField(default=1)
data = JSONField(default=dict, editable=False)
objects = managers.PageManager()
def __str__(self):
return u"%s:%s" % (self.document, self.page)
def json(self, keys=None, user=None):
data = {}
data['document'] = ox.toAZ(self.document.id)
data['page'] = self.page
data['id'] = '{document}/{page}'.format(**data)
document_keys = []
if keys:
for key in list(data):
if key not in keys:
del data[key]
for key in keys:
if 'fulltext' in key:
data['fulltext'] = self.extract_fulltext()
elif key in ('document', 'page', 'id'):
pass
else:
document_keys.append(key)
if document_keys:
data.update(self.document.json(document_keys, user))
return data
class Access(models.Model):
class Meta:
unique_together = ("document", "user")

View file

@ -1,135 +0,0 @@
# -*- coding: utf-8 -*-
import os
import re
from glob import glob
import unicodedata
import ox
from ox.utils import json
from oxdjango.api import actions
from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
from django import forms
from django.db.models import Count, Sum
from django.conf import settings
from item import utils
from item.models import Item
from itemlist.models import List
from entity.models import Entity
from archive.chunk import process_chunk
from changelog.models import add_changelog
from . import models
from . import tasks
def parse_query(data, user):
query = {}
query['range'] = [0, 100]
query['sort'] = [{'key': 'page', 'operator': '+'}, {'key': 'title', 'operator': '+'}]
for key in ('keys', 'group', 'file', 'range', 'position', 'positions', 'sort'):
if key in data:
query[key] = data[key]
query['qs'] = models.Page.objects.find(data, user)
return query
def _order_query(qs, sort):
prefix = 'document__sort__'
order_by = []
for e in sort:
operator = e['operator']
if operator != '-':
operator = ''
key = {
'index': 'document__items__itemproperties__index',
'position': 'id',
'name': 'title',
}.get(e['key'], e['key'])
if key == 'resolution':
order_by.append('%swidth' % operator)
order_by.append('%sheight' % operator)
else:
if '__' not in key and key not in ('created', 'modified', 'page'):
key = "%s%s" % (prefix, key)
order = '%s%s' % (operator, key)
order_by.append(order)
if order_by:
qs = qs.order_by(*order_by, nulls_last=True)
qs = qs.distinct()
return qs
def _order_by_group(query):
prefix = 'document__sort__'
if 'sort' in query:
op = '-' if query['sort'][0]['operator'] == '-' else ''
if len(query['sort']) == 1 and query['sort'][0]['key'] == 'items':
order_by = op + prefix + 'items'
if query['group'] == "year":
secondary = op + prefix + 'sortvalue'
order_by = (order_by, secondary)
elif query['group'] != "keyword":
order_by = (order_by, prefix + 'sortvalue')
else:
order_by = (order_by, 'value')
else:
order_by = op + prefix + 'sortvalue'
order_by = (order_by, prefix + 'items')
else:
order_by = ('-' + prefix + 'sortvalue', prefix + 'items')
return order_by
def findPages(request, data):
'''
Finds documents pages for a given query
takes {
query: object, // query object, see `find`
sort: [object], // list of sort objects, see `find`
range: [int, int], // range of results, per current sort order
keys: [string] // list of keys to return
}
returns {
items: [{ // list of pages
id: string
page: int
}]
}
'''
query = parse_query(data, request.user)
#order
qs = _order_query(query['qs'], query['sort'])
response = json_response()
if 'group' in query:
response['data']['items'] = []
items = 'items'
document_qs = query['qs']
order_by = _order_by_group(query)
qs = models.Facet.objects.filter(key=query['group']).filter(document__id__in=document_qs)
qs = qs.values('value').annotate(items=Count('id')).order_by(*order_by)
if 'positions' in query:
response['data']['positions'] = {}
ids = [j['value'] for j in qs]
response['data']['positions'] = utils.get_positions(ids, query['positions'])
elif 'range' in data:
qs = qs[query['range'][0]:query['range'][1]]
response['data']['items'] = [{'name': i['value'], 'items': i[items]} for i in qs]
else:
response['data']['items'] = qs.count()
elif 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]]
response['data']['items'] = [l.json(data['keys'], request.user) for l in qs]
elif 'position' in data:
#FIXME: actually implement position requests
response['data']['position'] = 0
elif 'positions' in data:
ids = list(qs.values_list('id', flat=True))
response['data']['positions'] = utils.get_positions(ids, query['positions'], decode_id=True)
else:
response['data']['items'] = qs.count()
return render_to_json_response(response)
actions.register(findPages)

View file

@ -1,30 +1,8 @@
import ox
from app.celery import app
# -*- coding: utf-8 -*-
from celery.task import task
@app.task(queue="encoding")
@task(queue="encoding")
def extract_fulltext(id):
from . import models
d = models.Document.objects.get(id=id)
d.update_fulltext()
d.create_pages()
for page in d.pages_set.all():
page.update_fulltext()
@app.task(queue='default')
def bulk_edit(data, username):
from django.db import transaction
from . import models
from item.models import Item
user = models.User.objects.get(username=username)
item = 'item' in data and Item.objects.get(public_id=data['item']) or None
ids = data['id']
del data['id']
documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, ids))
for document in documents:
if document.editable(user, item):
with transaction.atomic():
document.refresh_from_db()
document.edit(data, user, item)
document.save()
return {}

View file

@ -12,10 +12,8 @@ from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
from django import forms
from django.conf import settings
from django.db.models import Count, Sum
from django.http import HttpResponse
from django.shortcuts import render
from django.conf import settings
from item import utils
from item.models import Item
@ -25,8 +23,6 @@ from archive.chunk import process_chunk
from changelog.models import add_changelog
from . import models
from . import tasks
from . import page_views
def get_document_or_404_json(request, id):
response = {'status': {'code': 404,
@ -135,13 +131,13 @@ def editDocument(request, data):
item = 'item' in data and Item.objects.get(public_id=data['item']) or None
if data['id']:
if isinstance(data['id'], list):
add_changelog(request, data)
t = tasks.bulk_edit.delay(data, request.user.username)
response['data']['taskId'] = t.task_id
documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, data['id']))
else:
document = models.Document.get(data['id'])
documents = [models.Document.get(data['id'])]
for document in documents:
if document.editable(request.user, item):
add_changelog(request, data)
if document == documents[0]:
add_changelog(request, data)
document.edit(data, request.user, item)
document.save()
response['data'] = document.json(user=request.user, item=item)
@ -383,12 +379,8 @@ def file(request, id, name=None):
def thumbnail(request, id, size=256, page=None):
size = int(size)
document = get_document_or_404_json(request, id)
if "q" in request.GET and page:
img = document.highlight_page(page, request.GET["q"], size)
return HttpResponse(img, content_type="image/jpeg")
return HttpFileResponse(document.thumbnail(size, page=page))
@login_required_json
def upload(request):
if 'id' in request.GET:
@ -513,37 +505,3 @@ def autocompleteDocuments(request, data):
response['data']['items'] = [i['value'] for i in qs]
return render_to_json_response(response)
actions.register(autocompleteDocuments)
def document(request, fragment):
context = {}
parts = fragment.split('/')
# FIXME: parse collection urls and return the right metadata for those
id = parts[0]
page = None
crop = None
if len(parts) == 2:
rect = parts[1].split(',')
if len(rect) == 1:
page = rect[0]
else:
crop = rect
try:
document = models.Document.objects.filter(id=ox.fromAZ(id)).first()
except:
document = None
if document and document.access(request.user):
context['title'] = document.data['title']
if document.data.get('description'):
context['description'] = document.data['description']
link = request.build_absolute_uri(document.get_absolute_url())
public_id = ox.toAZ(document.id)
preview = '/documents/%s/512p.jpg' % public_id
if page:
preview = '/documents/%s/512p%s.jpg' % (public_id, page)
if crop:
preview = '/documents/%s/512p%s.jpg' % (public_id, ','.join(crop))
context['preview'] = request.build_absolute_uri(preview)
context['url'] = request.build_absolute_uri('/documents/' + fragment)
context['settings'] = settings
return render(request, "document.html", context)

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class DocumentcollectionConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'documentcollection'

View file

@ -1,61 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import documentcollection.models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('documentcollection', '0004_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='collection',
name='description',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='collection',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='collection',
name='poster_frames',
field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='collection',
name='query',
field=oxdjango.fields.JSONField(default=documentcollection.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='collection',
name='sort',
field=oxdjango.fields.JSONField(default=documentcollection.models.get_collectionsort, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='collection',
name='status',
field=models.CharField(default='private', max_length=20),
),
migrations.AlterField(
model_name='collection',
name='type',
field=models.CharField(default='static', max_length=255),
),
migrations.AlterField(
model_name='collectiondocument',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='position',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -34,9 +34,6 @@ def get_collectionview():
def get_collectionsort():
return tuple(settings.CONFIG['user']['ui']['collectionSort'])
def default_query():
return {"static": True}
class Collection(models.Model):
class Meta:
@ -49,7 +46,7 @@ class Collection(models.Model):
name = models.CharField(max_length=255)
status = models.CharField(max_length=20, default='private')
_status = ['private', 'public', 'featured']
query = JSONField(default=default_query, editable=False)
query = JSONField(default=lambda: {"static": True}, editable=False)
type = models.CharField(max_length=255, default='static')
description = models.TextField(default='')

View file

@ -86,11 +86,6 @@ def findCollections(request, data):
for x in data.get('query', {}).get('conditions', [])
)
is_personal = request.user.is_authenticated and any(
(x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
for x in data.get('query', {}).get('conditions', [])
)
if is_section_request:
qs = query['qs']
if not is_featured and not request.user.is_anonymous:
@ -99,9 +94,6 @@ def findCollections(request, data):
else:
qs = _order_query(query['qs'], query['sort'])
if is_personal and request.user.profile.ui.get('hidden', {}).get('collections'):
qs = qs.exclude(name__in=request.user.profile.ui['hidden']['collections'])
response = json_response()
if 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]]
@ -246,7 +238,7 @@ def addCollection(request, data):
'type' and 'view'.
see: editCollection, findCollections, getCollection, removeCollection, sortCollections
'''
data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
name = data['name']
if not name:
name = "Untitled"

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class EditConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'edit'

View file

@ -1,41 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import edit.models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('edit', '0005_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='clip',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='edit',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='edit',
name='poster_frames',
field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='edit',
name='query',
field=oxdjango.fields.JSONField(default=edit.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='position',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -13,7 +13,6 @@ from django.conf import settings
from django.db import models, transaction
from django.db.models import Max
from django.contrib.auth import get_user_model
from django.core.cache import cache
from oxdjango.fields import JSONField
@ -25,7 +24,6 @@ import clip.models
from archive import extract
from user.utils import update_groups
from user.models import Group
from clip.utils import add_cuts
from . import managers
@ -35,9 +33,6 @@ User = get_user_model()
def get_path(f, x): return f.path(x)
def get_icon_path(f, x): return get_path(f, 'icon.jpg')
def default_query():
return {"static": True}
class Edit(models.Model):
class Meta:
@ -56,7 +51,7 @@ class Edit(models.Model):
description = models.TextField(default='')
rightslevel = models.IntegerField(db_index=True, default=0)
query = JSONField(default=default_query, editable=False)
query = JSONField(default=lambda: {"static": True}, editable=False)
type = models.CharField(max_length=255, default='static')
icon = models.ImageField(default=None, blank=True, null=True, upload_to=get_icon_path)
@ -98,8 +93,6 @@ class Edit(models.Model):
# dont add clip if in/out are invalid
if not c.annotation:
duration = c.item.sort.duration
if c.start is None or c.end is None:
return False
if c.start > c.end \
or round(c.start, 3) >= round(duration, 3) \
or round(c.end, 3) > round(duration, 3):
@ -514,7 +507,7 @@ class Clip(models.Model):
if value:
data[key] = value
data['duration'] = data['out'] - data['in']
add_cuts(data, self.item, self.start, self.end)
data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end])
data['layers'] = self.get_layers(user)
data['streams'] = [s.file.oshash for s in self.item.streams()]
return data

View file

@ -3,16 +3,14 @@
import os
import re
from oxdjango.api import actions
from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
import ox
from django.conf import settings
from oxdjango.decorators import login_required_json
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
from django.db import transaction
from django.db.models import Max
from django.db.models import Sum
from oxdjango.http import HttpFileResponse
from oxdjango.api import actions
from django.conf import settings
from item import utils
from changelog.models import add_changelog
@ -192,7 +190,7 @@ def _order_clips(edit, sort):
'in': 'start',
'out': 'end',
'text': 'sortvalue',
'volume': 'volume' if edit.type == 'smart' else 'sortvolume',
'volume': 'sortvolume',
'item__sort__item': 'item__sort__public_id',
}.get(key, key)
order = '%s%s' % (operator, key)
@ -262,7 +260,7 @@ def addEdit(request, data):
}
see: editEdit, findEdit, getEdit, removeEdit, sortEdits
'''
data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
name = data['name']
if not name:
name = "Untitled"
@ -414,11 +412,6 @@ def findEdits(request, data):
is_featured = any(filter(is_featured_condition, data.get('query', {}).get('conditions', [])))
is_personal = request.user.is_authenticated and any(
(x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
for x in data.get('query', {}).get('conditions', [])
)
if is_section_request:
qs = query['qs']
if not is_featured and not request.user.is_anonymous:
@ -427,9 +420,6 @@ def findEdits(request, data):
else:
qs = _order_query(query['qs'], query['sort'])
if is_personal and request.user.profile.ui.get('hidden', {}).get('edits'):
qs = qs.exclude(name__in=request.user.profile.ui['hidden']['edits'])
response = json_response()
if 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]]

View file

@ -1,3 +0,0 @@
LOGLEVEL=info
MAX_TASKS_PER_CHILD=500
CONCURRENCY=1

View file

@ -1,6 +0,0 @@
from django.apps import AppConfig
class EntityConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'entity'

View file

@ -1,50 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('entity', '0006_auto_20180918_0903'),
]
operations = [
migrations.AlterField(
model_name='documentproperties',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='documentproperties',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='entity',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='entity',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='entity',
name='name_find',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='find',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='link',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -1,6 +0,0 @@
from django.apps import AppConfig
class EventConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'event'

View file

@ -1,43 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('event', '0003_auto_20160304_1644'),
]
operations = [
migrations.AlterField(
model_name='event',
name='duration',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='event',
name='end',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='event',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='event',
name='name_find',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='event',
name='start',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='event',
name='type',
field=models.CharField(default='', max_length=255),
),
]

View file

@ -1,26 +1,20 @@
# -*- coding: utf-8 -*-
from app.celery import app
from celery.task import task
from .models import Event
'''
from celery.schedules import crontab
@app.task(ignore_results=True, queue='encoding')
@periodic_task(run_every=crontab(hour=7, minute=30), queue='encoding')
def update_all_matches(**kwargs):
ids = [e['id'] for e in Event.objects.all().values('id')]
for i in ids:
e = Event.objects.get(pk=i)
e.update_matches()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(crontab(hour=7, minute=30), update_all_matches.s())
'''
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_matches(eventId):
event = Event.objects.get(pk=eventId)
event.update_matches()

View file

@ -2,5 +2,4 @@ from django.apps import AppConfig
class HomeConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'home'

View file

@ -1,30 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0002_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='item',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='item',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='item',
name='index',
field=models.IntegerField(default=-1),
),
]

View file

@ -1,6 +0,0 @@
from django.apps import AppConfig
class ItemConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'item'

View file

@ -4,6 +4,7 @@ from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import transaction
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models

View file

@ -6,6 +6,7 @@ from django.db import connection, transaction
from django.db.models import fields
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models
import clip.models

View file

@ -5,6 +5,7 @@ from django.db import connection, transaction
from django.db.models import fields
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models
import clip.models

View file

@ -5,6 +5,7 @@ from django.db import connection, transaction
from django.db.models import fields
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models
import clip.models

View file

@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
import os
from glob import glob
from django.core.management.base import BaseCommand
import app.monkey_patch
from ... import models
from ... import tasks
class Command(BaseCommand):
"""
rebuild posters for all items.
"""
help = 'rebuild all posters for all items.'
args = ''
def handle(self, **options):
offset = 0
chunk = 100
count = models.Item.objects.count()
while offset <= count:
for i in models.Item.objects.all().order_by('id')[offset:offset+chunk]:
print(i)
if i.poster:
i.poster.delete()
i.make_poster()
offset += chunk

View file

@ -6,6 +6,7 @@ from django.db import connection, transaction
from django.db.models import fields
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models

View file

@ -5,6 +5,7 @@ from django.core.management.base import BaseCommand
from django.db import connection, transaction
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models

View file

@ -5,6 +5,7 @@ from django.db import connection, transaction
from django.db.models import fields
from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch
from ... import models
import clip.models

View file

@ -33,7 +33,7 @@ def parseCondition(condition, user, owner=None):
k = {'id': 'public_id'}.get(k, k)
if not k:
k = '*'
v = condition.get('value', '')
v = condition['value']
op = condition.get('operator')
if not op:
op = '='
@ -62,9 +62,6 @@ def parseCondition(condition, user, owner=None):
if k == 'list':
key_type = ''
if k in ('width', 'height'):
key_type = 'integer'
if k == 'groups':
if op == '==' and v == '$my':
if not owner:
@ -89,11 +86,8 @@ def parseCondition(condition, user, owner=None):
elif k == 'rendered':
return Q(rendered=v)
elif k == 'resolution':
if isinstance(v, list) and len(v) == 2:
q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \
& parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user)
else:
q = Q(id=0)
q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \
& parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user)
if exclude:
q = ~q
return q
@ -324,8 +318,6 @@ class ItemManager(Manager):
q |= Q(groups__in=user.groups.all())
rendered_q |= Q(groups__in=user.groups.all())
qs = qs.filter(q)
max_level = len(settings.CONFIG['rightsLevels'])
qs = qs.filter(level__lte=max_level)
if settings.CONFIG.get('itemRequiresVideo') and level != 'admin':
qs = qs.filter(rendered_q)
return qs

View file

@ -71,7 +71,7 @@ class Migration(migrations.Migration):
('poster_width', models.IntegerField(default=0)),
('poster_frame', models.FloatField(default=-1)),
('icon', models.ImageField(blank=True, default=None, upload_to=item.models.get_icon_path)),
('torrent', models.FileField(blank=True, default=None, max_length=1000)),
('torrent', models.FileField(blank=True, default=None, max_length=1000, upload_to=item.models.get_torrent_path)),
('stream_info', oxdjango.fields.DictField(default={}, editable=False)),
('stream_aspect', models.FloatField(default=1.3333333333333333)),
],

View file

@ -1,19 +0,0 @@
# Generated by Django 3.0.10 on 2023-07-10 08:52
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('item', '0004_json_cache'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='torrent',
),
]

View file

@ -1,65 +0,0 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('item', '0005_auto_20230710_0852'),
]
operations = [
migrations.AlterField(
model_name='access',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='annotationsequence',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='description',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='facet',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='item',
name='cache',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='item',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='item',
name='external_data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='item',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='item',
name='stream_info',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='itemfind',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import json
import logging
import os
import re
import shutil
@ -43,7 +42,6 @@ from user.utils import update_groups
from user.models import Group
import archive.models
logger = logging.getLogger('pandora.' + __name__)
User = get_user_model()
@ -157,6 +155,9 @@ def get_icon_path(f, x):
def get_poster_path(f, x):
return get_path(f, 'poster.jpg')
def get_torrent_path(f, x):
return get_path(f, 'torrent.torrent')
class Item(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
@ -182,6 +183,7 @@ class Item(models.Model):
icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path)
torrent = models.FileField(default=None, blank=True, max_length=1000, upload_to=get_torrent_path)
stream_info = JSONField(default=dict, editable=False)
# stream related fields
@ -229,9 +231,6 @@ class Item(models.Model):
def editable(self, user):
if user.is_anonymous:
return False
max_level = len(settings.CONFIG['rightsLevels'])
if self.level > max_level:
return False
if user.profile.capability('canEditMetadata') or \
user.is_staff or \
self.user == user or \
@ -239,7 +238,7 @@ class Item(models.Model):
return True
return False
def edit(self, data, is_task=False):
def edit(self, data):
data = data.copy()
# FIXME: how to map the keys to the right place to write them to?
if 'id' in data:
@ -256,12 +255,11 @@ class Item(models.Model):
description = data.pop(key)
if isinstance(description, dict):
for value in description:
value = ox.sanitize_html(value)
d, created = Description.objects.get_or_create(key=k, value=value)
d.description = ox.sanitize_html(description[value])
d.save()
else:
value = ox.sanitize_html(data.get(k, self.get(k, '')))
value = data.get(k, self.get(k, ''))
if not description:
description = ''
d, created = Description.objects.get_or_create(key=k, value=value)
@ -296,10 +294,7 @@ class Item(models.Model):
self.data[key] = ox.escape_html(data[key])
p = self.save()
if not settings.USE_IMDB and list(filter(lambda k: k in self.poster_keys, data)):
if is_task:
tasks.update_poster(self.public_id)
else:
p = tasks.update_poster.delay(self.public_id)
p = tasks.update_poster.delay(self.public_id)
return p
def update_external(self):
@ -478,8 +473,7 @@ class Item(models.Model):
for a in self.annotations.all().order_by('id'):
a.item = other
with transaction.atomic():
a.set_public_id()
a.set_public_id()
Annotation.objects.filter(id=a.id).update(item=other, public_id=a.public_id)
try:
other_sort = other.sort
@ -523,7 +517,6 @@ class Item(models.Model):
cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True)
p.wait()
os.unlink(tmp_output_txt)
os.close(fd)
return True
else:
return None
@ -641,11 +634,11 @@ class Item(models.Model):
if self.poster_height:
i['posterRatio'] = self.poster_width / self.poster_height
if keys and 'hasSource' in keys:
i['hasSource'] = self.streams().exclude(file__data='').exists()
if keys and 'source' in keys:
i['source'] = self.streams().exclude(file__data='').exists()
streams = self.streams()
i['durations'] = [s[0] for s in streams.values_list('duration')]
i['durations'] = [s.duration for s in streams]
i['duration'] = sum(i['durations'])
i['audioTracks'] = self.audio_tracks()
if not i['audioTracks']:
@ -701,12 +694,10 @@ class Item(models.Model):
else:
values = self.get(key)
if values:
values = [ox.sanitize_html(value) for value in values]
for d in Description.objects.filter(key=key, value__in=values):
i['%sdescription' % key][d.value] = d.description
else:
value = ox.sanitize_html(self.get(key, ''))
qs = Description.objects.filter(key=key, value=value)
qs = Description.objects.filter(key=key, value=self.get(key, ''))
i['%sdescription' % key] = '' if qs.count() == 0 else qs[0].description
if keys:
info = {}
@ -864,7 +855,7 @@ class Item(models.Model):
values = list(set(values))
else:
values = self.get(key, '')
if values and isinstance(values, list) and isinstance(values[0], str):
if isinstance(values, list):
save(key, '\n'.join(values))
else:
save(key, values)
@ -1026,16 +1017,12 @@ class Item(models.Model):
set_value(s, name, value)
elif sort_type == 'person':
value = sortNames(self.get(source, []))
if value is None:
value = ''
value = utils.sort_string(value)[:955]
set_value(s, name, value)
elif sort_type == 'string':
value = self.get(source, '')
if value is None:
value = ''
if isinstance(value, list):
value = ','.join([str(v) for v in value])
value = ','.join(value)
value = utils.sort_string(value)[:955]
set_value(s, name, value)
elif sort_type == 'words':
@ -1112,11 +1099,7 @@ class Item(models.Model):
_current_values.append(value[0])
current_values = _current_values
try:
current_values = list(set(current_values))
except:
logger.error('invalid facet data for %s: %s', key, current_values)
current_values = []
current_values = list(set(current_values))
current_values = [ox.decode_html(ox.strip_tags(v)) for v in current_values]
current_values = [unicodedata.normalize('NFKD', v) for v in current_values]
self.update_facet_values(key, current_values)
@ -1209,7 +1192,7 @@ class Item(models.Model):
if not r:
return False
path = video.name
duration = sum(self.item.cache['durations'])
duration = sum(item.cache['durations'])
else:
path = stream.media.path
duration = stream.info['duration']
@ -1305,6 +1288,90 @@ class Item(models.Model):
self.files.filter(selected=True).update(selected=False)
self.save()
def get_torrent(self, request):
if self.torrent:
self.torrent.seek(0)
data = ox.torrent.bdecode(self.torrent.read())
url = request.build_absolute_uri("%s/torrent/" % self.get_absolute_url())
if url.startswith('https://'):
url = 'http' + url[5:]
data['url-list'] = ['%s%s' % (url, u.split('torrent/')[1]) for u in data['url-list']]
return ox.torrent.bencode(data)
def make_torrent(self):
if not settings.CONFIG['video'].get('torrent'):
return
streams = self.streams()
if streams.count() == 0:
return
base = self.path('torrent')
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
if not isinstance(base, bytes):
base = base.encode('utf-8')
if os.path.exists(base):
shutil.rmtree(base)
ox.makedirs(base)
filename = utils.safe_filename(ox.decode_html(self.get('title')))
base = self.path('torrent/%s' % filename)
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
size = 0
duration = 0.0
if streams.count() == 1:
v = streams[0]
media_path = v.media.path
extension = media_path.split('.')[-1]
url = "%s/torrent/%s.%s" % (self.get_absolute_url(),
quote(filename.encode('utf-8')),
extension)
video = "%s.%s" % (base, extension)
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size = v.media.size
duration = v.duration
else:
url = "%s/torrent/" % self.get_absolute_url()
part = 1
ox.makedirs(base)
for v in streams:
media_path = v.media.path
extension = media_path.split('.')[-1]
video = "%s/%s.Part %d.%s" % (base, filename, part, extension)
part += 1
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size += v.media.size
duration += v.duration
video = base
torrent = '%s.torrent' % base
url = "http://%s%s" % (settings.CONFIG['site']['url'], url)
meta = {
'filesystem_encoding': 'utf-8',
'target': torrent,
'url-list': url,
}
if duration:
meta['playtime'] = ox.format_duration(duration*1000)[:-4]
# slightly bigger torrent file but better for streaming
piece_size_pow2 = 15 # 1 mbps -> 32KB pieces
if size / duration >= 1000000:
piece_size_pow2 = 16 # 2 mbps -> 64KB pieces
meta['piece_size_pow2'] = piece_size_pow2
ox.torrent.create_torrent(video, settings.TRACKER_URL, meta)
self.torrent.name = torrent[len(settings.MEDIA_ROOT)+1:]
self.save()
def audio_tracks(self):
tracks = [f['language']
for f in self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True)).values('language')
@ -1312,10 +1379,11 @@ class Item(models.Model):
return sorted(set(tracks))
def streams(self, track=None):
files = self.files.filter(selected=True).filter(Q(is_audio=True) | Q(is_video=True))
qs = archive.models.Stream.objects.filter(
file__in=files, source=None, available=True
).select_related()
source=None, available=True, file__item=self, file__selected=True
).filter(
Q(file__is_audio=True) | Q(file__is_video=True)
)
if not track:
tracks = self.audio_tracks()
if len(tracks) > 1:
@ -1354,6 +1422,7 @@ class Item(models.Model):
self.select_frame()
self.make_poster()
self.make_icon()
self.make_torrent()
self.rendered = streams.count() > 0
self.save()
if self.rendered:
@ -1539,15 +1608,8 @@ class Item(models.Model):
cmd += ['-l', timeline]
if frame:
cmd += ['-f', frame]
if settings.ITEM_ICON_DATA:
cmd += '-d', '-'
data = self.json()
data = utils.normalize_dict('NFC', data)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, close_fds=True)
p.communicate(json.dumps(data, default=to_json).encode('utf-8'))
else:
p = subprocess.Popen(cmd, close_fds=True)
p.wait()
p = subprocess.Popen(cmd, close_fds=True)
p.wait()
# remove cached versions
icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon))
for f in glob(icon.replace('.jpg', '*.jpg')):
@ -1559,13 +1621,11 @@ class Item(models.Model):
return icon
def add_empty_clips(self):
if not settings.EMPTY_CLIPS:
return
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
if not subtitles:
return
# otherwise add empty 5 seconds annotation every minute
duration = sum([s[0] for s in self.streams().values_list('duration')])
duration = sum([s.duration for s in self.streams()])
layer = subtitles['id']
# FIXME: allow annotations from no user instead?
user = User.objects.all().order_by('id')[0]
@ -1814,8 +1874,6 @@ class Description(models.Model):
value = models.CharField(max_length=1000, db_index=True)
description = models.TextField()
def __str__(self):
return "%s=%s" % (self.key, self.value)
class AnnotationSequence(models.Model):
item = models.OneToOneField('Item', related_name='_annotation_sequence', on_delete=models.CASCADE)
@ -1831,12 +1889,13 @@ class AnnotationSequence(models.Model):
@classmethod
def nextid(cls, item):
s, created = cls.objects.get_or_create(item=item)
if created:
nextid = s.value
else:
cursor = connection.cursor()
sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id)
cursor.execute(sql)
nextid = cursor.fetchone()[0]
with transaction.atomic():
s, created = cls.objects.get_or_create(item=item)
if created:
nextid = s.value
else:
cursor = connection.cursor()
sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id)
cursor.execute(sql)
nextid = cursor.fetchone()[0]
return "%s/%s" % (item.public_id, ox.toAZ(nextid))

View file

@ -24,6 +24,10 @@ urls = [
re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<format>webm|ogv|mp4)$', views.video),
re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<track>.+)\.(?P<format>webm|ogv|mp4)$', views.video),
#torrent
re_path(r'^(?P<id>[A-Z0-9].*)/torrent$', views.torrent),
re_path(r'^(?P<id>[A-Z0-9].*)/torrent/(?P<filename>.*?)$', views.torrent),
#export
re_path(r'^(?P<id>[A-Z0-9].*)/json$', views.item_json),
re_path(r'^(?P<id>[A-Z0-9].*)/xml$', views.item_xml),

View file

@ -2,35 +2,27 @@
from datetime import timedelta, datetime
from urllib.parse import quote
import xml.etree.ElementTree as ET
import gzip
import os
import random
import logging
from app.celery import app
from celery.schedules import crontab
from celery.task import task, periodic_task
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Q
from ox.utils import ET
from app.utils import limit_rate
from taskqueue.models import Task
logger = logging.getLogger('pandora.' + __name__)
@app.task(queue='encoding')
@periodic_task(run_every=timedelta(days=1), queue='encoding')
def cronjob(**kwargs):
if limit_rate('item.tasks.cronjob', 8 * 60 * 60):
update_random_sort()
update_random_clip_sort()
clear_cache.delay()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(timedelta(days=1), cronjob.s())
def update_random_sort():
from . import models
if list(filter(lambda f: f['id'] == 'random', settings.CONFIG['itemKeys'])):
@ -58,7 +50,7 @@ def update_random_clip_sort():
cursor.execute(row)
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_clips(public_id):
from . import models
try:
@ -67,7 +59,7 @@ def update_clips(public_id):
return
item.clips.all().update(user=item.user.id)
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_poster(public_id):
from . import models
try:
@ -85,7 +77,7 @@ def update_poster(public_id):
icon=item.icon.name
)
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_file_paths(public_id):
from . import models
try:
@ -94,7 +86,7 @@ def update_file_paths(public_id):
return
item.update_file_paths()
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_external(public_id):
from . import models
try:
@ -103,7 +95,7 @@ def update_external(public_id):
return
item.update_external()
@app.task(queue="encoding")
@task(queue="encoding")
def update_timeline(public_id):
from . import models
try:
@ -113,7 +105,7 @@ def update_timeline(public_id):
item.update_timeline(async_=False)
Task.finish(item)
@app.task(queue="encoding")
@task(queue="encoding")
def rebuild_timeline(public_id):
from . import models
i = models.Item.objects.get(public_id=public_id)
@ -121,7 +113,7 @@ def rebuild_timeline(public_id):
s.make_timeline()
i.update_timeline(async_=False)
@app.task(queue="encoding")
@task(queue="encoding")
def load_subtitles(public_id):
from . import models
try:
@ -134,7 +126,7 @@ def load_subtitles(public_id):
item.update_facets()
@app.task(queue="encoding")
@task(queue="encoding")
def extract_clip(public_id, in_, out, resolution, format, track=None):
from . import models
try:
@ -146,7 +138,7 @@ def extract_clip(public_id, in_, out, resolution, format, track=None):
return False
@app.task(queue="encoding")
@task(queue="encoding")
def clear_cache(days=60):
import subprocess
path = os.path.join(settings.MEDIA_ROOT, 'media')
@ -160,7 +152,7 @@ def clear_cache(days=60):
subprocess.check_output(cmd)
@app.task(ignore_results=True, queue='default')
@task(ignore_results=True, queue='default')
def update_sitemap(base_url):
from . import models
sitemap = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'sitemap.xml.gz'))
@ -358,18 +350,3 @@ def update_sitemap(base_url):
f.write(data)
with gzip.open(sitemap, 'wb') as f:
f.write(data)
@app.task(queue='default')
def bulk_edit(data, username):
from django.db import transaction
from . import models
from .views import edit_item
user = models.User.objects.get(username=username)
items = models.Item.objects.filter(public_id__in=data['id'])
for item in items:
if item.editable(user):
with transaction.atomic():
item.refresh_from_db()
response = edit_item(user, item, data, is_task=True)
return {}

View file

@ -71,7 +71,7 @@ def join_tiles(source_paths, durations, target_path):
if not w or large_tile_i < large_tile_n - 1:
w = 60
data['target_images']['large'] = data['target_images']['large'].resize(
(w, small_tile_h), Image.LANCZOS
(w, small_tile_h), Image.ANTIALIAS
)
if data['target_images']['small']:
data['target_images']['small'].paste(
@ -90,7 +90,7 @@ def join_tiles(source_paths, durations, target_path):
if data['full_tile_widths'][0]:
resized = data['target_images']['large'].resize((
data['full_tile_widths'][0], large_tile_h
), Image.LANCZOS)
), Image.ANTIALIAS)
data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0))
data['full_tile_offset'] += data['full_tile_widths'][0]
data['full_tile_widths'] = data['full_tile_widths'][1:]
@ -196,7 +196,7 @@ def join_tiles(source_paths, durations, target_path):
#print(image_file)
image_file = '%stimeline%s%dp.jpg' % (target_path, full_tile_mode, small_tile_h)
data['target_images']['full'].resize(
(full_tile_w, small_tile_h), Image.LANCZOS
(full_tile_w, small_tile_h), Image.ANTIALIAS
).save(image_file)
#print(image_file)

View file

@ -61,7 +61,7 @@ def sort_title(title):
title = sort_string(title)
#title
title = re.sub(r'[\'!¿¡,\.;\-"\:\*\[\]]', '', title)
title = re.sub('[\'!¿¡,\.;\-"\:\*\[\]]', '', title)
return title.strip()
def get_positions(ids, pos, decode_id=False):

View file

@ -16,13 +16,11 @@ from wsgiref.util import FileWrapper
from django.conf import settings
from ox.utils import json, ET
import ox
from oxdjango.api import actions
from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
import oxdjango
from oxdjango.http import HttpFileResponse
import ox
from . import models
from . import utils
@ -34,6 +32,7 @@ from clip.models import Clip
from user.models import has_capability
from changelog.models import add_changelog
from oxdjango.api import actions
def _order_query(qs, sort, prefix='sort__'):
@ -309,7 +308,7 @@ def find(request, data):
responsive UI: First leave out `keys` to get totals as fast as possible,
then pass `positions` to get the positions of previously selected items,
finally make the query with the `keys` you need and an appropriate `range`.
For more examples, see https://code.0x2620.org/0x2620/pandora/wiki/QuerySyntax.
For more examples, see https://wiki.0x2620.org/wiki/pandora/QuerySyntax.
see: add, edit, get, lookup, remove, upload
'''
if settings.JSON_DEBUG:
@ -534,18 +533,17 @@ def get(request, data):
return render_to_json_response(response)
actions.register(get)
def edit_item(user, item, data, is_task=False):
data = data.copy()
def edit_item(request, item, data):
update_clips = False
response = json_response(status=200, text='ok')
if 'rightslevel' in data:
if user.profile.capability('canEditRightsLevel'):
if request.user.profile.capability('canEditRightsLevel'):
item.level = int(data['rightslevel'])
else:
response = json_response(status=403, text='permission denied')
del data['rightslevel']
if 'user' in data:
if user.profile.get_level() in ('admin', 'staff') and \
if request.user.profile.get_level() in ('admin', 'staff') and \
models.User.objects.filter(username=data['user']).exists():
new_user = models.User.objects.get(username=data['user'])
if new_user != item.user:
@ -553,13 +551,13 @@ def edit_item(user, item, data, is_task=False):
update_clips = True
del data['user']
if 'groups' in data:
if not user.profile.capability('canManageUsers'):
if not request.user.profile.capability('canManageUsers'):
# Users wihtout canManageUsers can only add/remove groups they are not in
groups = set([g.name for g in item.groups.all()])
user_groups = set([g.name for g in user.groups.all()])
user_groups = set([g.name for g in request.user.groups.all()])
other_groups = list(groups - user_groups)
data['groups'] = [g for g in data['groups'] if g in user_groups] + other_groups
r = item.edit(data, is_task=is_task)
r = item.edit(data)
if r:
r.wait()
if update_clips:
@ -596,10 +594,10 @@ def add(request, data):
if p:
p.wait()
else:
item.make_poster()
i.make_poster()
del data['title']
if data:
response = edit_item(request.user, item, data)
response = edit_item(request, item, data)
response['data'] = item.json()
add_changelog(request, request_data, item.public_id)
return render_to_json_response(response)
@ -621,16 +619,16 @@ def edit(request, data):
see: add, find, get, lookup, remove, upload
'''
if isinstance(data['id'], list):
add_changelog(request, data)
t = tasks.bulk_edit.delay(data, request.user.username)
response = json_response(status=200, text='ok')
response['data']['taskId'] = t.task_id
items = models.Item.objects.filter(public_id__in=data['id'])
else:
item = get_object_or_404_json(models.Item, public_id=data['id'])
items = [get_object_or_404_json(models.Item, public_id=data['id'])]
for item in items:
if item.editable(request.user):
add_changelog(request, data)
response = edit_item(request.user, item, data)
request_data = data.copy()
response = edit_item(request, item, data)
response['data'] = item.json()
if item == items[0]:
add_changelog(request, request_data)
else:
response = json_response(status=403, text='permission denied')
return render_to_json_response(response)
@ -949,11 +947,9 @@ def timeline(request, id, size, position=-1, format='jpg', mode=None):
if not item.access(request.user):
return HttpResponseForbidden()
modes = [t['id'] for t in settings.CONFIG['timelines']]
if not mode:
mode = 'antialias'
if mode not in modes:
mode = modes[0]
modes = [t['id'] for t in settings.CONFIG['timelines']]
if mode not in modes:
raise Http404
modes.pop(modes.index(mode))
@ -1033,10 +1029,7 @@ def download(request, id, resolution=None, format='webm', part=None):
return HttpResponseForbidden()
elif r is True:
response = HttpResponse(FileWrapper(video), content_type=content_type)
try:
response['Content-Length'] = os.path.getsize(video.name)
except:
pass
response['Content-Length'] = os.path.getsize(video.name)
else:
response = HttpFileResponse(r, content_type=content_type)
else:
@ -1047,6 +1040,27 @@ def download(request, id, resolution=None, format='webm', part=None):
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
return response
def torrent(request, id, filename=None):
item = get_object_or_404(models.Item, public_id=id)
if not item.access(request.user):
return HttpResponseForbidden()
if not item.torrent:
raise Http404
if not filename or filename.endswith('.torrent'):
response = HttpResponse(item.get_torrent(request),
content_type='application/x-bittorrent')
filename = utils.safe_filename("%s.torrent" % item.get('title'))
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
return response
while filename.startswith('/'):
filename = filename[1:]
filename = filename.replace('/../', '/')
filename = item.path('torrent/%s' % filename)
filename = os.path.abspath(os.path.join(settings.MEDIA_ROOT, filename))
response = HttpFileResponse(filename)
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % \
quote(os.path.basename(filename.encode('utf-8')))
return response
def video(request, id, resolution, format, index=None, track=None):
resolution = int(resolution)
@ -1268,6 +1282,12 @@ def atom_xml(request):
el.text = "1:1"
if has_capability(request.user, 'canDownloadVideo'):
if item.torrent:
el = ET.SubElement(entry, "link")
el.attrib['rel'] = 'enclosure'
el.attrib['type'] = 'application/x-bittorrent'
el.attrib['href'] = '%s/torrent/' % page_link
el.attrib['length'] = '%s' % ox.get_torrent_size(item.torrent.path)
# FIXME: loop over streams
# for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])):
for s in item.streams().filter(source=None):
@ -1290,15 +1310,12 @@ def atom_xml(request):
'application/atom+xml'
)
def oembed(request):
format = request.GET.get('format', 'json')
maxwidth = int(request.GET.get('maxwidth', 640))
maxheight = int(request.GET.get('maxheight', 480))
url = request.GET.get('url')
if not url:
raise Http404
url = request.GET['url']
parts = urlparse(url).path.split('/')
if len(parts) < 2:
raise Http404

View file

@ -1,7 +0,0 @@
from django.apps import AppConfig
class ItemListConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'itemlist'

Some files were not shown because too many files have changed in this diff Show more