Compare commits
1 commit
5b048a710d
637 changed files with 85255 additions and 158406 deletions
.gitignore (vendored), 2 changes
@@ -36,5 +36,3 @@ pandora/gunicorn_config.py
 .DS_Store
 .env
 overlay/
-pandora/encoding.conf
-pandora/tasks.conf
@@ -1,4 +1,4 @@
-FROM code.0x2620.org/0x2620/pandora-base:latest
+FROM 0x2620/pandora-base:latest
 
 LABEL maintainer="0x2620@0x2620.org"
 
@@ -7,7 +7,7 @@
 We recommend to run pan.do/ra inside of LXD or LXC or dedicated VM or server.
 You will need at least 2GB of free disk space
 
-pan.do/ra is known to work with Debian/12 (bookworm) and Ubuntu 20.04,
+pan.do/ra is known to work with Ubuntu 18.04, 20.04 and Debian/10 (buster),
 other distributions might also work, let us know if it works for you.
 
 Use the following commands as root to install pan.do/ra and all dependencies:
@@ -16,7 +16,7 @@
 cd /root
 curl -sL https://pan.do/ra-install > pandora_install.sh
 chmod +x pandora_install.sh
-export BRANCH=master # change to 'stable' to get the latest release (sometimes outdated)
+export BRANCH=stable # change to 'master' to get current developement version
 ./pandora_install.sh 2>&1 | tee pandora_install.log
 ```
 
@@ -50,9 +50,4 @@ export BRANCH=master # change to 'stable' to get the latest release (sometimes outdated)
 More info at
 https://code.0x2620.org/0x2620/pandora/wiki/Customization
 
-## Update
-
-To update your existing instlalation run
-
-    pandoractl update
 
ctl, 37 changes
@@ -17,7 +17,7 @@ if [ "$action" = "init" ]; then
     SUDO=""
     PANDORA_USER=`ls -l update.py | cut -f3 -d" "`
     if [ `whoami` != $PANDORA_USER ]; then
-        SUDO="sudo -E -H -u $PANDORA_USER"
+        SUDO="sudo -H -u $PANDORA_USER"
     fi
     $SUDO python3 -m venv --system-site-packages .
     branch=`cat .git/HEAD | sed 's@/@\n@g' | tail -n1`
@@ -27,30 +27,25 @@ if [ "$action" = "init" ]; then
         $SUDO bin/python3 -m pip install -U --ignore-installed "pip<9"
     fi
     if [ ! -d static/oxjs ]; then
-        $SUDO git clone -b $branch https://code.0x2620.org/0x2620/oxjs.git static/oxjs
+        $SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxjs.git static/oxjs
     fi
     $SUDO mkdir -p src
+    if [ ! -d src/oxtimelines ]; then
+        $SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxtimelines.git src/oxtimelines
+    fi
     for package in oxtimelines python-ox; do
         cd ${BASE}
         if [ ! -d src/${package} ]; then
-            $SUDO git clone -b $branch https://code.0x2620.org/0x2620/${package}.git src/${package}
+            $SUDO git clone --depth 1 -b $branch https://git.0x2620.org/${package}.git src/${package}
         fi
         cd ${BASE}/src/${package}
-        $SUDO ${BASE}/bin/pip install -e .
-
+        $SUDO ${BASE}/bin/python setup.py develop
     done
     cd ${BASE}
     $SUDO ./bin/pip install -r requirements.txt
-    for template in gunicorn_config.py encoding.conf tasks.conf; do
-        if [ ! -e pandora/$template ]; then
-            $SUDO cp pandora/${template}.in pandora/$template
-        fi
-    done
-    exit 0
-fi
-if [ "$action" = "version" ]; then
-    git rev-list HEAD --count
+    if [ ! -e pandora/gunicorn_config.py ]; then
+        $SUDO cp pandora/gunicorn_config.py.in pandora/gunicorn_config.py
+    fi
     exit 0
 fi
 
@@ -67,10 +62,11 @@ if [ ! -z $cmd ]; then
     SUDO=""
     PANDORA_USER=`ls -l update.py | cut -f3 -d" "`
     if [ `whoami` != $PANDORA_USER ]; then
-        SUDO="sudo -E -H -u $PANDORA_USER"
+        SUDO="sudo -H -u $PANDORA_USER"
     fi
     shift
-    exec $SUDO "$BASE/$cmd" $@
+    $SUDO "$BASE/$cmd" $@
+    exit $?
 fi
 
 if [ `whoami` != 'root' ]; then
@@ -78,15 +74,10 @@ if [ `whoami` != 'root' ]; then
     exit 1
 fi
 if [ "$action" = "install" ]; then
-    cd "`dirname "$self"`"
+    cd "`dirname "$0"`"
    BASE=`pwd`
    if [ -x /bin/systemctl ]; then
        if [ -d /etc/systemd/system/ ]; then
-            for template in gunicorn_config.py encoding.conf tasks.conf; do
-                if [ ! -e pandora/$template ]; then
-                    $SUDO cp pandora/${template}.in pandora/$template
-                fi
-            done
            for service in $SERVICES; do
                if [ -e /lib/systemd/system/${service}.service ]; then
                    rm -f /lib/systemd/system/${service}.service \
@@ -15,6 +15,7 @@ services:
       - "127.0.0.1:2620:80"
     networks:
       - backend
+      - default
     links:
       - pandora
       - websocketd
@@ -27,7 +28,7 @@ services:
     restart: unless-stopped
 
   db:
-    image: postgres:15
+    image: postgres:latest
     networks:
       - backend
     env_file: .env
@@ -1,4 +1,4 @@
-FROM debian:12
+FROM debian:buster
 
 LABEL maintainer="0x2620@0x2620.org"
 
@@ -1,17 +1,9 @@
 #!/bin/bash
 
+UBUNTU_CODENAME=bionic
 if [ -e /etc/os-release ]; then
     . /etc/os-release
 fi
-if [ -z "$UBUNTU_CODENAME" ]; then
-    UBUNTU_CODENAME=bionic
-fi
-if [ "$VERSION_CODENAME" = "bullseye" ]; then
-    UBUNTU_CODENAME=focal
-fi
-if [ "$VERSION_CODENAME" = "bookworm" ]; then
-    UBUNTU_CODENAME=lunar
-fi
 
 export DEBIAN_FRONTEND=noninteractive
 echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99languages
@@ -38,8 +30,6 @@ apt-get update -qq
 apt-get install -y \
     netcat-openbsd \
     sudo \
-    rsync \
-    iproute2 \
     vim \
     wget \
     pwgen \
@@ -52,23 +42,22 @@ apt-get install -y \
     python3-numpy \
     python3-psycopg2 \
     python3-pyinotify \
+    python3-simplejson \
     python3-lxml \
     python3-cssselect \
     python3-html5lib \
     python3-ox \
-    python3-elasticsearch \
     oxframe \
     ffmpeg \
     mkvtoolnix \
+    gpac \
     imagemagick \
     poppler-utils \
+    youtube-dl \
     ipython3 \
-    tesseract-ocr \
-    tesseract-ocr-eng \
     postfix \
     postgresql-client
 
-apt-get install -y --no-install-recommends youtube-dl rtmpdump
 apt-get clean
 
 rm -f /install.sh
@@ -11,7 +11,7 @@ else
     proxy=
 fi
 
-docker build $proxy -t code.0x2620.org/0x2620/pandora-base base
-docker build -t code.0x2620.org/0x2620/pandora-nginx nginx
+docker build $proxy -t 0x2620/pandora-base base
+docker build -t 0x2620/pandora-nginx nginx
 cd ..
-docker build -t code.0x2620.org/0x2620/pandora .
+docker build -t 0x2620/pandora .
@@ -6,9 +6,7 @@ user=pandora
 
 export LANG=en_US.UTF-8
 mkdir -p /run/pandora
-chown -R ${user}:${user} /run/pandora
+chown -R ${user}.${user} /run/pandora
 
-update="/usr/bin/sudo -u $user -E -H /srv/pandora/update.py"
-
 # pan.do/ra services
 if [ "$action" = "pandora" ]; then
@@ -28,12 +26,12 @@ if [ "$action" = "pandora" ]; then
         /overlay/install.py
 
         echo "Initializing database..."
-        echo "CREATE EXTENSION pg_trgm;" | /srv/pandora/pandora/manage.py dbshell || true
+        echo "CREATE EXTENSION pg_trgm;" | /srv/pandora/pandora/manage.py dbshell
         /srv/pandora/pandora/manage.py init_db
-        $update db
+        /srv/pandora/update.py db
         echo "Generating static files..."
-        chown -R ${user}:${user} /srv/pandora/
-        $update static
+        /srv/pandora/update.py static
+        chown -R ${user}.${user} /srv/pandora/
         touch /srv/pandora/initialized
     fi
     /srv/pandora_base/docker/wait-for db 5432
@@ -46,53 +44,54 @@ if [ "$action" = "encoding" ]; then
     /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
     /srv/pandora_base/docker/wait-for rabbitmq 5672
     name=pandora-encoding-$(hostname)
-    cd /srv/pandora/pandora
     exec /usr/bin/sudo -u $user -E -H \
-        /srv/pandora/bin/celery \
-        -A app worker \
-        -Q encoding -n ${name} \
-        --pidfile /run/pandora/encoding.pid \
-        --max-tasks-per-child 500 \
-        -c 1 \
-        -l INFO
+        /srv/pandora/bin/python \
+        /srv/pandora/pandora/manage.py \
+        celery worker \
+        -c 1 \
+        -Q encoding -n $name \
+        -l INFO
 fi
 if [ "$action" = "tasks" ]; then
     /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
     /srv/pandora_base/docker/wait-for rabbitmq 5672
     name=pandora-default-$(hostname)
-    cd /srv/pandora/pandora
     exec /usr/bin/sudo -u $user -E -H \
-        /srv/pandora/bin/celery \
-        -A app worker \
-        -Q default,celery -n ${name} \
-        --pidfile /run/pandora/tasks.pid \
-        --max-tasks-per-child 1000 \
+        /srv/pandora/bin/python \
+        /srv/pandora/pandora/manage.py \
+        celery worker \
+        -Q default,celery -n $name \
+        --maxtasksperchild 1000 \
         -l INFO
 fi
 if [ "$action" = "cron" ]; then
     /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
     /srv/pandora_base/docker/wait-for rabbitmq 5672
-    cd /srv/pandora/pandora
     exec /usr/bin/sudo -u $user -E -H \
-        /srv/pandora/bin/celery \
-        -A app beat \
-        -s /run/pandora/celerybeat-schedule \
+        /srv/pandora/bin/python \
+        /srv/pandora/pandora/manage.py \
+        celerybeat -s /run/pandora/celerybeat-schedule \
         --pidfile /run/pandora/cron.pid \
         -l INFO
 fi
 if [ "$action" = "websocketd" ]; then
     /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
     /srv/pandora_base/docker/wait-for rabbitmq 5672
-    cd /srv/pandora/pandora
     exec /usr/bin/sudo -u $user -E -H \
         /srv/pandora/bin/python \
         /srv/pandora/pandora/manage.py websocketd
 fi
 
 # pan.do/ra management and update
-if [ "$action" = "ctl" ]; then
+if [ "$action" = "manage.py" ]; then
     shift
-    exec /srv/pandora/ctl "$@"
+    exec /usr/bin/sudo -u $user -E -H \
+        /srv/pandora/pandora/manage.py "$@"
+fi
+if [ "$action" = "update.py" ]; then
+    shift
+    exec /usr/bin/sudo -u $user -E -H \
+        /srv/pandora/update.py "$@"
 fi
 if [ "$action" = "bash" ]; then
     shift
@@ -103,9 +102,9 @@ fi
 # pan.do/ra setup hooks
 if [ "$action" = "docker-compose.yml" ]; then
     cat /srv/pandora_base/docker-compose.yml | \
-        sed "s#build: \.#image: code.0x2620.org/0x2620/pandora:latest#g" | \
+        sed "s#build: \.#image: 0x2620/pandora:latest#g" | \
         sed "s#\./overlay:#.:#g" | \
-        sed "s#build: docker/nginx#image: code.0x2620.org/0x2620/pandora-nginx:latest#g"
+        sed "s#build: docker/nginx#image: 0x2620/pandora-nginx:latest#g"
     exit
 fi
 if [ "$action" = ".env" ]; then
@@ -131,5 +130,5 @@ echo " docker run 0x2620/pandora setup | sh"
 echo
 echo adjust created files to match your needs and run:
 echo
-echo " docker compose up"
+echo " docker-compose up"
 echo
@@ -56,9 +56,13 @@ cp /srv/pandora/docker/entrypoint.sh /entrypoint.sh
 mv /srv/pandora/ /srv/pandora_base/
 mkdir /pandora
 ln -s /pandora /srv/pandora
-cat > /usr/local/bin/pandoractl << EOF
+cat > /usr/local/bin/update.py << EOF
 #!/bin/sh
-exec /srv/pandora/ctl \$@
+exec /srv/pandora/update.py \$@
 EOF
-chmod +x /usr/local/bin/pandoractl
+cat > /usr/local/bin/manage.py << EOF
+#!/bin/sh
+exec /srv/pandora/pandora/manage.py \$@
+EOF
+chmod +x /usr/local/bin/manage.py /usr/local/bin/update.py
 
@@ -1,12 +0,0 @@
-#!/bin/bash
-# push new version of pan.do/ra to code.0x2620.org
-set -e
-
-cd /tmp
-git clone https://code.0x2620.org/0x2620/pandora
-cd pandora
-./docker/build.sh
-
-docker push code.0x2620.org/0x2620/pandora-base:latest
-docker push code.0x2620.org/0x2620/pandora-nginx:latest
-docker push code.0x2620.org/0x2620/pandora:latest
@@ -1,18 +1,18 @@
 #!/bin/sh
-docker run --rm code.0x2620.org/0x2620/pandora docker-compose.yml > docker-compose.yml
+docker run 0x2620/pandora docker-compose.yml > docker-compose.yml
 if [ ! -e .env ]; then
-    docker run --rm code.0x2620.org/0x2620/pandora .env > .env
+    docker run 0x2620/pandora .env > .env
     echo .env >> .gitignore
 fi
 if [ ! -e config.jsonc ]; then
-    docker run --rm code.0x2620.org/0x2620/pandora config.jsonc > config.jsonc
+    docker run 0x2620/pandora config.jsonc > config.jsonc
 fi
 cat > README.md << EOF
 pan.do/ra docker instance
 
 this folder was created with
 
-    docker run --rm code.0x2620.org/0x2620/pandora setup | sh
+    docker run 0x2620/pandora setup | sh
 
 To start pan.do/ra adjust the files in this folder:
 
@@ -22,14 +22,11 @@ To start pan.do/ra adjust the files in this folder:
 
 and to get started run this:
 
-    docker compose up -d
+    docker-compose up -d
 
 To update pan.do/ra run:
 
-    docker compose run --rm pandora ctl update
+    docker-compose run pandora update.py
 
-To run pan.do/ra manage shell:
-
-    docker compose run --rm pandora ctl manage shell
 EOF
 touch __init__.py
@@ -1,5 +1,5 @@
 #!/bin/sh
-TIMEOUT=180
+TIMEOUT=60
 TARGET="$1"
 
 for i in `seq $TIMEOUT` ; do
@@ -17,7 +17,6 @@ server {
     #server_name pandora.YOURDOMAIN.COM;
 
     listen 80 default;
-    listen [::]:80 default;
 
     access_log /var/log/nginx/pandora.access.log;
     error_log /var/log/nginx/pandora.error.log;
@@ -1 +0,0 @@
-pandora ALL=(ALL:ALL) NOPASSWD:/usr/local/bin/pandoractl
@@ -11,7 +11,7 @@ PIDFile=/run/pandora/cron.pid
 WorkingDirectory=/srv/pandora/pandora
 ExecStart=/srv/pandora/bin/celery \
     -A app beat \
-    --scheduler django_celery_beat.schedulers:DatabaseScheduler \
+    -s /run/pandora/celerybeat-schedule \
     --pidfile /run/pandora/cron.pid \
     -l INFO
 ExecReload=/bin/kill -HUP $MAINPID
@@ -7,16 +7,14 @@ Type=simple
 Restart=always
 User=pandora
 Group=pandora
-EnvironmentFile=/srv/pandora/pandora/encoding.conf
 PIDFile=/run/pandora/encoding.pid
 WorkingDirectory=/srv/pandora/pandora
 ExecStart=/srv/pandora/bin/celery \
     -A app worker \
     -Q encoding -n pandora-encoding \
     --pidfile /run/pandora/encoding.pid \
-    -c $CONCURRENCY \
-    --max-tasks-per-child $MAX_TASKS_PER_CHILD \
-    -l $LOGLEVEL
+    --maxtasksperchild 500 \
+    -l INFO
 ExecReload=/bin/kill -TERM $MAINPID
 
 [Install]
@@ -7,16 +7,14 @@ Type=simple
 Restart=always
 User=pandora
 Group=pandora
-EnvironmentFile=/srv/pandora/pandora/tasks.conf
 PIDFile=/run/pandora/tasks.pid
 WorkingDirectory=/srv/pandora/pandora
 ExecStart=/srv/pandora/bin/celery \
     -A app worker \
     -Q default,celery -n pandora-default \
     --pidfile /run/pandora/tasks.pid \
-    -c $CONCURRENCY \
-    --max-tasks-per-child $MAX_TASKS_PER_CHILD \
-    -l $LOGLEVEL
+    --maxtasksperchild 1000 \
+    -l INFO
 ExecReload=/bin/kill -TERM $MAINPID
 
 [Install]
@@ -1,6 +0,0 @@
-from django.apps import AppConfig
-
-
-class AnnotationConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'annotation'
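Note on the removed `apps.py` files (and the matching `id`-field migrations removed below): pinning `default_auto_field` per app is what keeps Django >= 3.2 from generating implicit `AutoField` to `BigAutoField` migrations. A minimal sketch of the project-wide alternative, assuming a standard Django settings module (the setting itself is not shown in this diff):

```
# settings.py sketch: equivalent to one AppConfig per app as in the
# removed apps.py files; makes BigAutoField the default primary key type
# project-wide, so no per-app AlterField migrations are generated.
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
```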
@@ -27,7 +27,6 @@ class Command(BaseCommand):
         parser.add_argument('username', help='username')
         parser.add_argument('item', help='item')
         parser.add_argument('layer', help='layer')
-        parser.add_argument('language', help='language', default="")
         parser.add_argument('filename', help='filename.srt')
 
     def handle(self, *args, **options):
@@ -35,7 +34,6 @@ class Command(BaseCommand):
         public_id = options['item']
         layer_id = options['layer']
         filename = options['filename']
-        language = options.get("language")
 
         user = User.objects.get(username=username)
         item = Item.objects.get(public_id=public_id)
@@ -49,9 +47,6 @@ class Command(BaseCommand):
         for i in range(len(annotations)-1):
             if annotations[i]['out'] == annotations[i+1]['in']:
                 annotations[i]['out'] = annotations[i]['out'] - 0.001
-        if language:
-            for annotation in annotations:
-                annotation["value"] = '<span lang="%s">%s</span>' % (language, annotation["value"])
         tasks.add_annotations.delay({
             'item': item.public_id,
             'layer': layer_id,
@@ -1,18 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:24
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('annotation', '0003_auto_20160219_1537'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='annotation',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
@@ -163,25 +163,28 @@ class Annotation(models.Model):
             self.sortvalue = None
             self.languages = None
 
-        if not self.clip or self.start != self.clip.start or self.end != self.clip.end:
-            self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
-
         with transaction.atomic():
+            if not self.clip or self.start != self.clip.start or self.end != self.clip.end:
+                self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
+
             if set_public_id:
                 self.set_public_id()
 
             super(Annotation, self).save(*args, **kwargs)
 
             if self.clip:
-                self.clip.update_findvalue()
-                setattr(self.clip, self.layer, True)
-                self.clip.save(update_fields=[self.layer, 'sortvalue', 'findvalue'])
+                Clip.objects.filter(**{
+                    'id': self.clip.id,
+                    self.layer: False
+                }).update(**{self.layer: True})
+                # update clip.findvalue
+                self.clip.save()
 
         # update matches in bulk if called from load_subtitles
         if not delay_matches:
             self.update_matches()
             self.update_documents()
             self.update_translations()
 
     def update_matches(self):
         from place.models import Place
@@ -264,10 +267,7 @@ class Annotation(models.Model):
         from translation.models import Translation
         layer = self.get_layer()
         if layer.get('translate'):
-            for lang in settings.CONFIG['languages']:
-                if lang == settings.CONFIG['language']:
-                    continue
-                Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT})
+            Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT})
 
     def delete(self, *args, **kwargs):
         with transaction.atomic():
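The `save()` hunk above trades a raw `Clip.objects.filter(...).update(...)` round-trip for `setattr` plus `save(update_fields=...)`. A standalone sketch of the `update_fields` pattern; the model fields here are illustrative, not pandora's actual schema:

```
# Sketch: save(update_fields=[...]) issues an UPDATE limited to the named
# columns, so recomputed denormalized values land in one write without
# clobbering fields a concurrent writer may have touched.
from django.db import models

class Clip(models.Model):
    subtitles = models.BooleanField(default=False)  # one flag per layer
    findvalue = models.TextField(default='')
    sortvalue = models.CharField(max_length=1000, null=True, blank=True)

    def update_findvalue(self):
        # placeholder for the real denormalization logic
        self.findvalue = self.findvalue.strip()

def flag_layer(clip, layer):
    clip.update_findvalue()
    setattr(clip, layer, True)  # e.g. layer == 'subtitles'
    clip.save(update_fields=[layer, 'sortvalue', 'findvalue'])
```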
@@ -5,12 +5,12 @@ from django.contrib.auth import get_user_model
 from django.db import transaction
 
 import ox
-from app.celery import app
+from celery.task import task
 
 from .models import Annotation
 
 
-@app.task(ignore_results=False, queue='default')
+@task(ignore_results=False, queue='default')
 def add_annotations(data):
     from item.models import Item
     from entity.models import Entity
@@ -51,7 +51,7 @@ def add_annotations(data):
     annotation.item.update_facets()
     return True
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_item(id, force=False):
     from item.models import Item
     from clip.models import Clip
@@ -72,7 +72,7 @@ def update_item(id, force=False):
     a.item.save()
 
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_annotations(layers, value):
     items = {}
 
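These hunks track the Celery 5 API change: the `celery.task` module on the `+` side was removed upstream, so the `-` side registers tasks on the shared app instance instead. A minimal sketch of the pattern, assuming the `app.celery` module shown elsewhere in this diff:

```
# Celery >= 5 style used on the "-" side: no celery.task module; tasks
# hang off the application instance defined in app/celery.py.
from celery import Celery

app = Celery('pandora')

@app.task(ignore_results=True, queue='default')
def update_item(id, force=False):
    # body elided; the decorator is the relevant part
    pass
```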
@@ -183,7 +183,7 @@ def addAnnotation(request, data):
 
     layer_id = data['layer']
     layer = get_by_id(settings.CONFIG['layers'], layer_id)
-    if layer['canAddAnnotations'].get(request.user.profile.get_level()) or item.editable(request.user):
+    if layer['canAddAnnotations'].get(request.user.profile.get_level()):
         if layer['type'] == 'entity':
             try:
                 value = Entity.get_by_name(ox.decode_html(data['value']), layer['entity']).get_id()
@@ -241,7 +241,8 @@ def addAnnotations(request, data):
 
     layer_id = data['layer']
     layer = get_by_id(settings.CONFIG['layers'], layer_id)
-    if item.editable(request.user):
+    if item.editable(request.user) \
+        and layer['canAddAnnotations'].get(request.user.profile.get_level()):
         response = json_response()
         data['user'] = request.user.username
         t = add_annotations.delay(data)
@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class AppConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'app'
-
@@ -6,8 +6,16 @@ root_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
 root_dir = os.path.dirname(root_dir)
 os.chdir(root_dir)
 
+# set the default Django settings module for the 'celery' program.
 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
 
-app = Celery('pandora', broker_connection_retry_on_startup=True)
+app = Celery('pandora')
+
+# Using a string here means the worker doesn't have to serialize
+# the configuration object to child processes.
+# - namespace='CELERY' means all celery-related configuration keys
+#   should have a `CELERY_` prefix.
 app.config_from_object('django.conf:settings', namespace='CELERY')
+
+# Load task modules from all registered Django app configs.
 app.autodiscover_tasks()
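`broker_connection_retry_on_startup=True` on the `-` side addresses the Celery deprecation warning about broker connection retries at startup. Under `config_from_object(..., namespace='CELERY')` the same knob can live in Django settings instead; a sketch, with the setting name derived from the usual namespace convention (an assumption, not shown in this diff):

```
# settings.py sketch: with namespace='CELERY', this maps to the
# broker_connection_retry_on_startup option passed to Celery() above.
CELERY_BROKER_CONNECTION_RETRY_ON_STARTUP = True
```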
@@ -24,6 +24,9 @@ User = get_user_model()
 
 _win = (sys.platform == "win32")
 
+RUN_RELOADER = True
+NOTIFIER = None
+
 def get_version():
     git_dir = join(dirname(dirname(dirname(__file__))), '.git')
     if exists(git_dir):
@@ -133,13 +136,7 @@ def load_config(init=False):
         added = []
         for key in sorted(d):
             if key not in c:
-                if key not in (
-                    'hidden',
-                    'find',
-                    'findDocuments',
-                    'videoPoints',
-                ):
-                    added.append("\"%s\": %s," % (key, json.dumps(d[key])))
+                added.append("\"%s\": %s," % (key, json.dumps(d[key])))
                 c[key] = d[key]
         if added:
             sys.stderr.write("adding default %s:\n\t" % section)
@@ -260,6 +257,46 @@ check the README for further details.
         except:
             pass
 
+
+def reloader_thread():
+    global NOTIFIER
+    settings.RELOADER_RUNNING=True
+    _config_mtime = 0
+    try:
+        import pyinotify
+        INOTIFY = True
+    except:
+        INOTIFY = False
+    if INOTIFY:
+        def add_watch():
+            name = os.path.realpath(settings.SITE_CONFIG)
+            wm.add_watch(name, pyinotify.IN_CLOSE_WRITE, reload_config)
+
+        def reload_config(event):
+            load_config()
+            add_watch()
+
+        wm = pyinotify.WatchManager()
+        add_watch()
+        notifier = pyinotify.Notifier(wm)
+        NOTIFIER = notifier
+        notifier.loop()
+    else:
+        while RUN_RELOADER:
+            try:
+                stat = os.stat(settings.SITE_CONFIG)
+                mtime = stat.st_mtime
+                if _win:
+                    mtime -= stat.st_ctime
+                if mtime > _config_mtime:
+                    load_config()
+                    _config_mtime = mtime
+                time.sleep(10)
+            except:
+                #sys.stderr.write("reloading config failed\n")
+                pass
+
 def update_static():
     oxjs_build = os.path.join(settings.STATIC_ROOT, 'oxjs/tools/build/build.py')
     if os.path.exists(oxjs_build):
@@ -327,11 +364,7 @@ def update_static():
     #locale
     for f in sorted(glob(os.path.join(settings.STATIC_ROOT, 'json/locale.pandora.*.json'))):
         with open(f) as fd:
-            try:
-                locale = json.load(fd)
-            except:
-                print("failed to parse %s" % f)
-                raise
+            locale = json.load(fd)
         site_locale = f.replace('locale.pandora', 'locale.' + settings.CONFIG['site']['id'])
         locale_file = f.replace('locale.pandora', 'locale')
         print('write', locale_file)
@@ -374,4 +407,17 @@ def update_geoip(force=False):
         print('failed to download GeoLite2-City.mmdb')
 
 def init():
-    load_config(True)
+    if not settings.RELOADER_RUNNING:
+        load_config(True)
+        if settings.RELOAD_CONFIG:
+            thread.start_new_thread(reloader_thread, ())
+
+def shutdown():
+    if settings.RELOADER_RUNNING:
+        RUN_RELOADER = False
+        settings.RELOADER_RUNNING = False
+        if NOTIFIER:
+            NOTIFIER.stop()
+
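The `+` side's `reloader_thread` falls back to mtime polling when pyinotify is unavailable; the `-` side drops the whole mechanism. A self-contained sketch of that polling fallback, reduced to its core loop:

```
# Standalone sketch of the mtime-polling fallback removed here: call
# on_change() whenever the watched file's mtime advances.
import os
import time

def watch(path, on_change, interval=10):
    last_mtime = 0.0
    while True:
        try:
            mtime = os.stat(path).st_mtime
            if mtime > last_mtime:
                on_change()
                last_mtime = mtime
        except OSError:
            pass  # file may be missing briefly during an atomic replace
        time.sleep(interval)
```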
@@ -11,8 +11,6 @@ def run(cmd):
     stdout, stderr = p.communicate()
 
     if p.returncode != 0:
-        print('failed to run:', cmd)
-        print(stdout)
         print(stderr)
         sys.exit(1)
 
@@ -1,23 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:24
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('app', '0001_initial'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='page',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='settings',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
|
@ -1,34 +0,0 @@
|
||||||
import unicodedata
|
|
||||||
|
|
||||||
from django.contrib.auth import get_user_model
|
|
||||||
|
|
||||||
import mozilla_django_oidc.auth
|
|
||||||
|
|
||||||
from user.utils import prepare_user
|
|
||||||
|
|
||||||
User = get_user_model()
|
|
||||||
|
|
||||||
|
|
||||||
class OIDCAuthenticationBackend(mozilla_django_oidc.auth.OIDCAuthenticationBackend):
|
|
||||||
def create_user(self, claims):
|
|
||||||
user = super(OIDCAuthenticationBackend, self).create_user(claims)
|
|
||||||
username = claims.get("preferred_username")
|
|
||||||
n = 1
|
|
||||||
if username and username != user.username:
|
|
||||||
uname = username
|
|
||||||
while User.objects.filter(username=uname).exclude(id=user.id).exists():
|
|
||||||
n += 1
|
|
||||||
uname = '%s (%s)' % (username, n)
|
|
||||||
user.username = uname
|
|
||||||
user.save()
|
|
||||||
prepare_user(user)
|
|
||||||
return user
|
|
||||||
|
|
||||||
def update_user(self, user, claims):
|
|
||||||
print("update user", user, claims)
|
|
||||||
#user.save()
|
|
||||||
return user
|
|
||||||
|
|
||||||
|
|
||||||
def generate_username(email):
|
|
||||||
return unicodedata.normalize('NFKC', email)[:150]
|
|
|
@@ -2,16 +2,13 @@
 
 import datetime
 
-from app.celery import app
+from celery.task import periodic_task
 from celery.schedules import crontab
 
-@app.task(queue='encoding')
+@periodic_task(run_every=crontab(hour=6, minute=0), queue='encoding')
 def cron(**kwargs):
     from django.db import transaction
     from django.contrib.sessions.models import Session
     Session.objects.filter(expire_date__lt=datetime.datetime.now()).delete()
     transaction.commit()
 
-@app.on_after_finalize.connect
-def setup_periodic_tasks(sender, **kwargs):
-    sender.add_periodic_task(crontab(hour=6, minute=0), cron.s())
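Same Celery 5 migration as above, applied to the periodic task: `periodic_task` is gone upstream, so the schedule is registered via the `on_after_finalize` signal. Sketch of the pattern, with module paths assumed:

```
# Celery >= 5 periodic registration, matching the "-" side of the hunk.
from celery import Celery
from celery.schedules import crontab

app = Celery('pandora')

@app.task(queue='encoding')
def cron(**kwargs):
    pass  # session cleanup elided

@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    # run daily at 06:00
    sender.add_periodic_task(crontab(hour=6, minute=0), cron.s())
```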
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
-from datetime import datetime
-import base64
 import copy
+from datetime import datetime
 
 from django.shortcuts import render, redirect
 from django.conf import settings
@@ -53,18 +53,17 @@ def embed(request, id):
     })
 
 def redirect_url(request, url):
-    try:
-        url = base64.decodebytes(url.encode()).decode()
-    except:
-        pass
+    if request.META['QUERY_STRING']:
+        url += "?" + request.META['QUERY_STRING']
     if settings.CONFIG['site'].get('sendReferrer', False):
         return redirect(url)
     else:
-        return HttpResponse('<script>document.location.href=%s;</script>' % json.dumps(url))
+        return HttpResponse('<script>document.location.href=%s;</script>'%json.dumps(url))
 
 def opensearch_xml(request):
     osd = ET.Element('OpenSearchDescription')
-    osd.attrib['xmlns'] = "http://a9.com/-/spec/opensearch/1.1/"
+    osd.attrib['xmlns']="http://a9.com/-/spec/opensearch/1.1/"
     e = ET.SubElement(osd, 'ShortName')
     e.text = settings.SITENAME
     e = ET.SubElement(osd, 'Description')
@@ -163,7 +162,7 @@ def init(request, data):
         del config['keys']
 
     if 'HTTP_ACCEPT_LANGUAGE' in request.META:
-        response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0].split(',')[0]
+        response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0]
 
     if request.META.get('HTTP_X_PREFIX') == 'NO':
         config['site']['videoprefix'] = ''
@@ -184,7 +183,6 @@ def init(request, data):
         except:
             pass
 
-    config['site']['oidc'] = bool(getattr(settings, 'OIDC_RP_CLIENT_ID', False))
     response['data']['site'] = config
     response['data']['user'] = init_user(request.user, request)
     request.session['last_init'] = str(datetime.now())
@@ -247,7 +245,7 @@ def getEmbedDefaults(request, data):
         i = qs[0].cache
         response['data']['item'] = i['id']
         response['data']['itemDuration'] = i['duration']
-        response['data']['itemRatio'] = i.get('videoRatio', settings.CONFIG['video']['previewRatio'])
+        response['data']['itemRatio'] = i['videoRatio']
         qs = List.objects.exclude(status='private').order_by('name')
         if qs.exists():
             i = qs[0].json()
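In `redirect_url` the `-` side expects the target URL base64-encoded in the path instead of appending the raw query string. A round-trip sketch of that scheme; the helper names are illustrative:

```
# Round-trip sketch of the base64 redirect target used on the "-" side.
import base64

def encode_target(url):
    return base64.encodebytes(url.encode()).decode()

def decode_target(token):
    try:
        return base64.decodebytes(token.encode()).decode()
    except Exception:
        return token  # fall through to the raw value, as the view does

assert decode_target(encode_target("https://example.com/?a=1")) == "https://example.com/?a=1"
```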
@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class ArchiveConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'archive'
-
@@ -1,11 +1,10 @@
 # -*- coding: utf-8 -*-
 
 import json
-import logging
-import os
-import shutil
 import subprocess
+import shutil
 import tempfile
+import os
 
 import ox
 from django.conf import settings
@@ -15,9 +14,6 @@ from item.tasks import load_subtitles
 
 from . import models
 
-logger = logging.getLogger('pandora.' + __name__)
-
-
 info_keys = [
     'title',
     'description',
@@ -40,14 +36,8 @@ info_key_map = {
     'display_id': 'id',
 }
 
-YT_DLP = ['yt-dlp']
-if settings.YT_DLP_EXTRA:
-    YT_DLP += settings.YT_DLP_EXTRA
-
-def get_info(url, referer=None):
-    cmd = YT_DLP + ['-j', '--all-subs', url]
-    if referer:
-        cmd += ['--referer', referer]
+def get_info(url):
+    cmd = ['youtube-dl', '-j', '--all-subs', url]
     p = subprocess.Popen(cmd,
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE, close_fds=True)
@@ -67,8 +57,6 @@ def get_info(url, referer=None):
             info[-1]['tags'] = []
         if 'upload_date' in i and i['upload_date']:
             info[-1]['date'] = '-'.join([i['upload_date'][:4], i['upload_date'][4:6], i['upload_date'][6:]])
-        if 'referer' not in info[-1]:
-            info[-1]['referer'] = url
     return info
 
 def add_subtitles(item, media, tmp):
@@ -96,18 +84,9 @@ def add_subtitles(item, media, tmp):
             sub.selected = True
             sub.save()
 
-def load_formats(url):
-    cmd = YT_DLP + ['-q', url, '-j', '-F']
-    p = subprocess.Popen(cmd,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE, close_fds=True)
-    stdout, stderr = p.communicate()
-    formats = stdout.decode().strip().split('\n')[-1]
-    return json.loads(formats)
-
-def download(item_id, url, referer=None):
+def download(item_id, url):
     item = Item.objects.get(public_id=item_id)
-    info = get_info(url, referer)
+    info = get_info(url)
     if not len(info):
         return '%s contains no videos' % url
     media = info[0]
@@ -116,19 +95,13 @@ def download(item_id, url, referer=None):
     if isinstance(tmp, bytes):
         tmp = tmp.decode('utf-8')
     os.chdir(tmp)
-    cmd = YT_DLP + ['-q', media['url']]
-    if referer:
-        cmd += ['--referer', referer]
-    elif 'referer' in media:
-        cmd += ['--referer', media['referer']]
-    cmd += ['-o', '%(title)80s.%(ext)s']
-
-    if settings.CONFIG['video'].get('reuseUpload', False):
+    cmd = ['youtube-dl', '-q', media['url']]
+    if settings.CONFIG['video'].get('reuseUload', False):
         max_resolution = max(settings.CONFIG['video']['resolutions'])
         format = settings.CONFIG['video']['formats'][0]
         if format == 'mp4':
             cmd += [
-                '-f', 'bestvideo[height<=%s][ext=mp4]+bestaudio[ext=m4a]' % max_resolution,
+                '-f', 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio',
                 '--merge-output-format', 'mp4'
             ]
         elif format == 'webm':
@@ -138,50 +111,6 @@ def download(item_id, url, referer=None):
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE, close_fds=True)
     stdout, stderr = p.communicate()
-    if stderr and b'Requested format is not available.' in stderr:
-        formats = load_formats(url)
-        has_audio = bool([fmt for fmt in formats['formats'] if fmt['resolution'] == 'audio only'])
-        has_video = bool([fmt for fmt in formats['formats'] if 'x' in fmt['resolution']])
-
-        cmd = [
-            'yt-dlp', '-q', url,
-            '-o', '%(title)80s.%(ext)s'
-        ]
-        if referer:
-            cmd += ['--referer', referer]
-        elif 'referer' in media:
-            cmd += ['--referer', media['referer']]
-        if has_video and not has_audio:
-            cmd += [
-                '-f', 'bestvideo[height<=%s][ext=mp4]' % max_resolution,
-            ]
-        elif not has_video and has_audio:
-            cmd += [
-                'bestaudio[ext=m4a]'
-            ]
-        else:
-            cmd = []
-        if cmd:
-            p = subprocess.Popen(cmd,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE, close_fds=True)
-            stdout, stderr = p.communicate()
-            if stderr and b'Requested format is not available.' in stderr:
-                cmd = [
-                    'yt-dlp', '-q', url,
-                    '-o', '%(title)80s.%(ext)s'
-                ]
-                if referer:
-                    cmd += ['--referer', referer]
-                elif 'referer' in media:
-                    cmd += ['--referer', media['referer']]
-                if cmd:
-                    p = subprocess.Popen(cmd,
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE, close_fds=True)
-                    stdout, stderr = p.communicate()
-    if stdout or stderr:
-        logger.error("import failed:\n%s\n%s\n%s", " ".join(cmd), stdout.decode(), stderr.decode())
     parts = list(os.listdir(tmp))
     if parts:
         part = 1
@@ -209,7 +138,6 @@ def download(item_id, url, referer=None):
                 f.extract_stream()
                 status = True
             else:
-                logger.error("failed to import %s file already exists %s", url, oshash)
                 status = 'file exists'
             if len(parts) == 1:
                 add_subtitles(f.item, media, tmp)
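The `-` side of `external.py` wraps yt-dlp instead of youtube-dl, threads an optional referer through, and (in the large removed block above) retries with probed formats when yt-dlp reports "Requested format is not available.". A condensed sketch of the command assembly; `YT_DLP_EXTRA` is the settings hook visible in the hunks, and `max_resolution` stands in for `settings.CONFIG['video']['resolutions']`:

```
# Condensed sketch of the yt-dlp invocation on the "-" side.
import subprocess

YT_DLP = ['yt-dlp']  # optionally extended by settings.YT_DLP_EXTRA

def fetch(url, referer=None, max_resolution=1080):
    cmd = YT_DLP + ['-q', url, '-o', '%(title)80s.%(ext)s',
                    '-f', 'bestvideo[height<=%s][ext=mp4]+bestaudio[ext=m4a]' % max_resolution,
                    '--merge-output-format', 'mp4']
    if referer:
        cmd += ['--referer', referer]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, close_fds=True)
    return p.communicate()
```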
@@ -1,30 +1,26 @@
 # -*- coding: utf-8 -*-
 
-from distutils.spawn import find_executable
-from glob import glob
-from os.path import exists
-import fractions
-import logging
-import math
 import os
-import re
-import shutil
+from os.path import exists
+import fractions
 import subprocess
 import tempfile
 import time
+import math
+import shutil
+from distutils.spawn import find_executable
+from glob import glob
 
 import numpy as np
 import ox
 import ox.image
 from ox.utils import json
 from django.conf import settings
-from PIL import Image, ImageOps
+from PIL import Image
 
 from .chop import Chop, make_keyframe_index
 
-logger = logging.getLogger('pandora.' + __name__)
-
 img_extension = 'jpg'
 
 MAX_DISTANCE = math.sqrt(3 * pow(255, 2))
@@ -61,15 +57,14 @@ def supported_formats():
     stdout = stdout.decode('utf-8')
     stderr = stderr.decode('utf-8')
     version = stderr.split('\n')[0].split(' ')[2]
-    mp4 = 'libx264' in stdout and bool(re.compile('DEA.L. aac').findall(stdout))
     return {
         'version': version.split('.'),
         'ogg': 'libtheora' in stdout and 'libvorbis' in stdout,
         'webm': 'libvpx' in stdout and 'libvorbis' in stdout,
         'vp8': 'libvpx' in stdout and 'libvorbis' in stdout,
         'vp9': 'libvpx-vp9' in stdout and 'libopus' in stdout,
-        'mp4': mp4,
-        'h264': mp4,
+        'mp4': 'libx264' in stdout and 'DEA.L. aac' in stdout,
+        'h264': 'libx264' in stdout and 'DEA.L. aac' in stdout,
     }
 
 def stream(video, target, profile, info, audio_track=0, flags={}):
@@ -150,17 +145,10 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
             audioquality = -1
             audiobitrate = '22k'
             audiochannels = 1
-        elif profile == '0p':
-            info['video'] = []
-            audiorate = 48000
-            audioquality = 6
-            audiobitrate = None
-            audiochannels = None
-            audio_codec = 'libopus'
         else:
             height = 96
 
-    if settings.USE_VP9 and settings.FFMPEG_SUPPORTS_VP9:
+    if settings.FFMPEG_SUPPORTS_VP9:
         audio_codec = 'libopus'
         video_codec = 'libvpx-vp9'
 
@@ -223,7 +211,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
             bitrate = height*width*fps*bpp/1000
 
             video_settings = trim + [
-                '-b:v', '%dk' % bitrate,
+                '-vb', '%dk' % bitrate,
                 '-aspect', aspect,
                 # '-vf', 'yadif',
                 '-max_muxing_queue_size', '512',
@@ -251,8 +239,6 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
                 '-level', '4.0',
                 '-pix_fmt', 'yuv420p',
             ]
-            if info['video'][0].get("force_framerate"):
-                video_settings += ['-r:v', str(fps)]
             video_settings += ['-map', '0:%s,0:0' % info['video'][0]['id']]
             audio_only = False
         else:
@@ -292,7 +278,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
             ac = min(ac, audiochannels)
             audio_settings += ['-ac', str(ac)]
         if audiobitrate:
-            audio_settings += ['-b:a', audiobitrate]
+            audio_settings += ['-ab', audiobitrate]
         if format == 'mp4':
             audio_settings += ['-c:a', 'aac', '-strict', '-2']
         elif audio_codec == 'libopus':
@@ -325,15 +311,14 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
         pass1_post = post[:]
         pass1_post[-1] = '/dev/null'
         if format == 'webm':
-            if video_codec != 'libvpx-vp9':
-                pass1_post = ['-speed', '4'] + pass1_post
+            pass1_post = ['-speed', '4'] + pass1_post
             post = ['-speed', '1'] + post
-        cmds.append(base + ['-pass', '1', '-passlogfile', '%s.log' % target]
-                    + video_settings + ['-an'] + pass1_post)
+        cmds.append(base + ['-an', '-pass', '1', '-passlogfile', '%s.log' % target]
+                    + video_settings + pass1_post)
         cmds.append(base + ['-pass', '2', '-passlogfile', '%s.log' % target]
-                    + video_settings + audio_settings + post)
+                    + audio_settings + video_settings + post)
     else:
-        cmds.append(base + video_settings + audio_settings + post)
+        cmds.append(base + audio_settings + video_settings + post)
 
     if settings.FFMPEG_DEBUG:
         print('\n'.join([' '.join(cmd) for cmd in cmds]))
@@ -441,15 +426,10 @@ def frame_direct(video, target, position):
     r = run_command(cmd)
     return r == 0

-def open_image_rgb(image_source):
-    source = Image.open(image_source)
-    source = ImageOps.exif_transpose(source)
-    source = source.convert('RGB')
-    return source

 def resize_image(image_source, image_output, width=None, size=None):
     if exists(image_source):
-        source = open_image_rgb(image_source)
+        source = Image.open(image_source).convert('RGB')
         source_width = source.size[0]
         source_height = source.size[1]
         if size:

@@ -470,7 +450,7 @@ def resize_image(image_source, image_output, width=None, size=None):
             height = max(height, 1)

         if width < source_width:
-            resize_method = Image.LANCZOS
+            resize_method = Image.ANTIALIAS
         else:
             resize_method = Image.BICUBIC
         output = source.resize((width, height), resize_method)
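Note: `Image.ANTIALIAS` on the right is the older Pillow name for the Lanczos filter; it was deprecated in favor of `Image.LANCZOS` and removed entirely in Pillow 10. A version-tolerant sketch (an assumption about the installed Pillow, not code from this repo):

```python
from PIL import Image

# use the current name when available, fall back to the pre-Pillow-10 alias
LANCZOS = getattr(Image, 'LANCZOS', None) or getattr(Image, 'ANTIALIAS', None)
img = Image.new('RGB', (1920, 1080)).resize((64, 16), LANCZOS)
```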
@@ -484,7 +464,7 @@ def timeline(video, prefix, modes=None, size=None):
         size = [64, 16]
     if isinstance(video, str):
         video = [video]
-    cmd = [os.path.normpath(os.path.join(settings.BASE_DIR, '../bin/oxtimelines')),
+    cmd = ['../bin/oxtimelines',
            '-s', ','.join(map(str, reversed(sorted(size)))),
            '-m', ','.join(modes),
            '-o', prefix,

@@ -616,7 +596,7 @@ def timeline_strip(item, cuts, info, prefix):
                     print(frame, 'cut', c, 'frame', s, frame, 'width', widths[s], box)
                 # FIXME: why does this have to be frame+1?
                 frame_image = Image.open(item.frame((frame+1)/fps))
-                frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.LANCZOS)
+                frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.ANTIALIAS)
                 for x_ in range(widths[s]):
                     line_image.append(frame_image.crop((x_, 0, x_ + 1, timeline_height)))
                 frame += widths[s]
@@ -744,24 +724,19 @@ def remux_stream(src, dst):
     cmd = [
         settings.FFMPEG,
         '-nostats', '-loglevel', 'error',
-        '-i', src,
         '-map_metadata', '-1', '-sn',
+        '-i', src,
     ] + video + [
     ] + audio + [
         '-movflags', '+faststart',
         dst
     ]
-    print(cmd)
     p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
-                         stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT,
+                         stdout=open('/dev/null', 'w'),
+                         stderr=open('/dev/null', 'w'),
                          close_fds=True)
-    stdout, stderr = p.communicate()
-    if stderr:
-        logger.error("failed to remux %s %s", cmd, stderr)
-        return False, stderr
-    else:
-        return True, None
+    p.wait()
+    return True, None


 def ffprobe(path, *args):
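Note: the two sides of the `remux_stream` hunk handle failure differently: the left captures ffmpeg's output and propagates `(False, stderr)` to the caller, while the right discards all output and unconditionally reports success. A small sketch of the error-propagating pattern, keyed on the exit code rather than captured output (an illustration, not the repository's exact code):

```python
import subprocess

def run_remux(cmd):
    # merge stderr into stdout and let the exit code decide success
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, close_fds=True)
    output, _ = p.communicate()
    if p.returncode != 0:
        return False, output.decode(errors='replace')
    return True, None
```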
@@ -1,100 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:24
-
-import django.core.serializers.json
-from django.db import migrations, models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('archive', '0005_auto_20180804_1554'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='file',
-            name='extension',
-            field=models.CharField(default='', max_length=255, null=True),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='info',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='language',
-            field=models.CharField(default='', max_length=255, null=True),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='part',
-            field=models.CharField(default='', max_length=255, null=True),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='part_title',
-            field=models.CharField(default='', max_length=255, null=True),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='path',
-            field=models.CharField(default='', max_length=2048),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='sort_path',
-            field=models.CharField(default='', max_length=2048),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='type',
-            field=models.CharField(default='', max_length=255),
-        ),
-        migrations.AlterField(
-            model_name='file',
-            name='version',
-            field=models.CharField(default='', max_length=255, null=True),
-        ),
-        migrations.AlterField(
-            model_name='frame',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='instance',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='stream',
-            name='error',
-            field=models.TextField(blank=True, default=''),
-        ),
-        migrations.AlterField(
-            model_name='stream',
-            name='format',
-            field=models.CharField(default='webm', max_length=255),
-        ),
-        migrations.AlterField(
-            model_name='stream',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='stream',
-            name='info',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='volume',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]

@@ -1,17 +0,0 @@
-# Generated by Django 4.2.3 on 2023-08-18 12:54
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('archive', '0006_alter_file_extension_alter_file_id_alter_file_info_and_more'),
-    ]
-
-    operations = [
-        migrations.AddIndex(
-            model_name='stream',
-            index=models.Index(fields=['file', 'source', 'available'], name='archive_str_file_id_69a542_idx'),
-        ),
-    ]
@@ -151,10 +151,8 @@ class File(models.Model):
             self.sampleate = 0
             self.channels = 0

-        if self.framerate and self.duration > 0:
+        if self.framerate:
             self.pixels = int(self.width * self.height * float(utils.parse_decimal(self.framerate)) * self.duration)
-        else:
-            self.pixels = 0

     def get_path_info(self):
         data = {}
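Note: `pixels` above is the total number of decoded pixels over the file's duration, used as a size/cost measure; the left side's extra `duration > 0` guard keeps files with a missing duration at 0. A worked example with illustrative numbers:

```python
# illustrative values: a 1920x1080 file at 25 fps lasting 60 seconds
width, height, framerate, duration = 1920, 1080, 25.0, 60.0
pixels = int(width * height * framerate * duration)
assert pixels == 3110400000  # ~3.1e9 decoded pixels
```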
@@ -183,13 +181,6 @@ class File(models.Model):
         for type in ox.movie.EXTENSIONS:
             if data['extension'] in ox.movie.EXTENSIONS[type]:
                 data['type'] = type
-        if data['extension'] == 'ogg' and self.info.get('video'):
-            data['type'] = 'video'
-        if data['type'] == 'unknown':
-            if self.info.get('video'):
-                data['type'] = 'video'
-            elif self.info.get('audio'):
-                data['type'] = 'audio'
         if 'part' in data and isinstance(data['part'], int):
             data['part'] = str(data['part'])
         return data

@@ -277,7 +268,7 @@ class File(models.Model):

         if self.type not in ('audio', 'video'):
             self.duration = None
-        elif self.id:
+        else:
             duration = sum([s.info.get('duration', 0)
                             for s in self.streams.filter(source=None)])
             if duration:

@@ -285,7 +276,7 @@ class File(models.Model):

         if self.is_subtitle:
             self.available = self.data and True or False
-        elif self.id:
+        else:
             self.available = not self.uploading and \
                 self.streams.filter(source=None, available=True).count()
         super(File, self).save(*args, **kwargs)

@@ -345,9 +336,7 @@ class File(models.Model):

         def done_cb():
             if done:
-                info = ox.avinfo(self.data.path)
-                del info['path']
-                self.info.update(info)
+                self.info.update(ox.avinfo(self.data.path))
                 self.parse_info()
                 # reject invalid uploads
                 if self.info.get('oshash') != self.oshash:

@@ -374,8 +363,8 @@ class File(models.Model):
                     self.info.update(stream.info)
                     self.parse_info()
                     self.save()
-                    #if stream.info.get('video'):
-                    #    extract.make_keyframe_index(stream.media.path)
+                    if stream.info.get('video'):
+                        extract.make_keyframe_index(stream.media.path)
                 return True, stream.media.size
             return save_chunk(stream, stream.media, chunk, offset, name, done_cb)
         return False, 0

@@ -404,7 +393,7 @@ class File(models.Model):
         config = settings.CONFIG['video']
         height = self.info['video'][0]['height'] if self.info.get('video') else None
         max_resolution = max(config['resolutions'])
-        if height and height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'):
+        if height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'):
             vcodec = self.get_codec('video')
             acodec = self.get_codec('audio')
             if vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:

@@ -415,7 +404,7 @@ class File(models.Model):
         config = settings.CONFIG['video']
         height = self.info['video'][0]['height'] if self.info.get('video') else None
         max_resolution = max(config['resolutions'])
-        if height and height <= max_resolution and config['formats'][0] == self.extension:
+        if height <= max_resolution and config['formats'][0] == self.extension:
             vcodec = self.get_codec('video')
             acodec = self.get_codec('audio')
             if self.extension in ['mp4', 'm4v'] and vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:

@@ -492,13 +481,6 @@ class File(models.Model):
             user.is_staff or \
             self.item.user == user or \
             self.item.groups.filter(id__in=user.groups.all()).count() > 0
-        if 'instances' in data and 'filename' in self.info and self.data:
-            data['instances'].append({
-                'ignore': False,
-                'path': self.info['filename'],
-                'user': self.item.user.username if self.item and self.item.user else 'system',
-                'volume': 'Direct Upload'
-            })
         if not can_see_media:
             if 'instances' in data:
                 data['instances'] = []

@@ -734,9 +716,6 @@ class Stream(models.Model):

     class Meta:
         unique_together = ("file", "resolution", "format")
-        indexes = [
-            models.Index(fields=['file', 'source', 'available'])
-        ]

     file = models.ForeignKey(File, related_name='streams', on_delete=models.CASCADE)
     resolution = models.IntegerField(default=96)

@@ -816,15 +795,9 @@ class Stream(models.Model):
                 shutil.move(self.file.data.path, target)
                 self.file.data.name = ''
                 self.file.save()
-                self.available = True
-                self.save()
-                done = True
             elif self.file.can_remux():
                 ok, error = extract.remux_stream(media, target)
-                if ok:
-                    self.available = True
-                    self.save()
-                    done = True
+                done = True
             if not done:
                 ok, error = extract.stream(media, target, self.name(), info, flags=self.flags)

@@ -832,7 +805,7 @@ class Stream(models.Model):
         # get current version from db and update
         try:
             self.refresh_from_db()
-        except Stream.DoesNotExist:
+        except archive.models.DoesNotExist:
             pass
         else:
             self.update_status(ok, error)
@@ -1,9 +1,11 @@
 # -*- coding: utf-8 -*-

 from datetime import datetime
-from time import time, monotonic
+from time import time

+import celery.task.control
+import kombu.five
+
-from app.celery import app

 from .models import File

@@ -16,7 +18,7 @@ def parse_job(job):
         'file': f.oshash
     }
     if job['time_start']:
-        start_time = datetime.fromtimestamp(time() - (monotonic() - job['time_start']))
+        start_time = datetime.fromtimestamp(time() - (kombu.five.monotonic() - job['time_start']))
         r.update({
             'started': start_time,
             'running': (datetime.now() - start_time).total_seconds()
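Note: `job['time_start']` comes from the worker's monotonic clock, so both sides recover a wall-clock start time by subtracting the elapsed monotonic interval from `time()`; the only difference is where `monotonic()` is imported from. A worked sketch:

```python
from datetime import datetime
from time import monotonic, time

# a job that started 90 monotonic seconds ago (illustrative value)
job_time_start = monotonic() - 90
started = datetime.fromtimestamp(time() - (monotonic() - job_time_start))
running = (datetime.now() - started).total_seconds()  # roughly 90.0
```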
@@ -28,7 +30,7 @@ def parse_job(job):
 def status():
     status = []
     encoding_jobs = ('archive.tasks.extract_stream', 'archive.tasks.process_stream')
-    c = app.control.inspect()
+    c = celery.task.control.inspect()
     for job in c.active(safe=True).get('celery@pandora-encoding', []):
         if job['name'] in encoding_jobs:
             status.append(parse_job(job))

@@ -65,7 +67,7 @@ def fill_queue():
 def get_celery_worker_status():
     ERROR_KEY = "ERROR"
     try:
-        insp = app.control.inspect()
+        insp = celery.task.control.inspect()
         d = insp.stats()
         if not d:
             d = {ERROR_KEY: 'No running Celery workers were found.'}

@@ -2,14 +2,13 @@

 from glob import glob

+from celery.task import task
 from django.conf import settings
-from django.db import transaction
 from django.db.models import Q

 from item.models import Item
 from item.tasks import update_poster, update_timeline
 from taskqueue.models import Task
-from app.celery import app

 from . import models
 from . import extract
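Note: the import swap above drives every decorator change in this file: Celery 5 removed the module-level `celery.task.task` decorator, so current code registers tasks on the project's Celery app instance instead. A minimal sketch of the two styles (the `app.celery` module is this project's convention; the task body is illustrative):

```python
# current style (left side): register on the project's Celery app
from app.celery import app

@app.task(queue='encoding')
def process_stream(file_id):
    return file_id

# legacy style (right side, Celery 4 and earlier):
# from celery.task import task
#
# @task(queue='encoding')
# def process_stream(file_id):
#     return file_id
```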
@@ -69,7 +68,7 @@ def update_or_create_instance(volume, f):
         instance.file.item.update_wanted()
     return instance

-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_files(user, volume, files):
     user = models.User.objects.get(username=user)
     volume, created = models.Volume.objects.get_or_create(user=user, name=volume)

@@ -101,7 +100,7 @@ def update_files(user, volume, files):
             Task.start(i, user)
             update_timeline.delay(i.public_id)

-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_info(user, info):
     user = models.User.objects.get(username=user)
     files = models.File.objects.filter(oshash__in=list(info))

@@ -115,7 +114,7 @@ def update_info(user, info):
             Task.start(i, user)
             update_timeline.delay(i.public_id)

-@app.task(queue="encoding")
+@task(queue="encoding")
 def process_stream(fileId):
     '''
         process uploaded stream

@@ -141,7 +140,7 @@ def process_stream(fileId):
     Task.finish(file.item)
     return True

-@app.task(queue="encoding")
+@task(queue="encoding")
 def extract_stream(fileId):
     '''
         extract stream from direct upload

@@ -170,7 +169,7 @@ def extract_stream(fileId):
     models.File.objects.filter(id=fileId).update(encoding=False)
     Task.finish(file.item)

-@app.task(queue="encoding")
+@task(queue="encoding")
 def extract_derivatives(fileId, rebuild=False):
     file = models.File.objects.get(id=fileId)
     streams = file.streams.filter(source=None)

@@ -178,7 +177,7 @@ def extract_derivatives(fileId, rebuild=False):
         streams[0].extract_derivatives(rebuild)
     return True

-@app.task(queue="encoding")
+@task(queue="encoding")
 def update_stream(id):
     s = models.Stream.objects.get(pk=id)
     if not glob("%s*" % s.timeline_prefix):

@@ -200,11 +199,11 @@ def update_stream(id):
         c.update_calculated_values()
         c.save()

-@app.task(queue="encoding")
-def download_media(item_id, url, referer=None):
-    return external.download(item_id, url, referer)
+@task(queue="encoding")
+def download_media(item_id, url):
+    return external.download(item_id, url)

-@app.task(queue='default')
+@task(queue='default')
 def move_media(data, user):
     from changelog.models import add_changelog
     from item.models import get_item, Item, ItemSort

@@ -249,8 +248,7 @@ def move_media(data, user):
     if old_item and old_item.files.count() == 0 and i.files.count() == len(data['ids']):
         for a in old_item.annotations.all().order_by('id'):
             a.item = i
-            with transaction.atomic():
-                a.set_public_id()
+            a.set_public_id()
             Annotation.objects.filter(id=a.id).update(item=i, public_id=a.public_id)
         old_item.clips.all().update(item=i, sort=i.sort)
@@ -103,7 +103,7 @@ def update(request, data):
                 file__available=False,
                 file__wanted=True)]

-    if utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True):
+    if list(filter(lambda l: l['id'] == 'subtitles', settings.CONFIG['layers'])):
         qs = files.filter(
             file__is_subtitle=True,
             file__available=False

@@ -195,9 +195,7 @@ def addMedia(request, data):
         response['data']['item'] = f.item.public_id
         response['data']['itemUrl'] = request.build_absolute_uri('/%s' % f.item.public_id)
         if not f.available:
-            changelog_data = data.copy()
-            changelog_data['oshash'] = oshash
-            add_changelog(request, changelog_data, f.item.public_id)
+            add_changelog(request, data, f.item.public_id)
     else:
         if 'item' in data:
             i = Item.objects.get(public_id=data['item'])

@@ -222,15 +220,11 @@ def addMedia(request, data):
         if 'info' in data and data['info'] and isinstance(data['info'], dict):
             f.info = data['info']
             f.info['extension'] = extension
-            if 'filename' in data:
-                f.info['filename'] = data['filename']
             f.parse_info()
             f.save()
         response['data']['item'] = i.public_id
         response['data']['itemUrl'] = request.build_absolute_uri('/%s' % i.public_id)
-        changelog_data = data.copy()
-        changelog_data['oshash'] = oshash
-        add_changelog(request, changelog_data, i.public_id)
+        add_changelog(request, data, i.public_id)
     return render_to_json_response(response)
 actions.register(addMedia, cache=False)

@@ -745,7 +739,6 @@ def addMediaUrl(request, data):

     takes {
         url: string, // url
-        referer: string // optional referer url
         item: string // item
     }
     returns {

@@ -758,7 +751,7 @@ def addMediaUrl(request, data):
     response = json_response()
     i = Item.objects.get(public_id=data['item'])
     Task.start(i, request.user)
-    t = tasks.download_media.delay(data['item'], data['url'], data.get('referer'))
+    t = tasks.download_media.delay(data['item'], data['url'])
     response['data']['taskId'] = t.task_id
     add_changelog(request, data, data['item'])
     return render_to_json_response(response)
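Note: `addMediaUrl` is one of pan.do/ra's JSON API actions: a client posts an `action` name plus a JSON-encoded `data` payload and receives a task id back. A hypothetical client call matching the left-hand signature (endpoint, item id and URLs are placeholders, not real values):

```python
import json
import requests

# hypothetical values, for illustration only
r = requests.post('https://pandora.example.org/api/', data={
    'action': 'addMediaUrl',
    'data': json.dumps({
        'url': 'https://example.org/clip.mp4',
        'referer': 'https://example.org/page',  # optional, left side only
        'item': 'abc',
    }),
})
task_id = r.json()['data']['taskId']
```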
@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class ChangelogConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'changelog'
-

@@ -1,35 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:24
-
-import django.core.serializers.json
-from django.db import migrations, models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('changelog', '0002_jsonfield'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='changelog',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='changelog',
-            name='value',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='log',
-            name='data',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='log',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]

@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class ClipConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'clip'
-
@@ -17,7 +17,6 @@ keymap = {
     'place': 'annotations__places__id',
     'text': 'findvalue',
     'annotations': 'findvalue',
-    'layer': 'annotations__layer',
     'user': 'annotations__user__username',
 }
 case_insensitive_keys = ('annotations__user__username', )

@@ -1,18 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:24
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('clip', '0003_auto_20160219_1805'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='clip',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
@@ -8,7 +8,6 @@ import ox
 from archive import extract

 from . import managers
-from .utils import add_cuts


 def get_layers(item, interval=None, user=None):

@@ -60,7 +59,9 @@ class MetaClip(object):
             self.hue = self.saturation = self.lightness = 0
             self.volume = 0

-    def update_findvalue(self):
+    def save(self, *args, **kwargs):
+        if self.duration != self.end - self.start:
+            self.update_calculated_values()
         if not self.aspect_ratio and self.item:
             streams = self.item.streams()
             if streams:

@@ -88,11 +89,6 @@ class MetaClip(object):
         self.findvalue = '\n'.join(list(filter(None, [a.findvalue for a in anns])))
         for l in [k['id'] for k in settings.CONFIG['layers']]:
             setattr(self, l, l in anns_by_layer and bool(len(anns_by_layer[l])))
-
-    def save(self, *args, **kwargs):
-        if self.duration != self.end - self.start:
-            self.update_calculated_values()
-        self.update_findvalue()
         models.Model.save(self, *args, **kwargs)

     clip_keys = ('id', 'in', 'out', 'position', 'created', 'modified',

@@ -115,7 +111,8 @@ class MetaClip(object):
                 del j[key]
         #needed here to make item find with clips work
         if 'annotations' in keys:
-            annotations = self.annotations.all().exclude(value='')
+            #annotations = self.annotations.filter(layer__in=settings.CONFIG['clipLayers'])
+            annotations = self.annotations.all()
             if qs:
                 for q in qs:
                     annotations = annotations.filter(q)

@@ -153,12 +150,12 @@ class MetaClip(object):
                 data['annotation'] = qs[0].public_id
         data['parts'] = self.item.cache['parts']
         data['durations'] = self.item.cache['durations']
-        for key in settings.CONFIG['itemTitleKeys'] + ['videoRatio']:
+        for key in ('title', 'director', 'year', 'videoRatio'):
             value = self.item.cache.get(key)
             if value:
                 data[key] = value
         data['duration'] = data['out'] - data['in']
-        add_cuts(data, self.item, self.start, self.end)
+        data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end])
         data['layers'] = self.get_layers(user)
         data['streams'] = [s.file.oshash for s in self.item.streams()]
         return data

@@ -189,7 +186,6 @@ class MetaClip(object):
     def __str__(self):
         return self.public_id

-
     class Meta:
         unique_together = ("item", "start", "end")

@@ -1,22 +0,0 @@
-
-
-def add_cuts(data, item, start, end):
-    cuts = []
-    last = False
-    outer = []
-    first = 0
-    for cut in item.get('cuts', []):
-        if cut > start and cut < end:
-            if not cuts:
-                outer.append(first)
-            cuts.append(cut)
-            last = True
-        elif cut <= start:
-            first = cut
-        elif cut >= end:
-            if not len(outer):
-                outer.append(first)
-            if len(outer) == 1:
-                outer.append(cut)
-    data['cuts'] = tuple(cuts)
-    data['outerCuts'] = tuple(outer)
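Note: the removed utils module above (the source of the `add_cuts` import dropped from clip/models.py) collects the cuts strictly inside a clip plus the nearest enclosing cut on each side. A worked example of the intended result (illustrative numbers):

```python
# illustrative: an item with cuts at 0, 10, 20, 30, 40 and a clip spanning 12..28
cuts = [0, 10, 20, 30, 40]
start, end = 12, 28

inner = [c for c in cuts if start < c < end]              # [20]
first = max([c for c in cuts if c <= start], default=0)   # 10
last = min([c for c in cuts if c >= end], default=end)    # 30
# add_cuts would set data['cuts'] = (20,) and data['outerCuts'] = (10, 30)
assert tuple(inner) == (20,) and (first, last) == (10, 30)
```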
@@ -1009,7 +1009,7 @@
         {
             "id": "tags",
             "title": "Tags",
-            "canAddAnnotations": {"member": true, "friend": true, "staff": true, "admin": true},
+            "canAddAnnotations": {"member": true, "staff": true, "admin": true},
             "item": "Tag",
             "autocomplete": true,
             "overlap": true,

@@ -1399,8 +1399,10 @@
             corner of the screen
         "resolutions": List of video resolutions. Supported values are 96, 144,
             240, 288, 360, 432, 480, 720 and 1080.
+        "torrent": If true, video downloads are offered via BitTorrent
     */
     "video": {
+        "torrent": false,
         "formats": ["webm", "mp4"],
         // fixme: this should be named "ratio" or "defaultRatio",
         // as it also applies to clip lists (on load)

@@ -73,14 +73,13 @@
         "canSeeAccessed": {"researcher": true, "staff": true, "admin": true},
         "canSeeAllTasks": {"staff": true, "admin": true},
         "canSeeDebugMenu": {"researcher": true, "staff": true, "admin": true},
-        "canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3},
         "canSeeExtraItemViews": {"researcher": true, "staff": true, "admin": true},
-        "canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3},
         "canSeeMedia": {"researcher": true, "staff": true, "admin": true},
+        "canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3},
+        "canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3},
         "canSeeSize": {"researcher": true, "staff": true, "admin": true},
         "canSeeSoftwareVersion": {"researcher": true, "staff": true, "admin": true},
-        "canSendMail": {"staff": true, "admin": true},
-        "canShare": {"staff": true, "admin": true}
+        "canSendMail": {"staff": true, "admin": true}
     },
     /*
         "clipKeys" are the properties that clips can be sorted by (the values are

@@ -313,14 +312,6 @@
             "autocomplete": true,
             "columnWidth": 128
         },
-        {
-            "id": "fulltext",
-            "operator": "+",
-            "title": "Fulltext",
-            "type": "text",
-            "fulltext": true,
-            "find": true
-        },
         {
             "id": "created",
             "operator": "-",

@@ -1503,7 +1494,6 @@
             "hasEvents": true,
             "hasPlaces": true,
             "item": "Keyword",
-            "autocomplete": true,
             "overlap": true,
             "type": "string"
         },

@@ -1885,8 +1875,10 @@
             corner of the screen
         "resolutions": List of video resolutions. Supported values are 96, 144,
             240, 288, 360, 432, 480, 720 and 1080.
+        "torrent": If true, video downloads are offered via BitTorrent
     */
     "video": {
+        "torrent": false,
         "formats": ["webm", "mp4"],
         "previewRatio": 1.375,
         "resolutions": [240, 480]

@@ -71,14 +71,13 @@
         "canSeeAccessed": {"staff": true, "admin": true},
         "canSeeAllTasks": {"staff": true, "admin": true},
         "canSeeDebugMenu": {"staff": true, "admin": true},
-        "canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
         "canSeeExtraItemViews": {"staff": true, "admin": true},
-        "canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
         "canSeeMedia": {"staff": true, "admin": true},
+        "canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
+        "canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
         "canSeeSize": {"staff": true, "admin": true},
         "canSeeSoftwareVersion": {"staff": true, "admin": true},
-        "canSendMail": {"staff": true, "admin": true},
-        "canShare": {"staff": true, "admin": true}
+        "canSendMail": {"staff": true, "admin": true}
     },
     /*
         "clipKeys" are the properties that clips can be sorted by (the values are

@@ -247,28 +246,6 @@
             "filter": true,
             "find": true
         },
-        {
-            "id": "source",
-            "title": "Source",
-            "type": "string",
-            "autocomplete": true,
-            "description": true,
-            "columnWidth": 180,
-            "filter": true,
-            "find": true,
-            "sort": true
-        },
-        {
-            "id": "project",
-            "title": "Project",
-            "type": "string",
-            "autocomplete": true,
-            "description": true,
-            "columnWidth": 120,
-            "filter": true,
-            "find": true,
-            "sort": true
-        },
         {
             "id": "id",
             "operator": "+",

@@ -314,24 +291,6 @@
             "sort": true,
             "columnWidth": 256
         },
-        {
-            "id": "content",
-            "operator": "+",
-            "title": "Content",
-            "type": "text",
-            "find": true,
-            "sort": true,
-            "columnWidth": 256
-        },
-        {
-            "id": "translation",
-            "operator": "+",
-            "title": "Translation",
-            "type": "text",
-            "find": true,
-            "sort": true,
-            "columnWidth": 256
-        },
         {
             "id": "matches",
             "operator": "-",

@@ -351,20 +310,6 @@
             "autocomplete": true,
             "columnWidth": 128
         },
-        {
-            "id": "notes",
-            "title": "Notes",
-            "type": "text",
-            "capability": "canEditMetadata"
-        },
-        {
-            "id": "fulltext",
-            "operator": "+",
-            "title": "Fulltext",
-            "type": "text",
-            "fulltext": true,
-            "find": true
-        },
         {
             "id": "created",
             "operator": "-",

@@ -600,6 +545,7 @@
             "title": "Director",
             "type": ["string"],
             "autocomplete": true,
+            "columnRequired": true,
             "columnWidth": 180,
             "sort": true,
             "sortType": "person"

@@ -618,6 +564,7 @@
             "title": "Featuring",
             "type": ["string"],
             "autocomplete": true,
+            "columnRequired": true,
             "columnWidth": 180,
             "filter": true,
             "sort": true,

@@ -673,7 +620,7 @@
         {
             "id": "annotations",
             "title": "Annotations",
-            "type": "text", // fixme: not the best type for this magic key
+            "type": "string", // fixme: not the best type for this magic key
             "find": true
         },
         {

@@ -711,7 +658,7 @@
         },
         {
             "id": "numberofannotations",
-            "title": "Number of Annotations",
+            "title": "Annotations",
             "type": "integer",
             "columnWidth": 60,
             "sort": true

@@ -847,16 +794,12 @@
             "id": "user",
             "title": "User",
             "type": "string",
-            "columnWidth": 90,
             "capability": "canSeeMedia",
-            "sort": true,
             "find": true
         },
         {
             "id": "groups",
             "title": "Group",
-            "columnWidth": 90,
-            "sort": true,
             "type": ["string"]
         },
         {

@@ -1389,8 +1332,10 @@
             corner of the screen
         "resolutions": List of video resolutions. Supported values are 96, 144,
             240, 288, 360, 432, 480, 720 and 1080.
+        "torrent": If true, video downloads are offered via BitTorrent
     */
     "video": {
+        "torrent": true,
         "formats": ["webm", "mp4"],
         "previewRatio": 1.3333333333,
         //supported resolutions are

@@ -29,7 +29,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
         "text": Text shown on mouseover
     */
     "cantPlay": {
-        "icon": "NoCopyright",
+        "icon": "noCopyright",
         "link": "",
         "text": ""
     },

@@ -67,7 +67,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
         "canManageEntities": {"member": true, "staff": true, "admin": true},
         "canManageHome": {"staff": true, "admin": true},
         "canManagePlacesAndEvents": {"member": true, "staff": true, "admin": true},
-        "canManageTitlesAndNames": {"member": false, "staff": true, "admin": true},
+        "canManageTitlesAndNames": {"member": true, "staff": true, "admin": true},
         "canManageTranslations": {"admin": true},
         "canManageUsers": {"staff": true, "admin": true},
         "canPlayClips": {"guest": 1, "member": 1, "staff": 4, "admin": 4},

@@ -102,7 +102,8 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
     ],
     /*
         "clipLayers" is the ordered list of public layers that will appear as the
-        text of clips (in grid view, below the icon).
+        text of clips (in grid view, below the icon). Excluding a layer from this
+        list means it will not be included in find annotations.
     */
     "clipLayers": ["publicnotes", "keywords", "subtitles"],
     /*

@@ -350,11 +351,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
             "type": "enum",
             "columnWidth": 90,
             "format": {"type": "ColorLevel", "args": [
-                ["Public", "Restricted", "Private"]
+                ["Public", "Out of Copyright", "Under Copyright", "Private"]
             ]},
             "sort": true,
             "sortOperator": "+",
-            "values": ["Public", "Restricted", "Private", "Unknown"]
+            "values": ["Public", "Out of Copyright", "Under Copyright", "Private", "Unknown"]
         }
     ],
     /*

@@ -752,13 +753,6 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
             "capability": "canSeeMedia",
             "find": true
         },
-        {
-            "id": "filename",
-            "title": "Filename",
-            "type": ["string"],
-            "capability": "canSeeMedia",
-            "find": true
-        },
         {
             "id": "created",
             "title": "Date Created",

@@ -1165,11 +1159,6 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
         "findDocuments": {"conditions": [], "operator": "&"},
         "followPlayer": true,
         "help": "",
-        "hidden": {
-            "collections": [],
-            "edits": [],
-            "lists": []
-        },
         "icons": "posters",
         "infoIconSize": 256,
         "item": "",

@@ -1278,11 +1267,13 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
             corner of the screen
         "resolutions": List of video resolutions. Supported values are 96, 144,
             240, 288, 360, 432, 480, 720 and 1080.
+        "torrent": If true, video downloads are offered via BitTorrent
     */
     "video": {
         "downloadFormat": "webm",
         "formats": ["webm", "mp4"],
         "previewRatio": 1.3333333333,
-        "resolutions": [240, 480]
+        "resolutions": [240, 480],
+        "torrent": false
     }
 }

@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class DocumentConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'document'
-
@@ -1,37 +1,14 @@
-import logging
-import os
 import subprocess
-import tempfile

 from django.conf import settings


-logger = logging.getLogger('pandora.' + __name__)
-
-
-def extract_text(pdf, page=None):
-    if page is not None:
-        page = str(page)
-        cmd = ['pdftotext', '-f', page, '-l', page, pdf, '-']
-    else:
-        cmd = ['pdftotext', pdf, '-']
+def extract_text(pdf):
+    cmd = ['pdftotext', pdf, '-']
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stdout, stderr = p.communicate()
-    stdout = stdout.decode().strip()
-    if not stdout:
-        if page:
-            # split page from pdf and ocr
-            fd, page_pdf = tempfile.mkstemp('.pdf')
-            cmd = ['pdfseparate', '-f', page, '-l', page, pdf, page_pdf]
-            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
-            stdout, stderr = p.communicate()
-            text = ocr_image(page_pdf)
-            os.unlink(page_pdf)
-            os.close(fd)
-            return text
-        else:
-            return ocr_image(pdf)
-    return stdout
+    stdout = stdout.decode()
+    return stdout.strip()


 def ocr_image(path):
     cmd = ['tesseract', path, '-', 'txt']
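Note: on the left, `extract_text` can be limited to a single page: `pdftotext -f N -l N` extracts just page N, and if that page has no text layer it is split out with `pdfseparate` and run through tesseract OCR. A compact sketch of the per-page call (assuming poppler-utils is installed):

```python
import subprocess

def pdf_page_text(pdf, page):
    # pdftotext -f N -l N limits extraction to page N; '-' writes to stdout
    cmd = ['pdftotext', '-f', str(page), '-l', str(page), pdf, '-']
    out = subprocess.run(cmd, capture_output=True).stdout
    return out.decode(errors='replace').strip()
```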
@@ -66,11 +43,9 @@ class FulltextMixin:
         if self.has_fulltext_key():
             from elasticsearch.exceptions import NotFoundError
             try:
-                res = self.elasticsearch().delete(index=self._ES_INDEX, id=self.id)
+                res = self.elasticsearch().delete(index=self._ES_INDEX, doc_type='document', id=self.id)
             except NotFoundError:
                 pass
-            except:
-                logger.error('failed to delete fulltext document', exc_info=True)

     def update_fulltext(self):
         if self.has_fulltext_key():

@@ -79,7 +54,7 @@ class FulltextMixin:
             doc = {
                 'text': text.lower()
             }
-            res = self.elasticsearch().index(index=self._ES_INDEX, id=self.id, body=doc)
+            res = self.elasticsearch().index(index=self._ES_INDEX, doc_type='document', id=self.id, body=doc)

     @classmethod
     def find_fulltext(cls, query):
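Note: the `doc_type` argument on the right was required by Elasticsearch 6 clients and was removed in 7+, which is the whole difference between the two sides here. A hedged sketch of the 7+-style calls (host and index name are assumptions, not values from this repo):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch()  # assumes a local node on :9200
res = es.index(index='document-index', id=1, body={'text': 'example'})
es.delete(index='document-index', id=1)
```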
@@ -120,69 +95,3 @@ class FulltextMixin:
             ids += [int(r['_id']) for r in res['hits']['hits']]
             from_ += len(res['hits']['hits'])
         return ids
-
-    def highlight_page(self, page, query, size):
-        import pypdfium2 as pdfium
-        from PIL import Image
-        from PIL import ImageDraw
-
-        pdfpath = self.file.path
-        pagenumber = int(page) - 1
-        jpg = tempfile.NamedTemporaryFile(suffix='.jpg')
-        output = jpg.name
-        TINT_COLOR = (255, 255, 0)
-        TRANSPARENCY = .45
-        OPACITY = int(255 * TRANSPARENCY)
-        scale = 150/72
-
-        pdf = pdfium.PdfDocument(pdfpath)
-        page = pdf[pagenumber]
-
-        bitmap = page.render(scale=scale, rotation=0)
-        img = bitmap.to_pil().convert('RGBA')
-        overlay = Image.new('RGBA', img.size, TINT_COLOR+(0,))
-        draw = ImageDraw.Draw(overlay)
-
-        textpage = page.get_textpage()
-        search = textpage.search(query)
-        result = search.get_next()
-        while result:
-            pos, steps = result
-            steps += 1
-            while steps:
-                box = textpage.get_charbox(pos)
-                box = [b*scale for b in box]
-                tl = (box[0], img.size[1] - box[3])
-                br = (box[2], img.size[1] - box[1])
-                draw.rectangle((tl, br), fill=TINT_COLOR+(OPACITY,))
-                pos += 1
-                steps -= 1
-            result = search.get_next()
-        img = Image.alpha_composite(img, overlay)
-        img = img.convert("RGB")
-        aspect = img.size[0] / img.size[1]
-        resize_method = Image.LANCZOS
-        if img.size[0] >= img.size[1]:
-            width = size
-            height = int(size / aspect)
-        else:
-            width = int(size / aspect)
-            height = size
-        img = img.resize((width, height), resize_method)
-        img.save(output, quality=72)
-        return jpg
-
-
-class FulltextPageMixin(FulltextMixin):
-    _ES_INDEX = "document-page-index"
-
-    def extract_fulltext(self):
-        if self.document.file:
-            if self.document.extension == 'pdf':
-                return extract_text(self.document.file.path, self.page)
-            elif self.extension in ('png', 'jpg'):
-                return ocr_image(self.document.file.path)
-            elif self.extension == 'html':
-                # FIXME: is there a nice way to split that into pages
-                return self.data.get('text', '')
-        return ''
|
|
@ -5,6 +5,7 @@ from django.db import connection, transaction
|
||||||
from django.db.models import fields
|
from django.db.models import fields
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
|
|
||||||
|
settings.RELOAD_CONFIG = False
|
||||||
import app.monkey_patch
|
import app.monkey_patch
|
||||||
from ... import models
|
from ... import models
|
||||||
|
|
||||||
|
|
|
@ -5,6 +5,7 @@ from django.db import connection, transaction
|
||||||
from django.db.models import fields
|
from django.db.models import fields
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
|
|
||||||
|
settings.RELOAD_CONFIG = False
|
||||||
import app.monkey_patch
|
import app.monkey_patch
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-from datetime import datetime
 import unicodedata
 
 from django.db.models import Q, Manager
@@ -15,7 +14,6 @@ from documentcollection.models import Collection
 from item import utils
 from user.models import Group
 
-from .pages import PageManager
 
 keymap = {
     'item': 'items__public_id',
@@ -63,7 +61,7 @@ def parseCondition(condition, user, item=None, owner=None):
 
 def buildCondition(k, op, v, user, exclude=False, owner=None):
     import entity.models
-    from .. import models
+    from . import models
 
     # fixme: frontend should never call with list
     if k == 'list':
@@ -299,8 +297,5 @@ class DocumentManager(Manager):
                 q |= Q(groups__in=user.groups.all())
                 rendered_q |= Q(groups__in=user.groups.all())
             qs = qs.filter(q)
-        max_level = len(settings.CONFIG['documentRightsLevels'])
-        qs = qs.filter(rightslevel__lte=max_level)
-
         return qs
@@ -1,302 +0,0 @@
-# -*- coding: utf-8 -*-
-from datetime import datetime
-import unicodedata
-
-from six import string_types
-from django.db.models import Q, Manager
-from django.conf import settings
-
-import ox
-from oxdjango.query import QuerySet
-
-import entity.managers
-from oxdjango.managers import get_operator
-
-from documentcollection.models import Collection
-from item import utils
-from user.models import Group
-
-
-keymap = {
-    'item': 'items__public_id',
-}
-default_key = 'title'
-
-def get_key_type(k):
-    key_type = (utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'}).get('type')
-    if isinstance(key_type, list):
-        key_type = key_type[0]
-    key_type = {
-        'title': 'string',
-        'person': 'string',
-        'text': 'string',
-        'year': 'string',
-        'length': 'string',
-        'layer': 'string',
-        'list': 'list',
-    }.get(key_type, key_type)
-    return key_type
-
-
-def parseCondition(condition, user, item=None, owner=None):
-    '''
-    '''
-    k = condition.get('key', default_key)
-    k = keymap.get(k, k)
-    if not k:
-        k = default_key
-    if item and k == 'description':
-        item_conditions = condition.copy()
-        item_conditions['key'] = 'items__itemproperties__description'
-        return parseCondition(condition, user) | parseCondition(item_conditions, user)
-
-    v = condition['value']
-    op = condition.get('operator')
-    if not op:
-        op = '='
-
-    if op.startswith('!'):
-        return buildCondition(k, op[1:], v, user, True, owner=owner)
-    else:
-        return buildCondition(k, op, v, user, owner=owner)
-
-def buildCondition(k, op, v, user, exclude=False, owner=None):
-    import entity.models
-    from .. import models
-
-    # fixme: frontend should never call with list
-    if k == 'list':
-        print('fixme: frontend should never call with list', k, op, v)
-        k = 'collection'
-
-    key_type = get_key_type(k)
-
-    key_config = (utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'})
-
-    facet_keys = models.Document.facet_keys
-    if k == 'document':
-        k = 'document__id'
-        if op == '&' and isinstance(v, list):
-            v = [ox.fromAZ(id_) for id_ in v]
-            k += get_operator(op)
-        else:
-            v = ox.fromAZ(v)
-        q = Q(**{k: v})
-        if exclude:
-            q = ~Q(document__id__in=models.Document.objects.filter(q))
-        return q
-    elif k == 'rightslevel':
-        q = Q(document__rightslevel=v)
-        if exclude:
-            q = ~Q(document__rightslevel=v)
-        return q
-    elif k == 'groups':
-        if op == '==' and v == '$my':
-            if not owner:
-                owner = user
-            groups = owner.groups.all()
-        else:
-            key = 'name' + get_operator(op)
-            groups = Group.objects.filter(**{key: v})
-        if not groups.count():
-            return Q(id=0)
-        q = Q(document__groups__in=groups)
-        if exclude:
-            q = ~q
-        return q
-    elif k in ('oshash', 'items__public_id'):
-        q = Q(**{k: v})
-        if exclude:
-            q = ~Q(id__in=models.Document.objects.filter(q))
-        return q
-    elif isinstance(v, bool):
-        key = k
-    elif k == 'entity':
-        entity_key, entity_v = entity.managers.namePredicate(op, v)
-        key = 'id__in'
-        v = entity.models.DocumentProperties.objects.filter(**{
-            'entity__' + entity_key: entity_v
-        }).values_list('document_id', flat=True)
-    elif k == 'collection':
-        q = Q(id=0)
-        l = v.split(":", 1)
-        if len(l) >= 2:
-            lqs = list(Collection.objects.filter(name=l[1], user__username=l[0]))
-            if len(lqs) == 1 and lqs[0].accessible(user):
-                l = lqs[0]
-                if l.query.get('static', False) is False:
-                    data = l.query
-                    q = parseConditions(data.get('conditions', []),
-                                        data.get('operator', '&'),
-                                        user, owner=l.user)
-                else:
-                    q = Q(id__in=l.documents.all())
-            else:
-                q = Q(id=0)
-        return q
-    elif key_config.get('fulltext'):
-        qs = models.Page.find_fulltext_ids(v)
-        q = Q(id__in=qs)
-        if exclude:
-            q = ~Q(id__in=qs)
-        return q
-    elif key_type == 'boolean':
-        q = Q(**{'find__key': k, 'find__value': v})
-        if exclude:
-            q = ~Q(id__in=models.Document.objects.filter(q))
-        return q
-    elif key_type == "string":
-        in_find = True
-        if in_find:
-            value_key = 'find__value'
-        else:
-            value_key = k
-        if isinstance(v, string_types):
-            v = unicodedata.normalize('NFKD', v).lower()
-        if k in facet_keys:
-            in_find = False
-            facet_value = 'facets__value' + get_operator(op, 'istr')
-            v = models.Document.objects.filter(**{'facets__key': k, facet_value: v})
-            value_key = 'id__in'
-        else:
-            value_key = value_key + get_operator(op)
-        k = str(k)
-        value_key = str(value_key)
-        if k == '*':
-            q = Q(**{'find__value' + get_operator(op): v}) | \
-                Q(**{'facets__value' + get_operator(op, 'istr'): v})
-        elif in_find:
-            q = Q(**{'find__key': k, value_key: v})
-        else:
-            q = Q(**{value_key: v})
-        if exclude:
-            q = ~Q(id__in=models.Document.objects.filter(q))
-        return q
-    elif key_type == 'date':
-        def parse_date(d):
-            while len(d) < 3:
-                d.append(1)
-            return datetime(*[int(i) for i in d])
-
-        #using sort here since find only contains strings
-        v = parse_date(v.split('-'))
-        vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
-        vk = str(vk)
-        q = Q(**{vk: v})
-        if exclude:
-            q = ~q
-        return q
-    else: # integer, float, list, time
-        #use sort table here
-        if key_type == 'time':
-            v = int(utils.parse_time(v))
-
-        vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
-        vk = str(vk)
-        q = Q(**{vk: v})
-        if exclude:
-            q = ~q
-        return q
-    key = str(key)
-    q = Q(**{key: v})
-    if exclude:
-        q = ~q
-    return q
-
-
-def parseConditions(conditions, operator, user, item=None, owner=None):
-    '''
-    conditions: [
-        {
-            value: "war"
-        }
-        {
-            key: "year",
-            value: "1970-1980,
-            operator: "!="
-        },
-        {
-            key: "country",
-            value: "f",
-            operator: "^"
-        }
-    ],
-    operator: "&"
-    '''
-    conn = []
-    for condition in conditions:
-        if 'conditions' in condition:
-            q = parseConditions(condition['conditions'],
-                                condition.get('operator', '&'), user, item, owner=owner)
-            if q:
-                conn.append(q)
-            pass
-        else:
-            conn.append(parseCondition(condition, user, item, owner=owner))
-    if conn:
-        q = conn[0]
-        for c in conn[1:]:
-            if operator == '|':
-                q = q | c
-            else:
-                q = q & c
-        return q
-    return None
-
-
-class PageManager(Manager):
-
-    def get_query_set(self):
-        return QuerySet(self.model)
-
-    def find(self, data, user, item=None):
-        '''
-        query: {
-            conditions: [
-                {
-                    value: "war"
-                }
-                {
-                    key: "year",
-                    value: "1970-1980,
-                    operator: "!="
-                },
-                {
-                    key: "country",
-                    value: "f",
-                    operator: "^"
-                }
-            ],
-            operator: "&"
-        }
-        '''
-
-        #join query with operator
-        qs = self.get_query_set()
-        query = data.get('query', {})
-        conditions = parseConditions(query.get('conditions', []),
-                                     query.get('operator', '&'),
-                                     user, item)
-        if conditions:
-            qs = qs.filter(conditions)
-        qs = qs.distinct()
-
-        #anonymous can only see public items
-        if not user or user.is_anonymous:
-            level = 'guest'
-            allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
-            qs = qs.filter(document__rightslevel__lte=allowed_level)
-            rendered_q = Q(rendered=True)
-        #users can see public items, there own items and items of there groups
-        else:
-            level = user.profile.get_level()
-            allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
-            q = Q(document__rightslevel__lte=allowed_level) | Q(document__user=user)
-            rendered_q = Q(rendered=True) | Q(document__user=user)
-            if user.groups.count():
-                q |= Q(document__groups__in=user.groups.all())
-                rendered_q |= Q(document__groups__in=user.groups.all())
-            qs = qs.filter(q)
-
-        return qs
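The deleted manager module above is one more instance of the query pattern used throughout pan.do/ra: a nested JSON condition tree is folded into Django `Q` objects, which then filter the queryset before rights levels are applied. A reduced, self-contained sketch of the folding step; the condition shape follows the docstring above, while the `__icontains` lookup is a simplification of the per-key dispatch that `buildCondition` performs:

```
# Sketch of the Q-folding pattern, independent of pan.do/ra's models.
from django.db.models import Q

def fold(conditions, operator='&'):
    # one Q per condition, combined with | or & depending on the operator
    qs = [Q(**{c.get('key', 'title') + '__icontains': c['value']})
          for c in conditions]
    if not qs:
        return None
    combined = qs[0]
    for q in qs[1:]:
        combined = (combined | q) if operator == '|' else (combined & q)
    return combined

query = fold([{'key': 'title', 'value': 'war'},
              {'key': 'description', 'value': 'archive'}], '&')
# Document.objects.filter(query) would then apply both conditions at once.
```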
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by Django 1.11.22 on 2020-05-13 00:01
-from __future__ import unicode_literals
-
-import django.core.serializers.json
-from django.db import migrations, models
-import django.db.models.deletion
-import document.fulltext
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('document', '0011_jsonfield'),
-    ]
-
-    operations = [
-        migrations.CreateModel(
-            name='Page',
-            fields=[
-                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('created', models.DateTimeField(auto_now_add=True)),
-                ('modified', models.DateTimeField(auto_now=True)),
-                ('page', models.IntegerField(default=1)),
-                ('data', oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder)),
-            ],
-            bases=(models.Model, document.fulltext.FulltextPageMixin),
-        ),
-        migrations.AddField(
-            model_name='page',
-            name='document',
-            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages_set', to='document.Document'),
-        ),
-    ]
@@ -1,55 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:24
-
-import django.core.serializers.json
-from django.db import migrations, models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('document', '0012_auto_20200513_0001'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='access',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='document',
-            name='data',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='document',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='facet',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='find',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='itemproperties',
-            name='description',
-            field=models.TextField(default=''),
-        ),
-        migrations.AlterField(
-            model_name='itemproperties',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='page',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
@@ -6,12 +6,11 @@ import os
 import re
 import unicodedata
 
-from django.conf import settings
-from django.contrib.auth import get_user_model
 from django.db import models, transaction
 from django.db.models import Q, Sum, Max
+from django.contrib.auth import get_user_model
 from django.db.models.signals import pre_delete
-from django.utils import datetime_safe
+from django.conf import settings
 from oxdjango.fields import JSONField
 
 from PIL import Image
@@ -22,7 +21,7 @@ from oxdjango.sortmodel import get_sort_field
 from person.models import get_name_sort
 from item.models import Item
 from annotation.models import Annotation
-from archive.extract import resize_image, open_image_rgb
+from archive.extract import resize_image
 from archive.chunk import save_chunk
 from user.models import Group
 from user.utils import update_groups
@@ -30,7 +29,7 @@ from user.utils import update_groups
 from . import managers
 from . import utils
 from . import tasks
-from .fulltext import FulltextMixin, FulltextPageMixin
+from .fulltext import FulltextMixin
 
 User = get_user_model()
 
@@ -80,7 +79,7 @@ class Document(models.Model, FulltextMixin):
         current_values = []
         for k in settings.CONFIG['documentKeys']:
             if k.get('sortType') == 'person':
-                current_values += self.get_value(k['id'], [])
+                current_values += self.get(k['id'], [])
         if not isinstance(current_values, list):
             if not current_values:
                 current_values = []
@@ -328,9 +327,6 @@ class Document(models.Model, FulltextMixin):
     def editable(self, user, item=None):
        if not user or user.is_anonymous:
            return False
-        max_level = len(settings.CONFIG['rightsLevels'])
-        if self.rightslevel > max_level:
-            return False
        if self.user == user or \
           self.groups.filter(id__in=user.groups.all()).count() > 0 or \
           user.is_staff or \
@@ -350,8 +346,6 @@ class Document(models.Model, FulltextMixin):
             groups = data.pop('groups')
             update_groups(self, groups)
         for key in data:
-            if key == "id":
-                continue
             k = list(filter(lambda i: i['id'] == key, settings.CONFIG['documentKeys']))
             ktype = k and k[0].get('type') or ''
             if key == 'text' and self.extension == 'html':
@@ -552,10 +546,10 @@ class Document(models.Model, FulltextMixin):
         if len(crop) == 4:
             path = os.path.join(folder, '%dp%d,%s.jpg' % (1024, page, ','.join(map(str, crop))))
             if not os.path.exists(path):
-                img = open_image_rgb(src).crop(crop)
+                img = Image.open(src).crop(crop)
                 img.save(path)
             else:
-                img = open_image_rgb(path)
+                img = Image.open(path)
             src = path
             if size < max(img.size):
                 path = os.path.join(folder, '%dp%d,%s.jpg' % (size, page, ','.join(map(str, crop))))
@@ -568,10 +562,10 @@ class Document(models.Model, FulltextMixin):
         if len(crop) == 4:
             path = os.path.join(folder, '%s.jpg' % ','.join(map(str, crop)))
             if not os.path.exists(path):
-                img = open_image_rgb(src).convert('RGB').crop(crop)
+                img = Image.open(src).crop(crop)
                 img.save(path)
             else:
-                img = open_image_rgb(path)
+                img = Image.open(path)
             src = path
             if size < max(img.size):
                 path = os.path.join(folder, '%sp%s.jpg' % (size, ','.join(map(str, crop))))
@@ -580,7 +574,7 @@ class Document(models.Model, FulltextMixin):
         if os.path.exists(src) and not os.path.exists(path):
             image_size = max(self.width, self.height)
             if image_size == -1:
-                image_size = max(*open_image_rgb(src).size)
+                image_size = max(*Image.open(src).size)
             if size > image_size:
                 path = src
             else:
@@ -592,11 +586,6 @@ class Document(models.Model, FulltextMixin):
             image = os.path.join(os.path.dirname(pdf), '1024p%d.jpg' % page)
             utils.extract_pdfpage(pdf, image, page)
 
-    def create_pages(self):
-        for page in range(self.pages):
-            page += 1
-            p, c = Page.objects.get_or_create(document=self, page=page)
-
     def get_info(self):
         if self.extension == 'pdf':
             self.thumbnail(1024)
@@ -606,7 +595,7 @@ class Document(models.Model, FulltextMixin):
             self.pages = utils.pdfpages(self.file.path)
         elif self.width == -1:
             self.pages = -1
-            self.width, self.height = open_image_rgb(self.file.path).size
+            self.width, self.height = Image.open(self.file.path).size
 
     def get_ratio(self):
         if self.extension == 'pdf':
@@ -713,41 +702,6 @@ class ItemProperties(models.Model):
         super(ItemProperties, self).save(*args, **kwargs)
 
 
-class Page(models.Model, FulltextPageMixin):
-
-    created = models.DateTimeField(auto_now_add=True)
-    modified = models.DateTimeField(auto_now=True)
-
-    document = models.ForeignKey(Document, related_name='pages_set', on_delete=models.CASCADE)
-    page = models.IntegerField(default=1)
-    data = JSONField(default=dict, editable=False)
-
-    objects = managers.PageManager()
-
-    def __str__(self):
-        return u"%s:%s" % (self.document, self.page)
-
-    def json(self, keys=None, user=None):
-        data = {}
-        data['document'] = ox.toAZ(self.document.id)
-        data['page'] = self.page
-        data['id'] = '{document}/{page}'.format(**data)
-        document_keys = []
-        if keys:
-            for key in list(data):
-                if key not in keys:
-                    del data[key]
-            for key in keys:
-                if 'fulltext' in key:
-                    data['fulltext'] = self.extract_fulltext()
-                elif key in ('document', 'page', 'id'):
-                    pass
-                else:
-                    document_keys.append(key)
-        if document_keys:
-            data.update(self.document.json(document_keys, user))
-        return data
-
 class Access(models.Model):
     class Meta:
         unique_together = ("document", "user")
@@ -1,135 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import re
-from glob import glob
-import unicodedata
-
-import ox
-from ox.utils import json
-from oxdjango.api import actions
-from oxdjango.decorators import login_required_json
-from oxdjango.http import HttpFileResponse
-from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
-from django import forms
-from django.db.models import Count, Sum
-from django.conf import settings
-
-from item import utils
-from item.models import Item
-from itemlist.models import List
-from entity.models import Entity
-from archive.chunk import process_chunk
-from changelog.models import add_changelog
-
-from . import models
-from . import tasks
-
-def parse_query(data, user):
-    query = {}
-    query['range'] = [0, 100]
-    query['sort'] = [{'key': 'page', 'operator': '+'}, {'key': 'title', 'operator': '+'}]
-    for key in ('keys', 'group', 'file', 'range', 'position', 'positions', 'sort'):
-        if key in data:
-            query[key] = data[key]
-    query['qs'] = models.Page.objects.find(data, user)
-    return query
-
-def _order_query(qs, sort):
-    prefix = 'document__sort__'
-    order_by = []
-    for e in sort:
-        operator = e['operator']
-        if operator != '-':
-            operator = ''
-        key = {
-            'index': 'document__items__itemproperties__index',
-            'position': 'id',
-            'name': 'title',
-        }.get(e['key'], e['key'])
-        if key == 'resolution':
-            order_by.append('%swidth' % operator)
-            order_by.append('%sheight' % operator)
-        else:
-            if '__' not in key and key not in ('created', 'modified', 'page'):
-                key = "%s%s" % (prefix, key)
-            order = '%s%s' % (operator, key)
-            order_by.append(order)
-    if order_by:
-        qs = qs.order_by(*order_by, nulls_last=True)
-    qs = qs.distinct()
-    return qs
-
-def _order_by_group(query):
-    prefix = 'document__sort__'
-    if 'sort' in query:
-        op = '-' if query['sort'][0]['operator'] == '-' else ''
-        if len(query['sort']) == 1 and query['sort'][0]['key'] == 'items':
-            order_by = op + prefix + 'items'
-            if query['group'] == "year":
-                secondary = op + prefix + 'sortvalue'
-                order_by = (order_by, secondary)
-            elif query['group'] != "keyword":
-                order_by = (order_by, prefix + 'sortvalue')
-            else:
-                order_by = (order_by, 'value')
-        else:
-            order_by = op + prefix + 'sortvalue'
-            order_by = (order_by, prefix + 'items')
-    else:
-        order_by = ('-' + prefix + 'sortvalue', prefix + 'items')
-    return order_by
-
-def findPages(request, data):
-    '''
-    Finds documents pages for a given query
-    takes {
-        query: object, // query object, see `find`
-        sort: [object], // list of sort objects, see `find`
-        range: [int, int], // range of results, per current sort order
-        keys: [string] // list of keys to return
-    }
-    returns {
-        items: [{ // list of pages
-            id: string
-            page: int
-        }]
-    }
-    '''
-    query = parse_query(data, request.user)
-    #order
-    qs = _order_query(query['qs'], query['sort'])
-
-    response = json_response()
-    if 'group' in query:
-        response['data']['items'] = []
-        items = 'items'
-        document_qs = query['qs']
-        order_by = _order_by_group(query)
-        qs = models.Facet.objects.filter(key=query['group']).filter(document__id__in=document_qs)
-        qs = qs.values('value').annotate(items=Count('id')).order_by(*order_by)
-
-        if 'positions' in query:
-            response['data']['positions'] = {}
-            ids = [j['value'] for j in qs]
-            response['data']['positions'] = utils.get_positions(ids, query['positions'])
-        elif 'range' in data:
-            qs = qs[query['range'][0]:query['range'][1]]
-            response['data']['items'] = [{'name': i['value'], 'items': i[items]} for i in qs]
-        else:
-            response['data']['items'] = qs.count()
-    elif 'keys' in data:
-        qs = qs[query['range'][0]:query['range'][1]]
-
-        response['data']['items'] = [l.json(data['keys'], request.user) for l in qs]
-    elif 'position' in data:
-        #FIXME: actually implement position requests
-        response['data']['position'] = 0
-    elif 'positions' in data:
-        ids = list(qs.values_list('id', flat=True))
-        response['data']['positions'] = utils.get_positions(ids, query['positions'], decode_id=True)
-    else:
-        response['data']['items'] = qs.count()
-    return render_to_json_response(response)
-actions.register(findPages)
@@ -1,30 +1,8 @@
-import ox
-from app.celery import app
+# -*- coding: utf-8 -*-
+from celery.task import task
 
-@app.task(queue="encoding")
+@task(queue="encoding")
 def extract_fulltext(id):
     from . import models
     d = models.Document.objects.get(id=id)
     d.update_fulltext()
-    d.create_pages()
-    for page in d.pages_set.all():
-        page.update_fulltext()
-
-
-@app.task(queue='default')
-def bulk_edit(data, username):
-    from django.db import transaction
-    from . import models
-    from item.models import Item
-    user = models.User.objects.get(username=username)
-    item = 'item' in data and Item.objects.get(public_id=data['item']) or None
-    ids = data['id']
-    del data['id']
-    documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, ids))
-    for document in documents:
-        if document.editable(user, item):
-            with transaction.atomic():
-                document.refresh_from_db()
-                document.edit(data, user, item)
-                document.save()
-    return {}
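This hunk trades the app-bound task style (`@app.task` on a project-level Celery instance) for the legacy `celery.task.task` decorator, which only exists up to Celery 4 and was removed in Celery 5. For contrast, a minimal sketch of the app-bound style on the left side, with the module path `app/celery.py` assumed from the import:

```
# app/celery.py (assumed layout): one Celery app for the whole project.
from celery import Celery

app = Celery('pandora')
app.conf.task_default_queue = 'default'

# Task modules then import that app instead of the removed
# module-level decorator from celery.task:
@app.task(queue='encoding')
def extract_fulltext(document_id):
    ...
```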
@@ -12,10 +12,8 @@ from oxdjango.decorators import login_required_json
 from oxdjango.http import HttpFileResponse
 from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
 from django import forms
-from django.conf import settings
 from django.db.models import Count, Sum
-from django.http import HttpResponse
-from django.shortcuts import render
+from django.conf import settings
 
 from item import utils
 from item.models import Item
@@ -25,8 +23,6 @@ from archive.chunk import process_chunk
 from changelog.models import add_changelog
 
 from . import models
-from . import tasks
-from . import page_views
 
 def get_document_or_404_json(request, id):
     response = {'status': {'code': 404,
@@ -135,13 +131,13 @@ def editDocument(request, data):
     item = 'item' in data and Item.objects.get(public_id=data['item']) or None
     if data['id']:
         if isinstance(data['id'], list):
-            add_changelog(request, data)
-            t = tasks.bulk_edit.delay(data, request.user.username)
-            response['data']['taskId'] = t.task_id
+            documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, data['id']))
         else:
-            document = models.Document.get(data['id'])
+            documents = [models.Document.get(data['id'])]
+        for document in documents:
             if document.editable(request.user, item):
-                add_changelog(request, data)
+                if document == documents[0]:
+                    add_changelog(request, data)
                 document.edit(data, request.user, item)
                 document.save()
                 response['data'] = document.json(user=request.user, item=item)
@@ -383,12 +379,8 @@ def file(request, id, name=None):
 def thumbnail(request, id, size=256, page=None):
     size = int(size)
     document = get_document_or_404_json(request, id)
-    if "q" in request.GET and page:
-        img = document.highlight_page(page, request.GET["q"], size)
-        return HttpResponse(img, content_type="image/jpeg")
     return HttpFileResponse(document.thumbnail(size, page=page))
 
 
 @login_required_json
 def upload(request):
     if 'id' in request.GET:
@@ -513,37 +505,3 @@ def autocompleteDocuments(request, data):
     response['data']['items'] = [i['value'] for i in qs]
     return render_to_json_response(response)
 actions.register(autocompleteDocuments)
-
-
-def document(request, fragment):
-    context = {}
-    parts = fragment.split('/')
-    # FIXME: parse collection urls and return the right metadata for those
-    id = parts[0]
-    page = None
-    crop = None
-    if len(parts) == 2:
-        rect = parts[1].split(',')
-        if len(rect) == 1:
-            page = rect[0]
-        else:
-            crop = rect
-    try:
-        document = models.Document.objects.filter(id=ox.fromAZ(id)).first()
-    except:
-        document = None
-    if document and document.access(request.user):
-        context['title'] = document.data['title']
-        if document.data.get('description'):
-            context['description'] = document.data['description']
-        link = request.build_absolute_uri(document.get_absolute_url())
-        public_id = ox.toAZ(document.id)
-        preview = '/documents/%s/512p.jpg' % public_id
-        if page:
-            preview = '/documents/%s/512p%s.jpg' % (public_id, page)
-        if crop:
-            preview = '/documents/%s/512p%s.jpg' % (public_id, ','.join(crop))
-        context['preview'] = request.build_absolute_uri(preview)
-    context['url'] = request.build_absolute_uri('/documents/' + fragment)
-    context['settings'] = settings
-    return render(request, "document.html", context)
@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class DocumentcollectionConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'documentcollection'
-
@@ -1,61 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:28
-
-import django.core.serializers.json
-from django.db import migrations, models
-import documentcollection.models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('documentcollection', '0004_jsonfield'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='collection',
-            name='description',
-            field=models.TextField(default=''),
-        ),
-        migrations.AlterField(
-            model_name='collection',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='collection',
-            name='poster_frames',
-            field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='collection',
-            name='query',
-            field=oxdjango.fields.JSONField(default=documentcollection.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='collection',
-            name='sort',
-            field=oxdjango.fields.JSONField(default=documentcollection.models.get_collectionsort, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='collection',
-            name='status',
-            field=models.CharField(default='private', max_length=20),
-        ),
-        migrations.AlterField(
-            model_name='collection',
-            name='type',
-            field=models.CharField(default='static', max_length=255),
-        ),
-        migrations.AlterField(
-            model_name='collectiondocument',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='position',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
@@ -34,9 +34,6 @@ def get_collectionview():
 def get_collectionsort():
     return tuple(settings.CONFIG['user']['ui']['collectionSort'])
 
-def default_query():
-    return {"static": True}
-
 class Collection(models.Model):
 
     class Meta:
@@ -49,7 +46,7 @@ class Collection(models.Model):
     name = models.CharField(max_length=255)
     status = models.CharField(max_length=20, default='private')
     _status = ['private', 'public', 'featured']
-    query = JSONField(default=default_query, editable=False)
+    query = JSONField(default=lambda: {"static": True}, editable=False)
     type = models.CharField(max_length=255, default='static')
     description = models.TextField(default='')
 
@@ -86,11 +86,6 @@ def findCollections(request, data):
         for x in data.get('query', {}).get('conditions', [])
     )
 
-    is_personal = request.user.is_authenticated and any(
-        (x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
-        for x in data.get('query', {}).get('conditions', [])
-    )
-
     if is_section_request:
         qs = query['qs']
         if not is_featured and not request.user.is_anonymous:
@@ -99,9 +94,6 @@ def findCollections(request, data):
     else:
         qs = _order_query(query['qs'], query['sort'])
 
-    if is_personal and request.user.profile.ui.get('hidden', {}).get('collections'):
-        qs = qs.exclude(name__in=request.user.profile.ui['hidden']['collections'])
-
     response = json_response()
     if 'keys' in data:
         qs = qs[query['range'][0]:query['range'][1]]
@@ -246,7 +238,7 @@ def addCollection(request, data):
     'type' and 'view'.
     see: editCollection, findCollections, getCollection, removeCollection, sortCollections
     '''
-    data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
+    data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
     name = data['name']
     if not name:
         name = "Untitled"
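Note that the revert also strips the `r''` prefix from `re.sub(r' \[\d+\]$', ...)`, here and in the `addEdit` hunk below. Without a raw string, `\[` and `\d` are invalid escape sequences in a plain Python string literal: a `DeprecationWarning` since Python 3.6 and a `SyntaxWarning` from 3.12, although the regex still matches the same today:

```
import re

# raw string: the regex engine receives ' \[\d+\]$' unchanged
print(re.sub(r' \[\d+\]$', '', 'Untitled [2]'))  # -> 'Untitled'

# the same pattern without the r prefix behaves identically for now,
# but each invalid escape triggers a warning and is slated to become
# a hard error in a future Python release
```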
@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class EditConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'edit'
-
@@ -1,41 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:28
-
-import django.core.serializers.json
-from django.db import migrations, models
-import edit.models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('edit', '0005_jsonfield'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='clip',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='edit',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='edit',
-            name='poster_frames',
-            field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='edit',
-            name='query',
-            field=oxdjango.fields.JSONField(default=edit.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='position',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
@@ -13,7 +13,6 @@ from django.conf import settings
 from django.db import models, transaction
 from django.db.models import Max
 from django.contrib.auth import get_user_model
-from django.core.cache import cache
 
 from oxdjango.fields import JSONField
 
@@ -25,7 +24,6 @@ import clip.models
 from archive import extract
 from user.utils import update_groups
 from user.models import Group
-from clip.utils import add_cuts
 
 from . import managers
 
@@ -35,9 +33,6 @@ User = get_user_model()
 def get_path(f, x): return f.path(x)
 def get_icon_path(f, x): return get_path(f, 'icon.jpg')
 
-def default_query():
-    return {"static": True}
-
 class Edit(models.Model):
 
     class Meta:
@@ -56,7 +51,7 @@ class Edit(models.Model):
     description = models.TextField(default='')
     rightslevel = models.IntegerField(db_index=True, default=0)
 
-    query = JSONField(default=default_query, editable=False)
+    query = JSONField(default=lambda: {"static": True}, editable=False)
     type = models.CharField(max_length=255, default='static')
 
     icon = models.ImageField(default=None, blank=True, null=True, upload_to=get_icon_path)
@@ -98,8 +93,6 @@ class Edit(models.Model):
         # dont add clip if in/out are invalid
         if not c.annotation:
             duration = c.item.sort.duration
-            if c.start is None or c.end is None:
-                return False
             if c.start > c.end \
                     or round(c.start, 3) >= round(duration, 3) \
                     or round(c.end, 3) > round(duration, 3):
@@ -514,7 +507,7 @@ class Clip(models.Model):
             if value:
                 data[key] = value
         data['duration'] = data['out'] - data['in']
-        add_cuts(data, self.item, self.start, self.end)
+        data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end])
         data['layers'] = self.get_layers(user)
         data['streams'] = [s.file.oshash for s in self.item.streams()]
         return data
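Both this hunk and the `Collection` hunk above replace a named module-level `default_query` with a lambda. Django's migration serializer cannot serialize lambdas, which is presumably why the newer code introduced the named functions in the first place: the deleted migrations reference `edit.models.default_query` and `documentcollection.models.default_query` directly. A field default that has to appear in a migration must stay an importable callable:

```
# in an app's models.py; sketch of the serializable-default rule
from django.db import models

def default_query():
    # importable at module level, so makemigrations can serialize it
    return {"static": True}

class Edit(models.Model):
    query = models.JSONField(default=default_query)  # fine
    # query = models.JSONField(default=lambda: {"static": True})
    # would fail at makemigrations time with a
    # "Cannot serialize function" ValueError
```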
@@ -3,16 +3,14 @@
 import os
 import re
 
-from oxdjango.api import actions
-from oxdjango.decorators import login_required_json
-from oxdjango.http import HttpFileResponse
-from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
 import ox
-from django.conf import settings
+from oxdjango.decorators import login_required_json
+from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
 from django.db import transaction
 from django.db.models import Max
-from django.db.models import Sum
+from oxdjango.http import HttpFileResponse
+from oxdjango.api import actions
+from django.conf import settings
 
 from item import utils
 from changelog.models import add_changelog
@@ -192,7 +190,7 @@ def _order_clips(edit, sort):
             'in': 'start',
             'out': 'end',
             'text': 'sortvalue',
-            'volume': 'volume' if edit.type == 'smart' else 'sortvolume',
+            'volume': 'sortvolume',
             'item__sort__item': 'item__sort__public_id',
         }.get(key, key)
         order = '%s%s' % (operator, key)
@@ -262,7 +260,7 @@ def addEdit(request, data):
     }
     see: editEdit, findEdit, getEdit, removeEdit, sortEdits
     '''
-    data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
+    data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
     name = data['name']
     if not name:
         name = "Untitled"
@@ -414,11 +412,6 @@ def findEdits(request, data):
 
     is_featured = any(filter(is_featured_condition, data.get('query', {}).get('conditions', [])))
 
-    is_personal = request.user.is_authenticated and any(
-        (x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
-        for x in data.get('query', {}).get('conditions', [])
-    )
-
     if is_section_request:
         qs = query['qs']
         if not is_featured and not request.user.is_anonymous:
@@ -427,9 +420,6 @@ def findEdits(request, data):
     else:
         qs = _order_query(query['qs'], query['sort'])
 
-    if is_personal and request.user.profile.ui.get('hidden', {}).get('edits'):
-        qs = qs.exclude(name__in=request.user.profile.ui['hidden']['edits'])
-
     response = json_response()
     if 'keys' in data:
         qs = qs[query['range'][0]:query['range'][1]]
@@ -1,3 +0,0 @@
-LOGLEVEL=info
-MAX_TASKS_PER_CHILD=500
-CONCURRENCY=1
@@ -1,6 +0,0 @@
-from django.apps import AppConfig
-
-
-class EntityConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'entity'
@@ -1,50 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:28
-
-import django.core.serializers.json
-from django.db import migrations, models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('entity', '0006_auto_20180918_0903'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='documentproperties',
-            name='data',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='documentproperties',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='entity',
-            name='data',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='entity',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='entity',
-            name='name_find',
-            field=models.TextField(default=''),
-        ),
-        migrations.AlterField(
-            model_name='find',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='link',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
@@ -1,6 +0,0 @@
-from django.apps import AppConfig
-
-
-class EventConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'event'
@@ -1,43 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:28
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('event', '0003_auto_20160304_1644'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='event',
-            name='duration',
-            field=models.CharField(default='', max_length=255),
-        ),
-        migrations.AlterField(
-            model_name='event',
-            name='end',
-            field=models.CharField(default='', max_length=255),
-        ),
-        migrations.AlterField(
-            model_name='event',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='event',
-            name='name_find',
-            field=models.TextField(default=''),
-        ),
-        migrations.AlterField(
-            model_name='event',
-            name='start',
-            field=models.CharField(default='', max_length=255),
-        ),
-        migrations.AlterField(
-            model_name='event',
-            name='type',
-            field=models.CharField(default='', max_length=255),
-        ),
-    ]
@@ -1,26 +1,20 @@
 # -*- coding: utf-8 -*-
 
-from app.celery import app
+from celery.task import task
 
 from .models import Event
 
 
 '''
-from celery.schedules import crontab
+@periodic_task(run_every=crontab(hour=7, minute=30), queue='encoding')
 
-@app.task(ignore_results=True, queue='encoding')
 def update_all_matches(**kwargs):
     ids = [e['id'] for e in Event.objects.all().values('id')]
     for i in ids:
         e = Event.objects.get(pk=i)
         e.update_matches()
 
-@app.on_after_finalize.connect
-def setup_periodic_tasks(sender, **kwargs):
-    sender.add_periodic_task(crontab(hour=7, minute=30), update_all_matches.s())
 '''
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_matches(eventId):
     event = Event.objects.get(pk=eventId)
     event.update_matches()
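The commented-out block keeps both generations of Celery's periodic-task API side by side: the pre-4.0 `@periodic_task` decorator on the new side, and on the old side the Celery 4+ registration through `on_after_finalize`. A minimal sketch of the newer registration, with the `app` object assumed to come from `app/celery.py`:

```
from celery.schedules import crontab
from app.celery import app  # assumed project Celery app

@app.task(ignore_results=True, queue='encoding')
def update_all_matches():
    ...

@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
    # run once a day at 07:30
    sender.add_periodic_task(crontab(hour=7, minute=30), update_all_matches.s())
```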
@@ -2,5 +2,4 @@ from django.apps import AppConfig
 
 
 class HomeConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
     name = 'home'
 
@@ -1,30 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:28
-
-import django.core.serializers.json
-from django.db import migrations, models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('home', '0002_jsonfield'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='item',
-            name='data',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='item',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='item',
-            name='index',
-            field=models.IntegerField(default=-1),
-        ),
-    ]
@@ -1,6 +0,0 @@
-from django.apps import AppConfig
-
-
-class ItemConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'item'
@ -4,6 +4,7 @@ from django.core.management.base import BaseCommand
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.db import transaction
|
from django.db import transaction
|
||||||
|
|
||||||
|
settings.RELOAD_CONFIG = False
|
||||||
import app.monkey_patch
|
import app.monkey_patch
|
||||||
from ... import models
|
from ... import models
|
||||||
|
|
||||||
|
|
|
@@ -6,6 +6,7 @@ from django.db import connection, transaction
 from django.db.models import fields
 from django.conf import settings
 
+settings.RELOAD_CONFIG = False
 import app.monkey_patch
 from ... import models
 import clip.models
@@ -5,6 +5,7 @@ from django.db import connection, transaction
 from django.db.models import fields
 from django.conf import settings
 
+settings.RELOAD_CONFIG = False
 import app.monkey_patch
 from ... import models
 import clip.models
@@ -5,6 +5,7 @@ from django.db import connection, transaction
 from django.db.models import fields
 from django.conf import settings
 
+settings.RELOAD_CONFIG = False
 import app.monkey_patch
 from ... import models
 import clip.models
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-from glob import glob
-
-from django.core.management.base import BaseCommand
-
-import app.monkey_patch
-from ... import models
-from ... import tasks
-
-class Command(BaseCommand):
-    """
-    rebuild posters for all items.
-    """
-    help = 'rebuild all posters for all items.'
-    args = ''
-
-    def handle(self, **options):
-        offset = 0
-        chunk = 100
-        count = models.Item.objects.count()
-        while offset <= count:
-            for i in models.Item.objects.all().order_by('id')[offset:offset+chunk]:
-                print(i)
-                if i.poster:
-                    i.poster.delete()
-                i.make_poster()
-            offset += chunk
@@ -6,6 +6,7 @@ from django.db import connection, transaction
 from django.db.models import fields
 from django.conf import settings
 
+settings.RELOAD_CONFIG = False
 import app.monkey_patch
 from ... import models
 
@@ -5,6 +5,7 @@ from django.core.management.base import BaseCommand
 from django.db import connection, transaction
 from django.conf import settings
 
+settings.RELOAD_CONFIG = False
 import app.monkey_patch
 from ... import models
 
@@ -5,6 +5,7 @@ from django.db import connection, transaction
 from django.db.models import fields
 from django.conf import settings
 
+settings.RELOAD_CONFIG = False
 import app.monkey_patch
 from ... import models
 import clip.models
@@ -33,7 +33,7 @@ def parseCondition(condition, user, owner=None):
     k = {'id': 'public_id'}.get(k, k)
     if not k:
         k = '*'
-    v = condition.get('value', '')
+    v = condition['value']
     op = condition.get('operator')
     if not op:
         op = '='
@@ -62,9 +62,6 @@ def parseCondition(condition, user, owner=None):
     if k == 'list':
         key_type = ''
 
-    if k in ('width', 'height'):
-        key_type = 'integer'
-
     if k == 'groups':
         if op == '==' and v == '$my':
             if not owner:
@@ -89,11 +86,8 @@ def parseCondition(condition, user, owner=None):
     elif k == 'rendered':
         return Q(rendered=v)
     elif k == 'resolution':
-        if isinstance(v, list) and len(v) == 2:
-            q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \
-                & parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user)
-        else:
-            q = Q(id=0)
+        q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \
+            & parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user)
     if exclude:
         q = ~q
     return q
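The left-hand side of the `resolution` hunk guards against malformed values: only a two-element `[width, height]` list is decomposed into an AND of width and height conditions, and anything else yields `Q(id=0)`, a condition that matches no rows. A sketch of that decomposition with illustrative field names (pandora actually routes both halves back through `parseCondition`):

```
# Sketch: decomposing a resolution condition into width/height Q objects.
from django.db.models import Q

def resolution_q(value):
    if isinstance(value, (list, tuple)) and len(value) == 2:
        width, height = value
        return Q(sort__width=width) & Q(sort__height=height)  # field names illustrative
    return Q(id=0)  # matches nothing for malformed input
```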
@@ -324,8 +318,6 @@ class ItemManager(Manager):
             q |= Q(groups__in=user.groups.all())
             rendered_q |= Q(groups__in=user.groups.all())
         qs = qs.filter(q)
-        max_level = len(settings.CONFIG['rightsLevels'])
-        qs = qs.filter(level__lte=max_level)
         if settings.CONFIG.get('itemRequiresVideo') and level != 'admin':
             qs = qs.filter(rendered_q)
         return qs
@@ -71,7 +71,7 @@ class Migration(migrations.Migration):
                 ('poster_width', models.IntegerField(default=0)),
                 ('poster_frame', models.FloatField(default=-1)),
                 ('icon', models.ImageField(blank=True, default=None, upload_to=item.models.get_icon_path)),
-                ('torrent', models.FileField(blank=True, default=None, max_length=1000)),
+                ('torrent', models.FileField(blank=True, default=None, max_length=1000, upload_to=item.models.get_torrent_path)),
                 ('stream_info', oxdjango.fields.DictField(default={}, editable=False)),
                 ('stream_aspect', models.FloatField(default=1.3333333333333333)),
             ],
@@ -1,19 +0,0 @@
-# Generated by Django 3.0.10 on 2023-07-10 08:52
-
-import django.core.serializers.json
-from django.db import migrations, models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('item', '0004_json_cache'),
-    ]
-
-    operations = [
-        migrations.RemoveField(
-            model_name='item',
-            name='torrent',
-        ),
-    ]
@@ -1,65 +0,0 @@
-# Generated by Django 4.2.3 on 2023-07-27 21:28
-
-import django.core.serializers.json
-from django.db import migrations, models
-import oxdjango.fields
-
-
-class Migration(migrations.Migration):
-
-    dependencies = [
-        ('item', '0005_auto_20230710_0852'),
-    ]
-
-    operations = [
-        migrations.AlterField(
-            model_name='access',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='annotationsequence',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='description',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='facet',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='item',
-            name='cache',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='item',
-            name='data',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='item',
-            name='external_data',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='item',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-        migrations.AlterField(
-            model_name='item',
-            name='stream_info',
-            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
-        ),
-        migrations.AlterField(
-            model_name='itemfind',
-            name='id',
-            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
-        ),
-    ]
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 
 import json
-import logging
 import os
 import re
 import shutil
@@ -43,7 +42,6 @@ from user.utils import update_groups
 from user.models import Group
 import archive.models
 
-logger = logging.getLogger('pandora.' + __name__)
 User = get_user_model()
 
 
@@ -157,6 +155,9 @@ def get_icon_path(f, x):
 def get_poster_path(f, x):
     return get_path(f, 'poster.jpg')
 
+def get_torrent_path(f, x):
+    return get_path(f, 'torrent.torrent')
+
 class Item(models.Model):
     created = models.DateTimeField(auto_now_add=True)
     modified = models.DateTimeField(auto_now=True)
|
@ -182,6 +183,7 @@ class Item(models.Model):
|
||||||
|
|
||||||
icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path)
|
icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path)
|
||||||
|
|
||||||
|
torrent = models.FileField(default=None, blank=True, max_length=1000, upload_to=get_torrent_path)
|
||||||
stream_info = JSONField(default=dict, editable=False)
|
stream_info = JSONField(default=dict, editable=False)
|
||||||
|
|
||||||
# stream related fields
|
# stream related fields
|
||||||
|
@@ -229,9 +231,6 @@ class Item(models.Model):
     def editable(self, user):
         if user.is_anonymous:
             return False
-        max_level = len(settings.CONFIG['rightsLevels'])
-        if self.level > max_level:
-            return False
         if user.profile.capability('canEditMetadata') or \
            user.is_staff or \
            self.user == user or \
@@ -239,7 +238,7 @@ class Item(models.Model):
             return True
         return False
 
-    def edit(self, data, is_task=False):
+    def edit(self, data):
         data = data.copy()
         # FIXME: how to map the keys to the right place to write them to?
         if 'id' in data:
@@ -256,12 +255,11 @@ class Item(models.Model):
                 description = data.pop(key)
                 if isinstance(description, dict):
                     for value in description:
-                        value = ox.sanitize_html(value)
                         d, created = Description.objects.get_or_create(key=k, value=value)
                         d.description = ox.sanitize_html(description[value])
                         d.save()
                 else:
-                    value = ox.sanitize_html(data.get(k, self.get(k, '')))
+                    value = data.get(k, self.get(k, ''))
                     if not description:
                         description = ''
                     d, created = Description.objects.get_or_create(key=k, value=value)
@@ -296,10 +294,7 @@ class Item(models.Model):
                 self.data[key] = ox.escape_html(data[key])
         p = self.save()
         if not settings.USE_IMDB and list(filter(lambda k: k in self.poster_keys, data)):
-            if is_task:
-                tasks.update_poster(self.public_id)
-            else:
-                p = tasks.update_poster.delay(self.public_id)
+            p = tasks.update_poster.delay(self.public_id)
         return p
 
     def update_external(self):
@@ -478,8 +473,7 @@ class Item(models.Model):
 
         for a in self.annotations.all().order_by('id'):
             a.item = other
-            with transaction.atomic():
-                a.set_public_id()
+            a.set_public_id()
             Annotation.objects.filter(id=a.id).update(item=other, public_id=a.public_id)
         try:
             other_sort = other.sort
@@ -523,7 +517,6 @@ class Item(models.Model):
                 cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True)
             p.wait()
             os.unlink(tmp_output_txt)
-            os.close(fd)
             return True
         else:
             return None
@@ -641,11 +634,11 @@ class Item(models.Model):
         if self.poster_height:
            i['posterRatio'] = self.poster_width / self.poster_height
 
-        if keys and 'hasSource' in keys:
-            i['hasSource'] = self.streams().exclude(file__data='').exists()
+        if keys and 'source' in keys:
+            i['source'] = self.streams().exclude(file__data='').exists()
 
         streams = self.streams()
-        i['durations'] = [s[0] for s in streams.values_list('duration')]
+        i['durations'] = [s.duration for s in streams]
         i['duration'] = sum(i['durations'])
         i['audioTracks'] = self.audio_tracks()
         if not i['audioTracks']:
@@ -701,12 +694,10 @@ class Item(models.Model):
             else:
                 values = self.get(key)
                 if values:
-                    values = [ox.sanitize_html(value) for value in values]
                     for d in Description.objects.filter(key=key, value__in=values):
                         i['%sdescription' % key][d.value] = d.description
             else:
-                value = ox.sanitize_html(self.get(key, ''))
-                qs = Description.objects.filter(key=key, value=value)
+                qs = Description.objects.filter(key=key, value=self.get(key, ''))
                 i['%sdescription' % key] = '' if qs.count() == 0 else qs[0].description
         if keys:
             info = {}
@@ -864,7 +855,7 @@ class Item(models.Model):
             values = list(set(values))
         else:
             values = self.get(key, '')
-        if values and isinstance(values, list) and isinstance(values[0], str):
+        if isinstance(values, list):
            save(key, '\n'.join(values))
         else:
             save(key, values)
@@ -1026,16 +1017,12 @@ class Item(models.Model):
                     set_value(s, name, value)
                 elif sort_type == 'person':
                     value = sortNames(self.get(source, []))
-                    if value is None:
-                        value = ''
                     value = utils.sort_string(value)[:955]
                     set_value(s, name, value)
                 elif sort_type == 'string':
                     value = self.get(source, '')
-                    if value is None:
-                        value = ''
                     if isinstance(value, list):
-                        value = ','.join([str(v) for v in value])
+                        value = ','.join(value)
                     value = utils.sort_string(value)[:955]
                     set_value(s, name, value)
                 elif sort_type == 'words':
@@ -1112,11 +1099,7 @@ class Item(models.Model):
                 _current_values.append(value[0])
             current_values = _current_values
 
-        try:
-            current_values = list(set(current_values))
-        except:
-            logger.error('invalid facet data for %s: %s', key, current_values)
-            current_values = []
+        current_values = list(set(current_values))
         current_values = [ox.decode_html(ox.strip_tags(v)) for v in current_values]
         current_values = [unicodedata.normalize('NFKD', v) for v in current_values]
         self.update_facet_values(key, current_values)
@@ -1209,7 +1192,7 @@ class Item(models.Model):
             if not r:
                 return False
             path = video.name
-            duration = sum(self.item.cache['durations'])
+            duration = sum(item.cache['durations'])
         else:
             path = stream.media.path
             duration = stream.info['duration']
@@ -1305,6 +1288,90 @@ class Item(models.Model):
         self.files.filter(selected=True).update(selected=False)
         self.save()
 
+    def get_torrent(self, request):
+        if self.torrent:
+            self.torrent.seek(0)
+            data = ox.torrent.bdecode(self.torrent.read())
+            url = request.build_absolute_uri("%s/torrent/" % self.get_absolute_url())
+            if url.startswith('https://'):
+                url = 'http' + url[5:]
+            data['url-list'] = ['%s%s' % (url, u.split('torrent/')[1]) for u in data['url-list']]
+            return ox.torrent.bencode(data)
+
+    def make_torrent(self):
+        if not settings.CONFIG['video'].get('torrent'):
+            return
+        streams = self.streams()
+        if streams.count() == 0:
+            return
+        base = self.path('torrent')
+        base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
+        if not isinstance(base, bytes):
+            base = base.encode('utf-8')
+        if os.path.exists(base):
+            shutil.rmtree(base)
+        ox.makedirs(base)
+
+        filename = utils.safe_filename(ox.decode_html(self.get('title')))
+        base = self.path('torrent/%s' % filename)
+        base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
+        size = 0
+        duration = 0.0
+        if streams.count() == 1:
+            v = streams[0]
+            media_path = v.media.path
+            extension = media_path.split('.')[-1]
+            url = "%s/torrent/%s.%s" % (self.get_absolute_url(),
+                                        quote(filename.encode('utf-8')),
+                                        extension)
+            video = "%s.%s" % (base, extension)
+            if not isinstance(media_path, bytes):
+                media_path = media_path.encode('utf-8')
+            if not isinstance(video, bytes):
+                video = video.encode('utf-8')
+            media_path = os.path.relpath(media_path, os.path.dirname(video))
+            os.symlink(media_path, video)
+            size = v.media.size
+            duration = v.duration
+        else:
+            url = "%s/torrent/" % self.get_absolute_url()
+            part = 1
+            ox.makedirs(base)
+            for v in streams:
+                media_path = v.media.path
+                extension = media_path.split('.')[-1]
+                video = "%s/%s.Part %d.%s" % (base, filename, part, extension)
+                part += 1
+                if not isinstance(media_path, bytes):
+                    media_path = media_path.encode('utf-8')
+                if not isinstance(video, bytes):
+                    video = video.encode('utf-8')
+                media_path = os.path.relpath(media_path, os.path.dirname(video))
+                os.symlink(media_path, video)
+                size += v.media.size
+                duration += v.duration
+            video = base
+
+        torrent = '%s.torrent' % base
+        url = "http://%s%s" % (settings.CONFIG['site']['url'], url)
+        meta = {
+            'filesystem_encoding': 'utf-8',
+            'target': torrent,
+            'url-list': url,
+        }
+        if duration:
+            meta['playtime'] = ox.format_duration(duration*1000)[:-4]
+
+        # slightly bigger torrent file but better for streaming
+        piece_size_pow2 = 15  # 1 mbps -> 32KB pieces
+        if size / duration >= 1000000:
+            piece_size_pow2 = 16  # 2 mbps -> 64KB pieces
+        meta['piece_size_pow2'] = piece_size_pow2
+
+        ox.torrent.create_torrent(video, settings.TRACKER_URL, meta)
+        self.torrent.name = torrent[len(settings.MEDIA_ROOT)+1:]
+        self.save()
+
     def audio_tracks(self):
         tracks = [f['language']
             for f in self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True)).values('language')
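Two details of `make_torrent` worth noting: stream files are not copied but symlinked (via relative paths) into the torrent directory, and the piece size is derived from the average byte rate; as the inline comments say, smaller pieces make a slightly bigger .torrent but verify in finer increments while streaming. A worked check of the piece-size arithmetic (the guard against a zero duration is added here; the original divides unconditionally):

```
# Worked check of the piece-size heuristic in make_torrent above.
def piece_size_pow2(size_bytes, duration_s):
    # average byte rate; >= 1,000,000 bytes/s gets bigger pieces
    if duration_s and size_bytes / duration_s >= 1000000:
        return 16  # 2**16 = 65536 bytes = 64 KiB pieces
    return 15      # 2**15 = 32768 bytes = 32 KiB pieces

assert 2 ** piece_size_pow2(700_000 * 600, 600.0) == 32768    # ~0.7 MB/s stream
assert 2 ** piece_size_pow2(2_000_000 * 600, 600.0) == 65536  # ~2 MB/s stream
```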
@@ -1312,10 +1379,11 @@ class Item(models.Model):
         return sorted(set(tracks))
 
     def streams(self, track=None):
-        files = self.files.filter(selected=True).filter(Q(is_audio=True) | Q(is_video=True))
         qs = archive.models.Stream.objects.filter(
-            file__in=files, source=None, available=True
-        ).select_related()
+            source=None, available=True, file__item=self, file__selected=True
+        ).filter(
+            Q(file__is_audio=True) | Q(file__is_video=True)
+        )
         if not track:
             tracks = self.audio_tracks()
             if len(tracks) > 1:
@@ -1354,6 +1422,7 @@ class Item(models.Model):
         self.select_frame()
         self.make_poster()
         self.make_icon()
+        self.make_torrent()
         self.rendered = streams.count() > 0
         self.save()
         if self.rendered:
@@ -1539,15 +1608,8 @@ class Item(models.Model):
             cmd += ['-l', timeline]
         if frame:
             cmd += ['-f', frame]
-        if settings.ITEM_ICON_DATA:
-            cmd += '-d', '-'
-            data = self.json()
-            data = utils.normalize_dict('NFC', data)
-            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, close_fds=True)
-            p.communicate(json.dumps(data, default=to_json).encode('utf-8'))
-        else:
-            p = subprocess.Popen(cmd, close_fds=True)
-            p.wait()
+        p = subprocess.Popen(cmd, close_fds=True)
+        p.wait()
         # remove cached versions
         icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon))
         for f in glob(icon.replace('.jpg', '*.jpg')):
|
||||||
return icon
|
return icon
|
||||||
|
|
||||||
def add_empty_clips(self):
|
def add_empty_clips(self):
|
||||||
if not settings.EMPTY_CLIPS:
|
|
||||||
return
|
|
||||||
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
|
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
|
||||||
if not subtitles:
|
if not subtitles:
|
||||||
return
|
return
|
||||||
# otherwise add empty 5 seconds annotation every minute
|
# otherwise add empty 5 seconds annotation every minute
|
||||||
duration = sum([s[0] for s in self.streams().values_list('duration')])
|
duration = sum([s.duration for s in self.streams()])
|
||||||
layer = subtitles['id']
|
layer = subtitles['id']
|
||||||
# FIXME: allow annotations from no user instead?
|
# FIXME: allow annotations from no user instead?
|
||||||
user = User.objects.all().order_by('id')[0]
|
user = User.objects.all().order_by('id')[0]
|
||||||
|
@@ -1814,8 +1874,6 @@ class Description(models.Model):
     value = models.CharField(max_length=1000, db_index=True)
     description = models.TextField()
 
-    def __str__(self):
-        return "%s=%s" % (self.key, self.value)
 
 class AnnotationSequence(models.Model):
     item = models.OneToOneField('Item', related_name='_annotation_sequence', on_delete=models.CASCADE)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def nextid(cls, item):
|
def nextid(cls, item):
|
||||||
s, created = cls.objects.get_or_create(item=item)
|
with transaction.atomic():
|
||||||
if created:
|
s, created = cls.objects.get_or_create(item=item)
|
||||||
nextid = s.value
|
if created:
|
||||||
else:
|
nextid = s.value
|
||||||
cursor = connection.cursor()
|
else:
|
||||||
sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id)
|
cursor = connection.cursor()
|
||||||
cursor.execute(sql)
|
sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id)
|
||||||
nextid = cursor.fetchone()[0]
|
cursor.execute(sql)
|
||||||
|
nextid = cursor.fetchone()[0]
|
||||||
return "%s/%s" % (item.public_id, ox.toAZ(nextid))
|
return "%s/%s" % (item.public_id, ox.toAZ(nextid))
|
||||||
|
|
|
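`nextid` is a per-item sequence: the first caller creates the counter row and uses its initial value; every later caller increments and reads in a single `UPDATE ... RETURNING` statement, so two concurrent annotation writes cannot be handed the same id. The right-hand side additionally wraps both steps in `transaction.atomic()`. The core pattern reduced to a standalone sketch; table and column names are illustrative, and the id is passed as a query parameter rather than interpolated:

```
# Sketch: an atomic per-row counter via UPDATE ... RETURNING (PostgreSQL).
from django.db import connection, transaction

def next_value(counter_id):
    with transaction.atomic():
        with connection.cursor() as cursor:
            cursor.execute(
                "UPDATE counters SET value = value + 1"
                " WHERE id = %s RETURNING value",
                [counter_id],
            )
            return cursor.fetchone()[0]  # the freshly incremented value
```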
@@ -24,6 +24,10 @@ urls = [
     re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<format>webm|ogv|mp4)$', views.video),
     re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<track>.+)\.(?P<format>webm|ogv|mp4)$', views.video),
 
+    #torrent
+    re_path(r'^(?P<id>[A-Z0-9].*)/torrent$', views.torrent),
+    re_path(r'^(?P<id>[A-Z0-9].*)/torrent/(?P<filename>.*?)$', views.torrent),
+
     #export
     re_path(r'^(?P<id>[A-Z0-9].*)/json$', views.item_json),
     re_path(r'^(?P<id>[A-Z0-9].*)/xml$', views.item_xml),
@@ -2,35 +2,27 @@
 
 from datetime import timedelta, datetime
 from urllib.parse import quote
-import xml.etree.ElementTree as ET
 import gzip
 import os
 import random
-import logging
 
-from app.celery import app
-from celery.schedules import crontab
+from celery.task import task, periodic_task
 from django.conf import settings
 from django.db import connection, transaction
 from django.db.models import Q
+from ox.utils import ET
 
 from app.utils import limit_rate
 from taskqueue.models import Task
 
 
-logger = logging.getLogger('pandora.' + __name__)
-
-@app.task(queue='encoding')
+@periodic_task(run_every=timedelta(days=1), queue='encoding')
 def cronjob(**kwargs):
     if limit_rate('item.tasks.cronjob', 8 * 60 * 60):
         update_random_sort()
         update_random_clip_sort()
         clear_cache.delay()
 
-@app.on_after_finalize.connect
-def setup_periodic_tasks(sender, **kwargs):
-    sender.add_periodic_task(timedelta(days=1), cronjob.s())
-
 def update_random_sort():
     from . import models
     if list(filter(lambda f: f['id'] == 'random', settings.CONFIG['itemKeys'])):
@@ -58,7 +50,7 @@ def update_random_clip_sort():
         cursor.execute(row)
 
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_clips(public_id):
     from . import models
     try:
@@ -67,7 +59,7 @@ def update_clips(public_id):
         return
     item.clips.all().update(user=item.user.id)
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_poster(public_id):
     from . import models
     try:
@@ -85,7 +77,7 @@ def update_poster(public_id):
             icon=item.icon.name
         )
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_file_paths(public_id):
     from . import models
     try:
@@ -94,7 +86,7 @@ def update_file_paths(public_id):
         return
     item.update_file_paths()
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_external(public_id):
     from . import models
     try:
@@ -103,7 +95,7 @@ def update_external(public_id):
         return
     item.update_external()
 
-@app.task(queue="encoding")
+@task(queue="encoding")
 def update_timeline(public_id):
     from . import models
     try:
@@ -113,7 +105,7 @@ def update_timeline(public_id):
     item.update_timeline(async_=False)
     Task.finish(item)
 
-@app.task(queue="encoding")
+@task(queue="encoding")
 def rebuild_timeline(public_id):
     from . import models
     i = models.Item.objects.get(public_id=public_id)
@@ -121,7 +113,7 @@ def rebuild_timeline(public_id):
         s.make_timeline()
     i.update_timeline(async_=False)
 
-@app.task(queue="encoding")
+@task(queue="encoding")
 def load_subtitles(public_id):
     from . import models
     try:
@@ -134,7 +126,7 @@ def load_subtitles(public_id):
     item.update_facets()
 
 
-@app.task(queue="encoding")
+@task(queue="encoding")
 def extract_clip(public_id, in_, out, resolution, format, track=None):
     from . import models
     try:
@@ -146,7 +138,7 @@ def extract_clip(public_id, in_, out, resolution, format, track=None):
         return False
 
 
-@app.task(queue="encoding")
+@task(queue="encoding")
 def clear_cache(days=60):
     import subprocess
     path = os.path.join(settings.MEDIA_ROOT, 'media')
@@ -160,7 +152,7 @@ def clear_cache(days=60):
         subprocess.check_output(cmd)
 
 
-@app.task(ignore_results=True, queue='default')
+@task(ignore_results=True, queue='default')
 def update_sitemap(base_url):
     from . import models
     sitemap = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'sitemap.xml.gz'))
@@ -358,18 +350,3 @@ def update_sitemap(base_url):
             f.write(data)
     with gzip.open(sitemap, 'wb') as f:
         f.write(data)
-
-
-@app.task(queue='default')
-def bulk_edit(data, username):
-    from django.db import transaction
-    from . import models
-    from .views import edit_item
-    user = models.User.objects.get(username=username)
-    items = models.Item.objects.filter(public_id__in=data['id'])
-    for item in items:
-        if item.editable(user):
-            with transaction.atomic():
-                item.refresh_from_db()
-                response = edit_item(user, item, data, is_task=True)
-    return {}
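The removed `bulk_edit` task is one half of a pattern the `edit` view below undoes as well: instead of editing every selected item inside the request/response cycle, the newer code queues the whole batch on a worker and immediately returns a `taskId` the client can poll. A minimal sketch of that hand-off; `apply_edit` is a simplified stand-in for pandora's `edit_item` helper:

```
# Sketch: off-loading a bulk edit to a worker and returning its task id.
from celery import Celery

app = Celery('sketch', broker='redis://localhost:6379/0')

def apply_edit(public_id, data, username):
    pass  # stand-in for the per-item edit helper

@app.task(queue='default')
def bulk_edit(data, username):
    for public_id in data['id']:
        apply_edit(public_id, data, username)
    return {}

def edit_view(data, username):
    t = bulk_edit.delay(data, username)  # returns at once
    return {'status': 200, 'data': {'taskId': t.task_id}}
```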
@@ -71,7 +71,7 @@ def join_tiles(source_paths, durations, target_path):
         if not w or large_tile_i < large_tile_n - 1:
             w = 60
         data['target_images']['large'] = data['target_images']['large'].resize(
-            (w, small_tile_h), Image.LANCZOS
+            (w, small_tile_h), Image.ANTIALIAS
         )
         if data['target_images']['small']:
             data['target_images']['small'].paste(
@@ -90,7 +90,7 @@ def join_tiles(source_paths, durations, target_path):
         if data['full_tile_widths'][0]:
             resized = data['target_images']['large'].resize((
                 data['full_tile_widths'][0], large_tile_h
-            ), Image.LANCZOS)
+            ), Image.ANTIALIAS)
             data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0))
             data['full_tile_offset'] += data['full_tile_widths'][0]
             data['full_tile_widths'] = data['full_tile_widths'][1:]
@@ -196,7 +196,7 @@ def join_tiles(source_paths, durations, target_path):
     #print(image_file)
     image_file = '%stimeline%s%dp.jpg' % (target_path, full_tile_mode, small_tile_h)
     data['target_images']['full'].resize(
-        (full_tile_w, small_tile_h), Image.LANCZOS
+        (full_tile_w, small_tile_h), Image.ANTIALIAS
     ).save(image_file)
     #print(image_file)
 
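All three timeline hunks replace `Image.LANCZOS` with `Image.ANTIALIAS`. The two names refer to the same resampling filter, but `ANTIALIAS` was deprecated in Pillow 9 and removed in Pillow 10, so the right-hand side only runs on older Pillow releases. A shim that works across versions:

```
# Compatibility shim for the Lanczos resampling constant across Pillow versions.
from PIL import Image

try:
    LANCZOS = Image.Resampling.LANCZOS  # Pillow >= 9.1
except AttributeError:
    LANCZOS = Image.ANTIALIAS           # older Pillow; removed in 10.0

def shrink(image, size):
    return image.resize(size, LANCZOS)
```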
@@ -61,7 +61,7 @@ def sort_title(title):
     title = sort_string(title)
 
     #title
-    title = re.sub(r'[\'!¿¡,\.;\-"\:\*\[\]]', '', title)
+    title = re.sub('[\'!¿¡,\.;\-"\:\*\[\]]', '', title)
     return title.strip()
 
 def get_positions(ids, pos, decode_id=False):
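Dropping the `r` prefix leaves escape sequences like `\.` and `\-` in a plain string literal; the regex still compiles to the same pattern, but Python warns about the invalid escapes (a `DeprecationWarning` since 3.6, a visible `SyntaxWarning` from 3.12), which is presumably why the raw string was introduced on the left-hand side. A quick check that the pattern behaves as intended:

```
import re

# The raw-string pattern from the left-hand side; same characters, no warnings.
pattern = r'[\'!¿¡,\.;\-"\:\*\[\]]'
assert re.sub(pattern, '', 'Don\'t: "Stop!"') == 'Dont Stop'
```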
@@ -16,13 +16,11 @@ from wsgiref.util import FileWrapper
 from django.conf import settings
 
 from ox.utils import json, ET
-import ox
 
-from oxdjango.api import actions
 from oxdjango.decorators import login_required_json
-from oxdjango.http import HttpFileResponse
 from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
-import oxdjango
+from oxdjango.http import HttpFileResponse
+import ox
 
 from . import models
 from . import utils
@@ -34,6 +32,7 @@ from clip.models import Clip
 from user.models import has_capability
 from changelog.models import add_changelog
 
+from oxdjango.api import actions
 
 
 def _order_query(qs, sort, prefix='sort__'):
@@ -309,7 +308,7 @@ def find(request, data):
     responsive UI: First leave out `keys` to get totals as fast as possible,
     then pass `positions` to get the positions of previously selected items,
     finally make the query with the `keys` you need and an appropriate `range`.
-    For more examples, see https://code.0x2620.org/0x2620/pandora/wiki/QuerySyntax.
+    For more examples, see https://wiki.0x2620.org/wiki/pandora/QuerySyntax.
     see: add, edit, get, lookup, remove, upload
     '''
     if settings.JSON_DEBUG:
@@ -534,18 +533,17 @@ def get(request, data):
     return render_to_json_response(response)
 actions.register(get)
 
-def edit_item(user, item, data, is_task=False):
-    data = data.copy()
+def edit_item(request, item, data):
     update_clips = False
     response = json_response(status=200, text='ok')
     if 'rightslevel' in data:
-        if user.profile.capability('canEditRightsLevel'):
+        if request.user.profile.capability('canEditRightsLevel'):
             item.level = int(data['rightslevel'])
         else:
             response = json_response(status=403, text='permission denied')
         del data['rightslevel']
     if 'user' in data:
-        if user.profile.get_level() in ('admin', 'staff') and \
+        if request.user.profile.get_level() in ('admin', 'staff') and \
                 models.User.objects.filter(username=data['user']).exists():
             new_user = models.User.objects.get(username=data['user'])
             if new_user != item.user:
@@ -553,13 +551,13 @@ def edit_item(request, item, data):
             update_clips = True
         del data['user']
     if 'groups' in data:
-        if not user.profile.capability('canManageUsers'):
+        if not request.user.profile.capability('canManageUsers'):
             # Users wihtout canManageUsers can only add/remove groups they are not in
             groups = set([g.name for g in item.groups.all()])
-            user_groups = set([g.name for g in user.groups.all()])
+            user_groups = set([g.name for g in request.user.groups.all()])
             other_groups = list(groups - user_groups)
             data['groups'] = [g for g in data['groups'] if g in user_groups] + other_groups
-    r = item.edit(data, is_task=is_task)
+    r = item.edit(data)
     if r:
         r.wait()
     if update_clips:
@@ -596,10 +594,10 @@ def add(request, data):
         if p:
             p.wait()
         else:
-            item.make_poster()
+            i.make_poster()
         del data['title']
         if data:
-            response = edit_item(request.user, item, data)
+            response = edit_item(request, item, data)
         response['data'] = item.json()
     add_changelog(request, request_data, item.public_id)
     return render_to_json_response(response)
@@ -621,16 +619,16 @@ def edit(request, data):
     see: add, find, get, lookup, remove, upload
     '''
     if isinstance(data['id'], list):
-        add_changelog(request, data)
-        t = tasks.bulk_edit.delay(data, request.user.username)
-        response = json_response(status=200, text='ok')
-        response['data']['taskId'] = t.task_id
+        items = models.Item.objects.filter(public_id__in=data['id'])
     else:
-        item = get_object_or_404_json(models.Item, public_id=data['id'])
+        items = [get_object_or_404_json(models.Item, public_id=data['id'])]
+    for item in items:
         if item.editable(request.user):
-            add_changelog(request, data)
-            response = edit_item(request.user, item, data)
+            request_data = data.copy()
+            response = edit_item(request, item, data)
             response['data'] = item.json()
+            if item == items[0]:
+                add_changelog(request, request_data)
         else:
             response = json_response(status=403, text='permission denied')
     return render_to_json_response(response)
@@ -949,11 +947,9 @@ def timeline(request, id, size, position=-1, format='jpg', mode=None):
     if not item.access(request.user):
         return HttpResponseForbidden()
 
-    modes = [t['id'] for t in settings.CONFIG['timelines']]
     if not mode:
         mode = 'antialias'
-    if mode not in modes:
-        mode = modes[0]
+    modes = [t['id'] for t in settings.CONFIG['timelines']]
     if mode not in modes:
         raise Http404
     modes.pop(modes.index(mode))
@@ -1033,10 +1029,7 @@ def download(request, id, resolution=None, format='webm', part=None):
             return HttpResponseForbidden()
         elif r is True:
             response = HttpResponse(FileWrapper(video), content_type=content_type)
-            try:
-                response['Content-Length'] = os.path.getsize(video.name)
-            except:
-                pass
+            response['Content-Length'] = os.path.getsize(video.name)
         else:
             response = HttpFileResponse(r, content_type=content_type)
     else:
@@ -1047,6 +1040,27 @@ def download(request, id, resolution=None, format='webm', part=None):
         response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
     return response
 
+def torrent(request, id, filename=None):
+    item = get_object_or_404(models.Item, public_id=id)
+    if not item.access(request.user):
+        return HttpResponseForbidden()
+    if not item.torrent:
+        raise Http404
+    if not filename or filename.endswith('.torrent'):
+        response = HttpResponse(item.get_torrent(request),
+                                content_type='application/x-bittorrent')
+        filename = utils.safe_filename("%s.torrent" % item.get('title'))
+        response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
+        return response
+    while filename.startswith('/'):
+        filename = filename[1:]
+    filename = filename.replace('/../', '/')
+    filename = item.path('torrent/%s' % filename)
+    filename = os.path.abspath(os.path.join(settings.MEDIA_ROOT, filename))
+    response = HttpFileResponse(filename)
+    response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % \
+        quote(os.path.basename(filename.encode('utf-8')))
+    return response
+
 def video(request, id, resolution, format, index=None, track=None):
     resolution = int(resolution)
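The filename branch of the new `torrent` view sanitizes the requested path by hand, stripping leading slashes and the literal `/../` before anchoring the result under `MEDIA_ROOT`, and otherwise relies on the URL layer to normalize the path. A more defensive variant resolves the candidate and verifies it stays inside the base directory; the helper and names here are illustrative, not pandora's:

```
# Sketch: confining a user-supplied filename to a base directory.
import os

def safe_join(base_dir, user_path):
    base_dir = os.path.abspath(base_dir)
    candidate = os.path.abspath(os.path.join(base_dir, user_path.lstrip('/')))
    if os.path.commonpath([base_dir, candidate]) != base_dir:
        raise ValueError('path escapes base directory')
    return candidate
```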
@@ -1268,6 +1282,12 @@ def atom_xml(request):
                 el.text = "1:1"
 
     if has_capability(request.user, 'canDownloadVideo'):
+        if item.torrent:
+            el = ET.SubElement(entry, "link")
+            el.attrib['rel'] = 'enclosure'
+            el.attrib['type'] = 'application/x-bittorrent'
+            el.attrib['href'] = '%s/torrent/' % page_link
+            el.attrib['length'] = '%s' % ox.get_torrent_size(item.torrent.path)
         # FIXME: loop over streams
         # for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])):
         for s in item.streams().filter(source=None):
@@ -1290,15 +1310,12 @@ def atom_xml(request):
         'application/atom+xml'
     )
 
 
 def oembed(request):
     format = request.GET.get('format', 'json')
     maxwidth = int(request.GET.get('maxwidth', 640))
     maxheight = int(request.GET.get('maxheight', 480))
 
-    url = request.GET.get('url')
-    if not url:
-        raise Http404
+    url = request.GET['url']
     parts = urlparse(url).path.split('/')
     if len(parts) < 2:
         raise Http404
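One behavioral difference hidden in the `oembed` hunk: `request.GET['url']` raises `MultiValueDictKeyError` (surfacing as a 500) when the parameter is absent, while the left-hand side's `.get()` plus `Http404` turns the missing parameter into a 404. The guard as a small reusable helper:

```
# Sketch: turn a missing query parameter into a 404 instead of a 500.
from django.http import Http404

def require_get_param(request, name):
    value = request.GET.get(name)
    if not value:
        raise Http404
    return value
```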
@@ -1,7 +0,0 @@
-from django.apps import AppConfig
-
-
-class ItemListConfig(AppConfig):
-    default_auto_field = "django.db.models.BigAutoField"
-    name = 'itemlist'
-
Some files were not shown because too many files have changed in this diff.