Compare commits

...

321 commits

Author SHA1 Message Date
j
ff236e8828 only add oidc urls if oidc is enabled 2024-11-08 12:47:47 +00:00
j
34af2b1fab add ocid based login 2024-11-08 12:29:35 +00:00
j
d83309d4cd ff 2024-11-07 16:28:18 +00:00
j
59c2045ac6 move first signup code into function for reuse 2024-11-07 15:57:16 +00:00
j
a24d96b098 log invalid api requests 2024-11-07 14:28:28 +00:00
j
03daede441 pass download 2024-10-18 17:49:54 +01:00
j
9e6ecb5459 avoid people accidentally adding itesm to current video 2024-10-18 15:57:10 +01:00
j
e7ede6ade0 fix sort 2024-10-13 17:20:33 +01:00
j
f4bfe9294b reuse getSortValue 2024-10-13 17:10:11 +01:00
j
c69ca372ee sort annotations 2024-10-13 17:06:15 +01:00
j
9e00dd09e3 allow adding global yt-dlp flags 2024-10-09 17:49:21 +01:00
j
7cc1504069 fix isClipsQuery check 2024-09-16 20:51:37 +01:00
j
d5ace7aeca use pandoractl to update db 2024-09-16 17:38:48 +01:00
j
1b1442e664 might not be loaded 2024-09-16 17:23:21 +01:00
j
18cbf0ec9c print pandora versions during update 2024-09-16 09:06:10 +01:00
j
a8aa619217 stop using ppa, use deb repository from code.0x2620.org instead 2024-09-11 23:01:57 +01:00
j
ff1c929d4d don't checkout oxtimelines twice 2024-09-11 15:05:21 +01:00
j
e8c28c16c3 use master as default 2024-09-11 15:00:43 +01:00
j
caae7d84bc raw regexp strings 2024-09-02 13:36:55 +02:00
j
a218b906cc documents data view 2024-08-30 16:23:07 +02:00
j
446a748a79 include filename in pandora config 2024-08-25 15:26:55 +02:00
j
1ccc8df75c use exif tags for image rotation 2024-08-23 18:33:54 +02:00
j
f245824448 no opacity 2024-08-09 16:57:25 +02:00
j
12e0181157 make user italic 2024-08-09 10:29:06 +02:00
j
4748930460 option to show user in mobile view 2024-08-08 14:37:27 +02:00
j
b8de041316 typo 2024-08-03 18:22:06 +02:00
j
140b7454e0 add sudo example 2024-06-19 12:03:11 +02:00
j
9ed5a4abd9 listen on ipv6 port by default 2024-06-19 12:01:08 +02:00
j
d7b2cb18f7 use legacy build of pdf.js 2024-06-18 20:06:52 +02:00
j
48f5bcfd11 some german terms 2024-06-14 15:50:40 +01:00
j
b482600179 reuse fps 2024-06-08 11:44:46 +01:00
j
ca701b9749 force_framerate is in video track 2024-06-08 11:42:22 +01:00
j
3579a4de45 force_framerate is in video track 2024-06-08 11:41:35 +01:00
j
df1a96f97a force framerate if source has 90k fps 2024-06-08 11:39:22 +01:00
j
8b60075f39 pdf.js update 2024-06-02 09:12:22 +01:00
j
9e464a1d63 update pdf.js 2024-06-01 11:00:44 +01:00
j
d98177e686 add mjs as javascript extension 2024-06-01 10:59:11 +01:00
j
27d64f2ad5 use code.0x2620.org instead of git.0x2620.org 2024-06-01 10:37:27 +01:00
j
cf0ad718f7 only show clipLayer clips in clips view 2024-05-17 14:13:52 +02:00
j
8e32737776 option to disable empty clips 2024-05-17 12:08:29 +02:00
j
3b56274713 import subtitles with language 2024-05-17 12:08:20 +02:00
j
774450d263 missing imports and typos 2024-04-05 21:20:45 +01:00
j
e1a28c941a fix country fallback 2024-04-05 21:20:15 +01:00
j
c55c733bcd celery fixes 2024-04-05 21:19:57 +01:00
j
853efbba9a link source/project 2024-04-05 10:41:36 +01:00
j
5ade53a0b5 don't show key twice 2024-04-05 10:27:05 +01:00
j
5af5812179 transcript->translation 2024-04-03 19:06:34 +01:00
j
a25509e1a4 use css class for inline images 2024-04-02 22:10:01 +02:00
j
8fa00f31a8 only show content/transcript once 2024-04-02 22:01:05 +02:00
j
82615cfb18 update padma documentKeys 2024-04-02 21:56:39 +02:00
j
5bf71ead09 fallback item 2024-02-17 11:01:32 +00:00
j
55f07ba4b7 fix poster 2024-02-17 11:01:21 +00:00
j
7ad121d912 merge changes from phantasma 2024-02-17 10:55:11 +00:00
j
1b7ee7b275 update edit preview in backend 2024-02-17 10:52:19 +00:00
j
03bd598785 format date 2024-02-17 10:30:40 +00:00
j
40c8c52180 fix rightslevels 2024-02-17 10:30:23 +00:00
j
9b2fbe2e3f docker-compose -> docker compose 2024-01-08 12:10:13 +01:00
j
ecfdee6298 use code.0x2620.org as container registry everywhere 2024-01-07 22:49:03 +01:00
j
5ffcdda762 fix celery docker setup 2024-01-07 19:33:29 +01:00
j
037be04549 use postgres 15 2024-01-07 19:15:27 +01:00
j
d02055d85e chown : 2024-01-07 17:21:16 +01:00
j
3683e60497 default Network not needed 2024-01-07 17:19:35 +01:00
j
876044ce74 use code.0x2620.org as container registry 2024-01-07 16:35:48 +01:00
j
74fca45a9f use /etc/apt/trusted.gpg.d/ instead of apt-key add 2023-12-08 16:17:28 +00:00
j
bc3208a846 tr translations 2023-12-02 18:59:24 +01:00
j
540ee0f4c0 tr translations 2023-12-02 18:45:49 +01:00
j
35852c36cf translate filters 2023-12-02 18:43:25 +01:00
j
1c8ea51d9b tag more translations 2023-12-02 17:49:54 +01:00
j
ba5fba5a10 typo 2023-12-02 17:25:12 +01:00
j
69cd34489a update turkish translations 2023-12-02 17:22:44 +01:00
j
5fa3b66a43 translate site sections 2023-12-02 17:18:50 +01:00
j
f9edbbe1c6 featured edits should work too 2023-11-23 11:38:33 +00:00
j
4794c0f68a fix remux 2023-11-20 20:58:58 +00:00
j
de818c4204 better error for failed imports 2023-11-20 10:19:45 +00:00
j
bcece622e3 fix import 2023-11-20 09:57:25 +00:00
j
7c96d9e994 try audio/video only and without limit after that 2023-11-20 09:56:23 +00:00
j
2cd000698e fix js errors 2023-11-18 21:11:16 +01:00
j
a32ede6b2f more metadata on default info page 2023-11-18 21:04:31 +01:00
j
a890b11790 make elasticsearch work as a single-node and limit ram 2023-11-18 20:52:30 +01:00
j
dca95f9d95 update dependencies 2023-11-18 16:38:20 +01:00
j
667ad0c9d5 use pip for local installs 2023-11-18 16:31:12 +01:00
j
4ef8773562 update yt-dlp 2023-11-18 16:31:02 +01:00
j
384d92214d stram is done if reusing upload 2023-11-18 16:30:50 +01:00
j
19bb7e3b83 don't fail if hieght is None 2023-11-18 12:14:12 +01:00
j
95485f3e4b stream is available after reuse 2023-11-18 12:03:33 +01:00
j
4c415c6b5d get the right resolution 2023-11-18 11:50:08 +01:00
j
a537963b9f fix links with html entities 2023-11-17 11:36:15 +01:00
j
ee11e6b7e1 fix quotes in project titles(more) 2023-11-17 11:28:39 +01:00
j
b82440df24 fix quotes in project titles 2023-11-17 11:25:28 +01:00
j
4fed1112a6 description value has bto be escaped 2023-11-17 11:22:27 +01:00
j
f671971b35 support sites that don't have 480p 2023-11-15 15:37:27 +01:00
j
c0420cabcb display additional keys 2023-11-15 15:36:26 +01:00
j
06dbc277c7 don't fail if extract.timelines is called from another folder 2023-11-14 14:44:57 +01:00
j
a2b7cc9744 include next item tv api 2023-11-04 12:37:45 +01:00
j
615860a7e8 complex filters might be too long for dialog, scroll in that case 2023-10-30 12:48:50 +01:00
j
a99a3eee7a secondary menu 2023-10-27 15:54:01 +02:00
j
32a73f402b load pdf.js with version 2023-10-27 15:53:44 +02:00
j
47b348f724 add embed button to pdf viewer 2023-10-27 13:47:56 +02:00
j
5bc0b70d73 update pdf.js 2023-10-27 13:19:54 +02:00
j
b0356a63bd add crop icons 2023-10-27 13:19:44 +02:00
j
6cf62ed1cd add pandora pdf.js glue 2023-10-27 13:16:54 +02:00
j
39015c0148 clone with --depth 1 causes issues in get_version of python-ox. use full checkout instead 2023-09-22 13:32:08 +02:00
j
900f9bb44d show description in mobile view 2023-08-31 12:22:57 +01:00
j
628bc728ed 2d30059cee followup, textsize fixed in python-ox, switch to Pillow 10 2023-08-25 17:30:07 +02:00
j
869d154d5b fix /m/ view for edits with _ in id 2023-08-25 17:29:24 +02:00
j
2d30059cee pillow 10 fails with: AttributeError: 'ImageDraw' object has no attribute 'textsize' downgrade until that is fixed 2023-08-25 12:30:35 +02:00
j
cbaeffde68 Image.ANTIALIAS->Image.LANCZOS 2023-08-24 23:39:54 +02:00
j
eee51e672c fix upload 2023-08-24 22:22:43 +02:00
j
49187c8faf merge /m view 2023-08-23 21:01:33 +02:00
j
2905774b10 typo 2023-08-18 15:04:21 +02:00
j
d7f087125e speed up stream lookup 2023-08-18 14:59:45 +02:00
j
61dd667a71 source is used in padma, use other key 2023-08-17 12:14:28 +02:00
j
ce27979259 use direct import 2023-08-17 12:14:02 +02:00
j
09e3f2bc7d more translations 2023-08-04 18:48:21 +02:00
j
d44e5e2b25 first round of turkish translations 2023-08-04 17:14:06 +02:00
j
56f248894e better error if we have invalid locale files 2023-08-04 17:13:56 +02:00
j
a8d906743a wrap canShare so we can also limit it to views that work 2023-08-04 14:28:42 +02:00
j
f2ad63143a fix translation creation 2023-08-04 14:04:20 +02:00
j
48aba7bde8 support batch editing user 2023-08-04 13:54:36 +02:00
j
a7e660a8da sequence id migration 2023-07-29 13:43:00 +02:00
j
a5c35d0f41 update db changed check 2023-07-29 13:42:07 +02:00
j
9d5222f0cc oder of video/audio settings matters in ffmpeg>5 2023-07-29 11:07:36 +02:00
j
a25ad7c947 revert 8c977 wrong format, use b:v instead of vb, b:a instead of ab, no speed for 1st pass vp9 2023-07-29 10:58:52 +02:00
j
4073942ec2 only use vp9 if also enabled 2023-07-29 10:27:47 +02:00
j
8c977ba11b newer version of ffmpeg require -v:pass to limit 2-pass encoing to video 2023-07-28 19:53:13 +02:00
j
5c951e2559 fix file create 2023-07-28 19:18:31 +02:00
j
094fb06b92 fix default error log sort 2023-07-28 19:08:37 +02:00
j
fdc7279836 fix auto field 2023-07-28 14:59:13 +02:00
j
f561f1b5e2 don't break annotation index 2023-07-28 00:01:10 +02:00
j
27e5449e59 Celery broker_connection_retry_on_startup=True 2023-07-27 23:50:01 +02:00
j
4fb8879ba0 migrate bigint ids 2023-07-27 23:45:37 +02:00
j
543d01696e dont use lambda in default 2023-07-27 23:30:19 +02:00
j
a035d37229 add apps.py to all apps 2023-07-27 19:27:00 +02:00
j
ff0c9b8221 only run install -r requirements once 2023-07-27 19:05:54 +02:00
j
13d6f7e316 update pip first 2023-07-27 19:05:54 +02:00
j
262e3f29e0 update repo urls 2023-07-27 19:05:54 +02:00
j
00194f13ba now with Debian 12 support 2023-07-27 19:05:47 +02:00
j
27c2e9c849 update tornado version 2023-07-27 15:42:21 +02:00
j
c80f16c77a update django/celery 2023-07-27 15:35:53 +02:00
j
d4df903f82 add multi value add/remove to items 2023-07-27 15:06:43 +02:00
j
7cc5ae8d65 remove unused gpac reference, no longer in debian 2023-07-27 13:11:18 +02:00
j
4913bf8d38 no share button for guests until it only is enabled for urls that work 2023-07-21 11:17:26 +01:00
j
7369954c5f self->document 2023-07-21 11:14:38 +01:00
j
30af377ecb no preview for html pages 2023-07-21 11:13:17 +01:00
j
6e283ad870 fix colleciton links 2023-07-20 12:33:12 +01:00
j
b13b621fd0 fix update 2023-07-15 19:09:38 +05:30
j
84c2a3ac3c make mobile code usable for embedding 2023-07-15 12:59:38 +05:30
j
cf6374e8a6 fix annotation clips 2023-07-15 12:41:41 +05:30
j
d4e89db4c1 fix sort 2023-07-15 12:38:27 +05:30
j
a8cc838d7b fix edit preview 2023-07-15 12:28:15 +05:30
j
cff1c06546 fix annotation links 2023-07-15 12:23:19 +05:30
j
9b1f482a21 fix post update 2023-07-15 12:16:32 +05:30
j
bea0d301a4 add share link at /m/, add share dialog in view menu, fix preview for documents 2023-07-15 12:15:23 +05:30
j
17801df8de remove trailing space 2023-07-15 12:01:50 +05:30
j
5466247848 dont fail if no videopoints exist 2023-07-12 15:07:39 +05:30
j
e7292a5989 elasticsearch no longer uses doc_type, enable on padma and icma 2023-07-12 14:59:55 +05:30
j
b0ece6a566 404 instead of error for invalid oembed requests 2023-07-12 14:17:09 +05:30
j
83df2c0011 remove torrent backend 2023-07-10 14:37:24 +05:30
j
9355ae691d return empty string instead of invalid data url 2023-07-09 15:43:50 +05:30
j
5225026d66 more edit paste fixes 2023-07-09 15:15:54 +05:30
j
dcdbfee72d fix folder collapsed state per section 2023-07-09 15:04:57 +05:30
j
e41b7e19d4 only take first locale 2023-07-09 09:58:54 +05:30
j
1175e5d5bf avoid scroll for long document titles, closes #3140 2023-07-08 19:30:11 +05:30
j
6e8a338f8a add submenu seperator, closes #1983 2023-07-08 19:13:38 +05:30
j
6fb635ebf8 this, closes #3118 2023-07-08 19:02:37 +05:30
j
11c8bb9076 avoid error if switching quickly between videos and edits 2023-07-08 15:16:06 +05:30
j
fd294baec9 make sure in/out/position is <= duration, closes #696 2023-07-08 14:09:23 +05:30
j
51038b7c59 update outdated code 2023-07-08 14:08:53 +05:30
j
adaeb16c69 fix hiding of lists with : in name 2023-07-07 16:14:28 +05:30
j
409c5a1fc2 avoid undefined errors 2023-07-07 16:14:17 +05:30
j
512f07400d don't fail if hidden is not set 2023-07-07 11:49:56 +05:30
j
034b448846 add ability to hide list/edits/collections from personal section but keep around in case they are shared or linked from elsewhere, but cluster the personal section 2023-07-07 11:46:09 +05:30
j
bd9d2ecd7e slightly better group sort 2023-07-06 17:32:05 +05:30
j
028def73d9 resize documents filter on sidebar resize, closes: #3268 2023-07-06 16:50:22 +05:30
j
a667fc5b5b autocomplete keywords 2023-07-06 16:22:33 +05:30
j
f5becafad4 avoid negative pixels 2023-07-06 16:22:23 +05:30
j
92d5c6e763 empty value 2023-07-06 13:59:41 +05:30
j
962be98592 width/height are integer 2023-07-06 10:48:24 +05:30
j
eb15170059 don't fail for invalid resolution requests 2023-07-06 10:34:44 +05:30
j
a2c2e32bfe speed up get api call for items with many files 2023-07-06 09:43:02 +05:30
j
d3b9d87c43 fix annotation title, no startswith for annotations 2023-06-30 16:48:51 +05:30
j
8c3d089bd8 pandora..browser might be undefined 2023-06-27 19:33:21 +05:30
j
4ddb170bc9 pandora..findInput.focusInput might be undefined 2023-06-27 19:29:47 +05:30
j
f65336c28d list might not exist 2023-06-27 12:45:15 +05:30
j
345195e959 more missing panels with early resize 2023-06-27 12:30:53 +05:30
j
69fae04bb8 fall back to use url as is 2023-06-26 10:48:38 +05:30
j
94fc4fbe7a we can only update view if it exists. switch otherwise 2023-06-24 16:13:25 +05:30
j
64b8967ded avoid 'Cannot convert undefined or null to object' on android if device is rotated during initial load 2023-06-24 16:03:16 +05:30
j
068293050e fall back to default edit settings 2023-06-24 14:29:09 +05:30
j
dffb47ffef avoid error if list has not loaded yet 2023-06-24 14:28:46 +05:30
j
9c3edac263 install new requirements 2023-06-23 16:26:14 +05:30
j
9340c0e578 update to new celery api 2023-06-23 16:21:31 +05:30
j
f71434a1ff always reload on close of batch edit dialog if something has changed 2023-06-22 14:48:59 +05:30
j
3c73ef8999 allow batch editing one document 2023-06-22 14:35:35 +05:30
j
f664927b5f columnRequired should not be set for non default keys 2023-06-22 14:31:25 +05:30
j
21db208556 use isSubtitles to check if subtitle layer exists 2023-06-22 14:21:34 +05:30
j
55e47c36bd ugettext_lazy->gettext_lazy 2023-06-13 22:58:29 +01:00
j
48e6d4af6f render search result highlights as images and show in pages view 2023-06-12 14:30:32 +01:00
j
57d3fc0d32 don't store id in batch edits 2023-06-10 13:01:43 +01:00
j
0d70326aa8 fallback to empty string not undefined 2023-06-10 12:49:48 +01:00
j
614beae48e use base64 for urls (work some urls don't work otherwise) 2023-06-08 14:45:52 +02:00
j
19bd818461 prepare for bookworm/lunar 2023-06-08 00:41:08 +02:00
j
448a4ff5e1 update readme to bullseye 2023-06-07 23:00:28 +02:00
j
42bfbb0808 use focal oxframe builds for bullseye 2023-06-07 23:00:28 +02:00
j
7216e255de avoid installing too new version 2023-03-08 12:03:27 +01:00
j
a3880f3b38 fix level check 2023-02-21 18:26:43 +01:00
j
97c8cf67b9 fix subtitles in player view 2023-02-14 10:37:55 +01:00
j
873ec27803 only list items/documents < max level 2023-01-05 14:03:44 +00:00
j
623bbd472c users can see private items if they own it. limit to max_level instead, a80af1 fixup 2023-01-04 14:41:39 +00:00
j
99a135c7d3 don't fail on empty title 2023-01-04 13:58:15 +00:00
j
a80af18400 not editable if rightslevel is > allowed level 2022-12-08 12:26:17 +01:00
j
ffc2504c0f download via media url 2022-11-30 19:44:15 +01:00
j
729b2ea771 use download dialog in player 2022-11-30 19:44:03 +01:00
j
ccbc966282 list view needs sort, increase columnwidth 2022-10-20 11:16:31 +02:00
j
171c0b6095 make user and group available in list view 2022-10-20 11:10:40 +02:00
j
1c8b5b4b48 avoid double test for h264 2022-10-20 11:09:43 +02:00
j
1b0de9ade4 fix manage for python3.10 2022-04-22 18:07:46 +01:00
j
65710e5ef1 add trusted.gpg file 2022-03-26 12:51:30 +01:00
j
40e2481f61 switch to yt-dlp 2022-03-18 16:53:19 +00:00
j
d191f24d03 .ogg can be video too 2022-02-19 13:51:25 +01:00
j
86de7f6269 getPasteIndex might not exist if no edits exist 2022-02-15 18:29:17 +01:00
j
9d124d65af log error, don't fail 2022-02-15 14:56:29 +01:00
j
4be61a3982 smaller transaction, use update_fields to update clip values 2022-02-14 13:32:24 +01:00
j
e3e5e3b9e5 also load localInit in embed view 2022-01-21 17:39:08 +00:00
j
5505d495cf use same subtitle defaults for embedded player 2022-01-21 17:32:24 +00:00
j
af65750363 avoid transaction.atomic insided of transaction.atomic 2022-01-12 11:03:25 +01:00
j
5cfe392e22 close more fds 2022-01-03 12:37:02 +01:00
j
79c5d948b5 close filedescriptor after use 2022-01-03 12:33:15 +01:00
j
e4133ffa1d missing import 2022-01-03 12:26:16 +01:00
j
723c4e3f42 revert 85fd930 not needed 2021-11-24 17:11:40 +01:00
j
85fd930360 update cache on editing items 2021-11-24 17:02:05 +01:00
j
e237d08120 fix sort by value of smart edits 2021-11-24 14:23:50 +01:00
j
5c89662f14 add page views 2021-11-19 14:51:16 +01:00
j
fa42aa4810 log pandora errors too 2021-11-16 12:04:52 +00:00
j
d1c157ffb4 limit titles and names by default 2021-11-16 12:04:33 +00:00
j
950bec609d fulltext search in pages 2021-11-15 15:27:12 +00:00
j
cc2b60453b fix location for pandoractl install 2021-11-15 11:52:48 +00:00
j
2fec9590f7 fix install 2021-11-15 11:50:51 +00:00
j
0cfb499594 only keep templates in git 2021-11-15 11:49:19 +00:00
j
491d8c4629 use environment file for tasks and encoding queue 2021-11-15 11:42:32 +00:00
j
476fa9f0a3 space 2021-11-15 11:36:03 +00:00
j
65fc082b1b if file has audio/video track flag as type 2021-11-14 13:42:06 +00:00
j
49356d2f7a replace all % and & 2021-11-14 13:41:49 +00:00
j
4dbb5e3c51 typo 2021-11-05 00:12:46 +00:00
j
ae8363d4e2 don't show fulltext keys by default 2021-11-04 16:37:32 +00:00
j
10dd0768d1 include itemTitleKeys in for clips 2021-11-01 11:07:57 +00:00
j
bc123a885e update poster inline 2021-10-08 17:26:24 +01:00
j
ed31259fdd don't wait inside task 2021-10-08 16:10:44 +01:00
j
26708eff1f fall back to default preview ratio if item does not have videoRatio 2021-09-30 21:44:20 +01:00
j
fd318ef706 fall back to empty string 2021-09-16 18:49:38 +02:00
j
8a16c7e37f escape & too 2021-09-15 14:05:11 +02:00
j
6118bf3c3a escape & 2021-09-15 14:02:36 +02:00
j
cabcbeb35d list might have gone 2021-08-08 20:29:13 +02:00
j
098d953bbc filter more invalid clips 2021-08-08 19:47:38 +02:00
j
0ba80ada1f fix timeline for edits with 0 duration clips 2021-08-06 14:39:48 +02:00
j
0a7b6c0452 include outer cuts (before, after) 2021-08-06 12:05:19 +02:00
j
b1834520bd typos 2021-08-03 14:38:28 +02:00
j
066d273e10 fix tags later for friends 2021-08-03 13:16:52 +02:00
j
cd8cd40626 hook to add local urls 2021-08-01 18:39:01 +02:00
j
cf95aeff7a fix NoCopyright icon in config.pandora.jsonc 2021-06-15 22:39:28 +01:00
j
feb10ececb disable keyframe index 2021-06-14 10:47:47 +01:00
j
47e4f1cd84 pass subtitles to player too 2021-06-03 17:32:01 +01:00
j
098811d383 fallback to first mode 2021-06-03 17:17:57 +01:00
j
41d417e2a1 editable documents 2021-06-03 17:17:44 +01:00
j
a4d0830e4b fix canEdit flag in renderGroup 2021-06-03 16:53:52 +01:00
j
5583fad801 fix variable 2021-06-03 16:44:46 +01:00
j
ffd735e622 users can add annotations to own videos 2021-06-03 16:38:08 +01:00
j
894e5c5064 only add rights button if we have a rights page 2021-06-03 16:31:13 +01:00
j
166b566fde if user can edit metadata of item, should also be able to add annotations for that item. independent of canAddAnnotations rights 2021-06-03 16:25:56 +01:00
j
157cc7478b fix paste via menu 2021-06-03 16:24:13 +01:00
j
02b89c06dd pass item data to icon 2021-06-03 16:13:48 +01:00
j
49ef611bdb remove unused dialog 2021-06-03 16:02:03 +01:00
j
86bb19b9b6 fallback to previewRatio 2021-05-27 09:41:35 +01:00
j
a9f883859a no need to use full title for tmp files 2021-05-23 19:28:08 +01:00
j
08dc8f2bea overwrite subtitlesTrack 2021-05-21 20:38:25 +01:00
j
c17cc6d5cf don't fail on invalid arrays 2021-05-19 16:02:18 +01:00
j
54362e8b41 reference pandoractl instead of update.py 2021-05-15 14:38:58 +02:00
j
4e1e6e3b17 new youtube-dl version 2021-05-15 14:22:24 +02:00
j
814f98a571 only switch to subtitles if no layer was selected 2021-05-05 20:54:25 +02:00
j
7fa3f7d8fe no title fallback 2021-04-29 21:57:52 +02:00
j
7f31998254 add 0p for audio only profile 2021-04-01 11:30:28 +02:00
j
20b3831862 don't fail if getsize fails on tmp file 2021-03-29 11:03:12 +02:00
j
4689af2050 replace all not just first 2021-03-19 10:19:29 +01:00
j
7ea1f38a0f check body 2021-03-19 10:19:19 +01:00
j
7d8b38e627 cast list values to str, just in case 2021-02-05 10:07:14 +01:00
j
b377956a13 add option to add/remove values to listkeys in batchedit dialog 2020-11-26 20:23:10 +01:00
j
e7df18f727 typo 2020-11-08 23:26:16 +01:00
j
1bba157f7f pass referer to get_info 2020-11-08 22:54:49 +01:00
j
41fce072a3 pass referer in more places 2020-11-08 22:43:47 +01:00
j
4cf28434eb pass referer to video download 2020-11-08 22:29:24 +01:00
j
5d6e753b05 DEA.L. and DEAIL. work for aac 2020-10-17 09:47:07 +02:00
j
f157ebf2c4 honor _blank 2020-10-15 11:46:16 +02:00
j
ac8c01ee98 add menu item to toggle icon size 2020-09-29 11:03:39 +02:00
j
f13f7d238c still load config 2020-09-27 18:02:23 +02:00
j
63d82ad7e7 remove config reloader 2020-09-27 17:58:35 +02:00
j
7ab789f80a docker fixes 2020-09-27 17:53:37 +02:00
j
213adcaaaa update requirements 2020-09-27 17:19:02 +02:00
j
fdac35c00d update django, sync docker install 2020-09-27 13:03:45 +02:00
j
03e85c6eef double check 2020-09-25 12:49:26 +02:00
j
09f9580e1e use new id after create 2020-09-22 14:49:44 +02:00
j
2dfe0f3ff2 queue [add|edit]Annotation calls
addAnnotation was called multiple times creating multiple annotations
editAnnotation calls might overwrite later calls depending on response
time.
2020-09-22 12:49:23 +02:00
j
5bbb6ca195 support ipv6 2020-09-21 15:03:27 +02:00
j
2779d8d099 change password in case user already existed 2020-09-19 14:49:44 +02:00
j
d00cf08638 move buld item metadata edits to tasks too 2020-09-13 16:41:34 +02:00
j
945ac98dad fix info in item documents panel 2020-09-13 11:06:22 +02:00
j
5024a2ba0c display lists in collection list view 2020-09-11 14:31:50 +02:00
j
940632369a list view, load keys 2020-09-11 14:16:43 +02:00
j
1c0462393c add to current item 2020-09-11 14:07:08 +02:00
j
40edf9dd4a typo 2020-09-11 13:57:54 +02:00
j
ca7741f92c run bulk update as task 2020-09-11 13:55:01 +02:00
j
2d8a3f24dc can see > direct upload 2020-08-29 23:54:19 +02:00
j
d795e40344 fix username 2020-08-29 23:37:59 +02:00
j
ffb512a304 add oshash to changelog, keep upload filename, show direct upload instance 2020-08-29 23:32:57 +02:00
j
800725d54c add more collumn state 2020-08-24 01:05:42 +02:00
j
80597790f9 no title placeholder 2020-08-18 10:17:14 +02:00
j
638dfc8bb3 get all ids 2020-07-21 15:26:55 +02:00
j
e4815f091d move rights level code into infoViewUtils 2020-07-21 14:51:31 +02:00
j
4bde77abcc rebuild posters 2020-07-21 14:51:01 +02:00
j
953cb93745 load editable in batch dialog 2020-07-21 14:23:08 +02:00
j
19da7ca26d delete items from archive while in list 2020-07-21 13:12:31 +02:00
j
8788dd9fe8 pandoractl installs itself 2020-06-07 17:19:46 +02:00
637 changed files with 158409 additions and 85258 deletions

2
.gitignore vendored
View file

@ -36,3 +36,5 @@ pandora/gunicorn_config.py
.DS_Store .DS_Store
.env .env
overlay/ overlay/
pandora/encoding.conf
pandora/tasks.conf

View file

@ -1,4 +1,4 @@
FROM 0x2620/pandora-base:latest FROM code.0x2620.org/0x2620/pandora-base:latest
LABEL maintainer="0x2620@0x2620.org" LABEL maintainer="0x2620@0x2620.org"

View file

@ -7,7 +7,7 @@
We recommend to run pan.do/ra inside of LXD or LXC or dedicated VM or server. We recommend to run pan.do/ra inside of LXD or LXC or dedicated VM or server.
You will need at least 2GB of free disk space You will need at least 2GB of free disk space
pan.do/ra is known to work with Ubuntu 18.04, 20.04 and Debian/10 (buster), pan.do/ra is known to work with Debian/12 (bookworm) and Ubuntu 20.04,
other distributions might also work, let us know if it works for you. other distributions might also work, let us know if it works for you.
Use the following commands as root to install pan.do/ra and all dependencies: Use the following commands as root to install pan.do/ra and all dependencies:
@ -16,7 +16,7 @@
cd /root cd /root
curl -sL https://pan.do/ra-install > pandora_install.sh curl -sL https://pan.do/ra-install > pandora_install.sh
chmod +x pandora_install.sh chmod +x pandora_install.sh
export BRANCH=stable # change to 'master' to get current developement version export BRANCH=master # change to 'stable' to get the latest release (sometimes outdated)
./pandora_install.sh 2>&1 | tee pandora_install.log ./pandora_install.sh 2>&1 | tee pandora_install.log
``` ```
@ -50,4 +50,9 @@ export BRANCH=stable # change to 'master' to get current developement version
More info at More info at
https://code.0x2620.org/0x2620/pandora/wiki/Customization https://code.0x2620.org/0x2620/pandora/wiki/Customization
## Update
To update your existing instlalation run
pandoractl update

37
ctl
View file

@ -17,7 +17,7 @@ if [ "$action" = "init" ]; then
SUDO="" SUDO=""
PANDORA_USER=`ls -l update.py | cut -f3 -d" "` PANDORA_USER=`ls -l update.py | cut -f3 -d" "`
if [ `whoami` != $PANDORA_USER ]; then if [ `whoami` != $PANDORA_USER ]; then
SUDO="sudo -H -u $PANDORA_USER" SUDO="sudo -E -H -u $PANDORA_USER"
fi fi
$SUDO python3 -m venv --system-site-packages . $SUDO python3 -m venv --system-site-packages .
branch=`cat .git/HEAD | sed 's@/@\n@g' | tail -n1` branch=`cat .git/HEAD | sed 's@/@\n@g' | tail -n1`
@ -27,25 +27,30 @@ if [ "$action" = "init" ]; then
$SUDO bin/python3 -m pip install -U --ignore-installed "pip<9" $SUDO bin/python3 -m pip install -U --ignore-installed "pip<9"
fi fi
if [ ! -d static/oxjs ]; then if [ ! -d static/oxjs ]; then
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxjs.git static/oxjs $SUDO git clone -b $branch https://code.0x2620.org/0x2620/oxjs.git static/oxjs
fi fi
$SUDO mkdir -p src $SUDO mkdir -p src
if [ ! -d src/oxtimelines ]; then
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/oxtimelines.git src/oxtimelines
fi
for package in oxtimelines python-ox; do for package in oxtimelines python-ox; do
cd ${BASE} cd ${BASE}
if [ ! -d src/${package} ]; then if [ ! -d src/${package} ]; then
$SUDO git clone --depth 1 -b $branch https://git.0x2620.org/${package}.git src/${package} $SUDO git clone -b $branch https://code.0x2620.org/0x2620/${package}.git src/${package}
fi fi
cd ${BASE}/src/${package} cd ${BASE}/src/${package}
$SUDO ${BASE}/bin/python setup.py develop
$SUDO ${BASE}/bin/pip install -e .
done done
cd ${BASE} cd ${BASE}
$SUDO ./bin/pip install -r requirements.txt $SUDO ./bin/pip install -r requirements.txt
if [ ! -e pandora/gunicorn_config.py ]; then for template in gunicorn_config.py encoding.conf tasks.conf; do
$SUDO cp pandora/gunicorn_config.py.in pandora/gunicorn_config.py if [ ! -e pandora/$template ]; then
fi $SUDO cp pandora/${template}.in pandora/$template
fi
done
exit 0
fi
if [ "$action" = "version" ]; then
git rev-list HEAD --count
exit 0 exit 0
fi fi
@ -62,11 +67,10 @@ if [ ! -z $cmd ]; then
SUDO="" SUDO=""
PANDORA_USER=`ls -l update.py | cut -f3 -d" "` PANDORA_USER=`ls -l update.py | cut -f3 -d" "`
if [ `whoami` != $PANDORA_USER ]; then if [ `whoami` != $PANDORA_USER ]; then
SUDO="sudo -H -u $PANDORA_USER" SUDO="sudo -E -H -u $PANDORA_USER"
fi fi
shift shift
$SUDO "$BASE/$cmd" $@ exec $SUDO "$BASE/$cmd" $@
exit $?
fi fi
if [ `whoami` != 'root' ]; then if [ `whoami` != 'root' ]; then
@ -74,10 +78,15 @@ if [ `whoami` != 'root' ]; then
exit 1 exit 1
fi fi
if [ "$action" = "install" ]; then if [ "$action" = "install" ]; then
cd "`dirname "$0"`" cd "`dirname "$self"`"
BASE=`pwd` BASE=`pwd`
if [ -x /bin/systemctl ]; then if [ -x /bin/systemctl ]; then
if [ -d /etc/systemd/system/ ]; then if [ -d /etc/systemd/system/ ]; then
for template in gunicorn_config.py encoding.conf tasks.conf; do
if [ ! -e pandora/$template ]; then
$SUDO cp pandora/${template}.in pandora/$template
fi
done
for service in $SERVICES; do for service in $SERVICES; do
if [ -e /lib/systemd/system/${service}.service ]; then if [ -e /lib/systemd/system/${service}.service ]; then
rm -f /lib/systemd/system/${service}.service \ rm -f /lib/systemd/system/${service}.service \

View file

@ -15,7 +15,6 @@ services:
- "127.0.0.1:2620:80" - "127.0.0.1:2620:80"
networks: networks:
- backend - backend
- default
links: links:
- pandora - pandora
- websocketd - websocketd
@ -28,7 +27,7 @@ services:
restart: unless-stopped restart: unless-stopped
db: db:
image: postgres:latest image: postgres:15
networks: networks:
- backend - backend
env_file: .env env_file: .env

View file

@ -1,4 +1,4 @@
FROM debian:buster FROM debian:12
LABEL maintainer="0x2620@0x2620.org" LABEL maintainer="0x2620@0x2620.org"

View file

@ -1,9 +1,17 @@
#!/bin/bash #!/bin/bash
UBUNTU_CODENAME=bionic
if [ -e /etc/os-release ]; then if [ -e /etc/os-release ]; then
. /etc/os-release . /etc/os-release
fi fi
if [ -z "$UBUNTU_CODENAME" ]; then
UBUNTU_CODENAME=bionic
fi
if [ "$VERSION_CODENAME" = "bullseye" ]; then
UBUNTU_CODENAME=focal
fi
if [ "$VERSION_CODENAME" = "bookworm" ]; then
UBUNTU_CODENAME=lunar
fi
export DEBIAN_FRONTEND=noninteractive export DEBIAN_FRONTEND=noninteractive
echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99languages echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99languages
@ -30,6 +38,8 @@ apt-get update -qq
apt-get install -y \ apt-get install -y \
netcat-openbsd \ netcat-openbsd \
sudo \ sudo \
rsync \
iproute2 \
vim \ vim \
wget \ wget \
pwgen \ pwgen \
@ -42,22 +52,23 @@ apt-get install -y \
python3-numpy \ python3-numpy \
python3-psycopg2 \ python3-psycopg2 \
python3-pyinotify \ python3-pyinotify \
python3-simplejson \
python3-lxml \ python3-lxml \
python3-cssselect \ python3-cssselect \
python3-html5lib \ python3-html5lib \
python3-ox \ python3-ox \
python3-elasticsearch \
oxframe \ oxframe \
ffmpeg \ ffmpeg \
mkvtoolnix \ mkvtoolnix \
gpac \
imagemagick \ imagemagick \
poppler-utils \ poppler-utils \
youtube-dl \
ipython3 \ ipython3 \
tesseract-ocr \
tesseract-ocr-eng \
postfix \ postfix \
postgresql-client postgresql-client
apt-get install -y --no-install-recommends youtube-dl rtmpdump
apt-get clean apt-get clean
rm -f /install.sh rm -f /install.sh

View file

@ -11,7 +11,7 @@ else
proxy= proxy=
fi fi
docker build $proxy -t 0x2620/pandora-base base docker build $proxy -t code.0x2620.org/0x2620/pandora-base base
docker build -t 0x2620/pandora-nginx nginx docker build -t code.0x2620.org/0x2620/pandora-nginx nginx
cd .. cd ..
docker build -t 0x2620/pandora . docker build -t code.0x2620.org/0x2620/pandora .

View file

@ -6,7 +6,9 @@ user=pandora
export LANG=en_US.UTF-8 export LANG=en_US.UTF-8
mkdir -p /run/pandora mkdir -p /run/pandora
chown -R ${user}.${user} /run/pandora chown -R ${user}:${user} /run/pandora
update="/usr/bin/sudo -u $user -E -H /srv/pandora/update.py"
# pan.do/ra services # pan.do/ra services
if [ "$action" = "pandora" ]; then if [ "$action" = "pandora" ]; then
@ -26,12 +28,12 @@ if [ "$action" = "pandora" ]; then
/overlay/install.py /overlay/install.py
echo "Initializing database..." echo "Initializing database..."
echo "CREATE EXTENSION pg_trgm;" | /srv/pandora/pandora/manage.py dbshell echo "CREATE EXTENSION pg_trgm;" | /srv/pandora/pandora/manage.py dbshell || true
/srv/pandora/pandora/manage.py init_db /srv/pandora/pandora/manage.py init_db
/srv/pandora/update.py db $update db
echo "Generating static files..." echo "Generating static files..."
/srv/pandora/update.py static chown -R ${user}:${user} /srv/pandora/
chown -R ${user}.${user} /srv/pandora/ $update static
touch /srv/pandora/initialized touch /srv/pandora/initialized
fi fi
/srv/pandora_base/docker/wait-for db 5432 /srv/pandora_base/docker/wait-for db 5432
@ -44,54 +46,53 @@ if [ "$action" = "encoding" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672 /srv/pandora_base/docker/wait-for rabbitmq 5672
name=pandora-encoding-$(hostname) name=pandora-encoding-$(hostname)
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \ exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/python \ /srv/pandora/bin/celery \
/srv/pandora/pandora/manage.py \ -A app worker \
celery worker \ -Q encoding -n ${name} \
-c 1 \ --pidfile /run/pandora/encoding.pid \
-Q encoding -n $name \ --max-tasks-per-child 500 \
-l INFO -c 1 \
-l INFO
fi fi
if [ "$action" = "tasks" ]; then if [ "$action" = "tasks" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672 /srv/pandora_base/docker/wait-for rabbitmq 5672
name=pandora-default-$(hostname) name=pandora-default-$(hostname)
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \ exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/python \ /srv/pandora/bin/celery \
/srv/pandora/pandora/manage.py \ -A app worker \
celery worker \ -Q default,celery -n ${name} \
-Q default,celery -n $name \ --pidfile /run/pandora/tasks.pid \
--maxtasksperchild 1000 \ --max-tasks-per-child 1000 \
-l INFO -l INFO
fi fi
if [ "$action" = "cron" ]; then if [ "$action" = "cron" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672 /srv/pandora_base/docker/wait-for rabbitmq 5672
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \ exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/python \ /srv/pandora/bin/celery \
/srv/pandora/pandora/manage.py \ -A app beat \
celerybeat -s /run/pandora/celerybeat-schedule \ -s /run/pandora/celerybeat-schedule \
--pidfile /run/pandora/cron.pid \ --pidfile /run/pandora/cron.pid \
-l INFO -l INFO
fi fi
if [ "$action" = "websocketd" ]; then if [ "$action" = "websocketd" ]; then
/srv/pandora_base/docker/wait-for-file /srv/pandora/initialized /srv/pandora_base/docker/wait-for-file /srv/pandora/initialized
/srv/pandora_base/docker/wait-for rabbitmq 5672 /srv/pandora_base/docker/wait-for rabbitmq 5672
cd /srv/pandora/pandora
exec /usr/bin/sudo -u $user -E -H \ exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/bin/python \ /srv/pandora/bin/python \
/srv/pandora/pandora/manage.py websocketd /srv/pandora/pandora/manage.py websocketd
fi fi
# pan.do/ra management and update # pan.do/ra management and update
if [ "$action" = "manage.py" ]; then if [ "$action" = "ctl" ]; then
shift shift
exec /usr/bin/sudo -u $user -E -H \ exec /srv/pandora/ctl "$@"
/srv/pandora/pandora/manage.py "$@"
fi
if [ "$action" = "update.py" ]; then
shift
exec /usr/bin/sudo -u $user -E -H \
/srv/pandora/update.py "$@"
fi fi
if [ "$action" = "bash" ]; then if [ "$action" = "bash" ]; then
shift shift
@ -102,9 +103,9 @@ fi
# pan.do/ra setup hooks # pan.do/ra setup hooks
if [ "$action" = "docker-compose.yml" ]; then if [ "$action" = "docker-compose.yml" ]; then
cat /srv/pandora_base/docker-compose.yml | \ cat /srv/pandora_base/docker-compose.yml | \
sed "s#build: \.#image: 0x2620/pandora:latest#g" | \ sed "s#build: \.#image: code.0x2620.org/0x2620/pandora:latest#g" | \
sed "s#\./overlay:#.:#g" | \ sed "s#\./overlay:#.:#g" | \
sed "s#build: docker/nginx#image: 0x2620/pandora-nginx:latest#g" sed "s#build: docker/nginx#image: code.0x2620.org/0x2620/pandora-nginx:latest#g"
exit exit
fi fi
if [ "$action" = ".env" ]; then if [ "$action" = ".env" ]; then
@ -130,5 +131,5 @@ echo " docker run 0x2620/pandora setup | sh"
echo echo
echo adjust created files to match your needs and run: echo adjust created files to match your needs and run:
echo echo
echo " docker-compose up" echo " docker compose up"
echo echo

View file

@ -56,13 +56,9 @@ cp /srv/pandora/docker/entrypoint.sh /entrypoint.sh
mv /srv/pandora/ /srv/pandora_base/ mv /srv/pandora/ /srv/pandora_base/
mkdir /pandora mkdir /pandora
ln -s /pandora /srv/pandora ln -s /pandora /srv/pandora
cat > /usr/local/bin/update.py << EOF
#!/bin/sh
exec /srv/pandora/update.py \$@
EOF
cat > /usr/local/bin/manage.py << EOF cat > /usr/local/bin/pandoractl << EOF
#!/bin/sh #!/bin/sh
exec /srv/pandora/pandora/manage.py \$@ exec /srv/pandora/ctl \$@
EOF EOF
chmod +x /usr/local/bin/manage.py /usr/local/bin/update.py chmod +x /usr/local/bin/pandoractl

12
docker/publish.sh Normal file
View file

@ -0,0 +1,12 @@
#!/bin/bash
# push new version of pan.do/ra to code.0x2620.org
set -e
cd /tmp
git clone https://code.0x2620.org/0x2620/pandora
cd pandora
./docker/build.sh
docker push code.0x2620.org/0x2620/pandora-base:latest
docker push code.0x2620.org/0x2620/pandora-nginx:latest
docker push code.0x2620.org/0x2620/pandora:latest

View file

@ -1,18 +1,18 @@
#!/bin/sh #!/bin/sh
docker run 0x2620/pandora docker-compose.yml > docker-compose.yml docker run --rm code.0x2620.org/0x2620/pandora docker-compose.yml > docker-compose.yml
if [ ! -e .env ]; then if [ ! -e .env ]; then
docker run 0x2620/pandora .env > .env docker run --rm code.0x2620.org/0x2620/pandora .env > .env
echo .env >> .gitignore echo .env >> .gitignore
fi fi
if [ ! -e config.jsonc ]; then if [ ! -e config.jsonc ]; then
docker run 0x2620/pandora config.jsonc > config.jsonc docker run --rm code.0x2620.org/0x2620/pandora config.jsonc > config.jsonc
fi fi
cat > README.md << EOF cat > README.md << EOF
pan.do/ra docker instance pan.do/ra docker instance
this folder was created with this folder was created with
docker run 0x2620/pandora setup | sh docker run --rm code.0x2620.org/0x2620/pandora setup | sh
To start pan.do/ra adjust the files in this folder: To start pan.do/ra adjust the files in this folder:
@ -22,11 +22,14 @@ To start pan.do/ra adjust the files in this folder:
and to get started run this: and to get started run this:
docker-compose up -d docker compose up -d
To update pan.do/ra run: To update pan.do/ra run:
docker-compose run pandora update.py docker compose run --rm pandora ctl update
To run pan.do/ra manage shell:
docker compose run --rm pandora ctl manage shell
EOF EOF
touch __init__.py touch __init__.py

View file

@ -1,5 +1,5 @@
#!/bin/sh #!/bin/sh
TIMEOUT=60 TIMEOUT=180
TARGET="$1" TARGET="$1"
for i in `seq $TIMEOUT` ; do for i in `seq $TIMEOUT` ; do

View file

@ -17,6 +17,7 @@ server {
#server_name pandora.YOURDOMAIN.COM; #server_name pandora.YOURDOMAIN.COM;
listen 80 default; listen 80 default;
listen [::]:80 default;
access_log /var/log/nginx/pandora.access.log; access_log /var/log/nginx/pandora.access.log;
error_log /var/log/nginx/pandora.error.log; error_log /var/log/nginx/pandora.error.log;

1
etc/sudoers.d/pandora Normal file
View file

@ -0,0 +1 @@
pandora ALL=(ALL:ALL) NOPASSWD:/usr/local/bin/pandoractl

View file

@ -11,7 +11,7 @@ PIDFile=/run/pandora/cron.pid
WorkingDirectory=/srv/pandora/pandora WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \ ExecStart=/srv/pandora/bin/celery \
-A app beat \ -A app beat \
-s /run/pandora/celerybeat-schedule \ --scheduler django_celery_beat.schedulers:DatabaseScheduler \
--pidfile /run/pandora/cron.pid \ --pidfile /run/pandora/cron.pid \
-l INFO -l INFO
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID

View file

@ -7,14 +7,16 @@ Type=simple
Restart=always Restart=always
User=pandora User=pandora
Group=pandora Group=pandora
EnvironmentFile=/srv/pandora/pandora/encoding.conf
PIDFile=/run/pandora/encoding.pid PIDFile=/run/pandora/encoding.pid
WorkingDirectory=/srv/pandora/pandora WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \ ExecStart=/srv/pandora/bin/celery \
-A app worker \ -A app worker \
-Q encoding -n pandora-encoding \ -Q encoding -n pandora-encoding \
--pidfile /run/pandora/encoding.pid \ --pidfile /run/pandora/encoding.pid \
--maxtasksperchild 500 \ -c $CONCURRENCY \
-l INFO --max-tasks-per-child $MAX_TASKS_PER_CHILD \
-l $LOGLEVEL
ExecReload=/bin/kill -TERM $MAINPID ExecReload=/bin/kill -TERM $MAINPID
[Install] [Install]

View file

@ -7,14 +7,16 @@ Type=simple
Restart=always Restart=always
User=pandora User=pandora
Group=pandora Group=pandora
EnvironmentFile=/srv/pandora/pandora/tasks.conf
PIDFile=/run/pandora/tasks.pid PIDFile=/run/pandora/tasks.pid
WorkingDirectory=/srv/pandora/pandora WorkingDirectory=/srv/pandora/pandora
ExecStart=/srv/pandora/bin/celery \ ExecStart=/srv/pandora/bin/celery \
-A app worker \ -A app worker \
-Q default,celery -n pandora-default \ -Q default,celery -n pandora-default \
--pidfile /run/pandora/tasks.pid \ --pidfile /run/pandora/tasks.pid \
--maxtasksperchild 1000 \ -c $CONCURRENCY \
-l INFO --max-tasks-per-child $MAX_TASKS_PER_CHILD \
-l $LOGLEVEL
ExecReload=/bin/kill -TERM $MAINPID ExecReload=/bin/kill -TERM $MAINPID
[Install] [Install]

View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class AnnotationConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'annotation'

View file

@ -27,6 +27,7 @@ class Command(BaseCommand):
parser.add_argument('username', help='username') parser.add_argument('username', help='username')
parser.add_argument('item', help='item') parser.add_argument('item', help='item')
parser.add_argument('layer', help='layer') parser.add_argument('layer', help='layer')
parser.add_argument('language', help='language', default="")
parser.add_argument('filename', help='filename.srt') parser.add_argument('filename', help='filename.srt')
def handle(self, *args, **options): def handle(self, *args, **options):
@ -34,6 +35,7 @@ class Command(BaseCommand):
public_id = options['item'] public_id = options['item']
layer_id = options['layer'] layer_id = options['layer']
filename = options['filename'] filename = options['filename']
language = options.get("language")
user = User.objects.get(username=username) user = User.objects.get(username=username)
item = Item.objects.get(public_id=public_id) item = Item.objects.get(public_id=public_id)
@ -47,6 +49,9 @@ class Command(BaseCommand):
for i in range(len(annotations)-1): for i in range(len(annotations)-1):
if annotations[i]['out'] == annotations[i+1]['in']: if annotations[i]['out'] == annotations[i+1]['in']:
annotations[i]['out'] = annotations[i]['out'] - 0.001 annotations[i]['out'] = annotations[i]['out'] - 0.001
if language:
for annotation in annotations:
annotation["value"] = '<span lang="%s">%s</span>' % (language, annotation["value"])
tasks.add_annotations.delay({ tasks.add_annotations.delay({
'item': item.public_id, 'item': item.public_id,
'layer': layer_id, 'layer': layer_id,

View file

@ -0,0 +1,18 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('annotation', '0003_auto_20160219_1537'),
]
operations = [
migrations.AlterField(
model_name='annotation',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -163,28 +163,25 @@ class Annotation(models.Model):
self.sortvalue = None self.sortvalue = None
self.languages = None self.languages = None
with transaction.atomic(): if not self.clip or self.start != self.clip.start or self.end != self.clip.end:
if not self.clip or self.start != self.clip.start or self.end != self.clip.end: self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
self.clip, created = Clip.get_or_create(self.item, self.start, self.end)
with transaction.atomic():
if set_public_id: if set_public_id:
self.set_public_id() self.set_public_id()
super(Annotation, self).save(*args, **kwargs) super(Annotation, self).save(*args, **kwargs)
if self.clip: if self.clip:
Clip.objects.filter(**{ self.clip.update_findvalue()
'id': self.clip.id, setattr(self.clip, self.layer, True)
self.layer: False self.clip.save(update_fields=[self.layer, 'sortvalue', 'findvalue'])
}).update(**{self.layer: True})
# update clip.findvalue
self.clip.save()
# update matches in bulk if called from load_subtitles # update matches in bulk if called from load_subtitles
if not delay_matches: if not delay_matches:
self.update_matches() self.update_matches()
self.update_documents() self.update_documents()
self.update_translations() self.update_translations()
def update_matches(self): def update_matches(self):
from place.models import Place from place.models import Place
@ -267,7 +264,10 @@ class Annotation(models.Model):
from translation.models import Translation from translation.models import Translation
layer = self.get_layer() layer = self.get_layer()
if layer.get('translate'): if layer.get('translate'):
Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT}) for lang in settings.CONFIG['languages']:
if lang == settings.CONFIG['language']:
continue
Translation.objects.get_or_create(lang=lang, key=self.value, defaults={'type': Translation.CONTENT})
def delete(self, *args, **kwargs): def delete(self, *args, **kwargs):
with transaction.atomic(): with transaction.atomic():

View file

@ -5,12 +5,12 @@ from django.contrib.auth import get_user_model
from django.db import transaction from django.db import transaction
import ox import ox
from celery.task import task from app.celery import app
from .models import Annotation from .models import Annotation
@task(ignore_results=False, queue='default') @app.task(ignore_results=False, queue='default')
def add_annotations(data): def add_annotations(data):
from item.models import Item from item.models import Item
from entity.models import Entity from entity.models import Entity
@ -51,7 +51,7 @@ def add_annotations(data):
annotation.item.update_facets() annotation.item.update_facets()
return True return True
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_item(id, force=False): def update_item(id, force=False):
from item.models import Item from item.models import Item
from clip.models import Clip from clip.models import Clip
@ -72,7 +72,7 @@ def update_item(id, force=False):
a.item.save() a.item.save()
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_annotations(layers, value): def update_annotations(layers, value):
items = {} items = {}

View file

@ -183,7 +183,7 @@ def addAnnotation(request, data):
layer_id = data['layer'] layer_id = data['layer']
layer = get_by_id(settings.CONFIG['layers'], layer_id) layer = get_by_id(settings.CONFIG['layers'], layer_id)
if layer['canAddAnnotations'].get(request.user.profile.get_level()): if layer['canAddAnnotations'].get(request.user.profile.get_level()) or item.editable(request.user):
if layer['type'] == 'entity': if layer['type'] == 'entity':
try: try:
value = Entity.get_by_name(ox.decode_html(data['value']), layer['entity']).get_id() value = Entity.get_by_name(ox.decode_html(data['value']), layer['entity']).get_id()
@ -241,8 +241,7 @@ def addAnnotations(request, data):
layer_id = data['layer'] layer_id = data['layer']
layer = get_by_id(settings.CONFIG['layers'], layer_id) layer = get_by_id(settings.CONFIG['layers'], layer_id)
if item.editable(request.user) \ if item.editable(request.user):
and layer['canAddAnnotations'].get(request.user.profile.get_level()):
response = json_response() response = json_response()
data['user'] = request.user.username data['user'] = request.user.username
t = add_annotations.delay(data) t = add_annotations.delay(data)

7
pandora/app/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class AppConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'app'

View file

@ -6,16 +6,8 @@ root_dir = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
root_dir = os.path.dirname(root_dir) root_dir = os.path.dirname(root_dir)
os.chdir(root_dir) os.chdir(root_dir)
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings') os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
app = Celery('pandora') app = Celery('pandora', broker_connection_retry_on_startup=True)
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY') app.config_from_object('django.conf:settings', namespace='CELERY')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks() app.autodiscover_tasks()

View file

@ -24,9 +24,6 @@ User = get_user_model()
_win = (sys.platform == "win32") _win = (sys.platform == "win32")
RUN_RELOADER = True
NOTIFIER = None
def get_version(): def get_version():
git_dir = join(dirname(dirname(dirname(__file__))), '.git') git_dir = join(dirname(dirname(dirname(__file__))), '.git')
if exists(git_dir): if exists(git_dir):
@ -136,7 +133,13 @@ def load_config(init=False):
added = [] added = []
for key in sorted(d): for key in sorted(d):
if key not in c: if key not in c:
added.append("\"%s\": %s," % (key, json.dumps(d[key]))) if key not in (
'hidden',
'find',
'findDocuments',
'videoPoints',
):
added.append("\"%s\": %s," % (key, json.dumps(d[key])))
c[key] = d[key] c[key] = d[key]
if added: if added:
sys.stderr.write("adding default %s:\n\t" % section) sys.stderr.write("adding default %s:\n\t" % section)
@ -257,46 +260,6 @@ check the README for further details.
except: except:
pass pass
def reloader_thread():
global NOTIFIER
settings.RELOADER_RUNNING=True
_config_mtime = 0
try:
import pyinotify
INOTIFY = True
except:
INOTIFY = False
if INOTIFY:
def add_watch():
name = os.path.realpath(settings.SITE_CONFIG)
wm.add_watch(name, pyinotify.IN_CLOSE_WRITE, reload_config)
def reload_config(event):
load_config()
add_watch()
wm = pyinotify.WatchManager()
add_watch()
notifier = pyinotify.Notifier(wm)
NOTIFIER = notifier
notifier.loop()
else:
while RUN_RELOADER:
try:
stat = os.stat(settings.SITE_CONFIG)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if mtime > _config_mtime:
load_config()
_config_mtime = mtime
time.sleep(10)
except:
#sys.stderr.write("reloading config failed\n")
pass
def update_static(): def update_static():
oxjs_build = os.path.join(settings.STATIC_ROOT, 'oxjs/tools/build/build.py') oxjs_build = os.path.join(settings.STATIC_ROOT, 'oxjs/tools/build/build.py')
if os.path.exists(oxjs_build): if os.path.exists(oxjs_build):
@ -364,7 +327,11 @@ def update_static():
#locale #locale
for f in sorted(glob(os.path.join(settings.STATIC_ROOT, 'json/locale.pandora.*.json'))): for f in sorted(glob(os.path.join(settings.STATIC_ROOT, 'json/locale.pandora.*.json'))):
with open(f) as fd: with open(f) as fd:
locale = json.load(fd) try:
locale = json.load(fd)
except:
print("failed to parse %s" % f)
raise
site_locale = f.replace('locale.pandora', 'locale.' + settings.CONFIG['site']['id']) site_locale = f.replace('locale.pandora', 'locale.' + settings.CONFIG['site']['id'])
locale_file = f.replace('locale.pandora', 'locale') locale_file = f.replace('locale.pandora', 'locale')
print('write', locale_file) print('write', locale_file)
@ -407,17 +374,4 @@ def update_geoip(force=False):
print('failed to download GeoLite2-City.mmdb') print('failed to download GeoLite2-City.mmdb')
def init(): def init():
if not settings.RELOADER_RUNNING: load_config(True)
load_config(True)
if settings.RELOAD_CONFIG:
thread.start_new_thread(reloader_thread, ())
def shutdown():
if settings.RELOADER_RUNNING:
RUN_RELOADER = False
settings.RELOADER_RUNNING = False
if NOTIFIER:
NOTIFIER.stop()

View file

@ -11,6 +11,8 @@ def run(cmd):
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
if p.returncode != 0: if p.returncode != 0:
print('failed to run:', cmd)
print(stdout)
print(stderr) print(stderr)
sys.exit(1) sys.exit(1)

View file

@ -0,0 +1,23 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='page',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='settings',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

34
pandora/app/oidc.py Normal file
View file

@ -0,0 +1,34 @@
import unicodedata
from django.contrib.auth import get_user_model
import mozilla_django_oidc.auth
from user.utils import prepare_user
User = get_user_model()
class OIDCAuthenticationBackend(mozilla_django_oidc.auth.OIDCAuthenticationBackend):
def create_user(self, claims):
user = super(OIDCAuthenticationBackend, self).create_user(claims)
username = claims.get("preferred_username")
n = 1
if username and username != user.username:
uname = username
while User.objects.filter(username=uname).exclude(id=user.id).exists():
n += 1
uname = '%s (%s)' % (username, n)
user.username = uname
user.save()
prepare_user(user)
return user
def update_user(self, user, claims):
print("update user", user, claims)
#user.save()
return user
def generate_username(email):
return unicodedata.normalize('NFKC', email)[:150]

View file

@ -2,13 +2,16 @@
import datetime import datetime
from celery.task import periodic_task from app.celery import app
from celery.schedules import crontab from celery.schedules import crontab
@app.task(queue='encoding')
@periodic_task(run_every=crontab(hour=6, minute=0), queue='encoding')
def cron(**kwargs): def cron(**kwargs):
from django.db import transaction from django.db import transaction
from django.contrib.sessions.models import Session from django.contrib.sessions.models import Session
Session.objects.filter(expire_date__lt=datetime.datetime.now()).delete() Session.objects.filter(expire_date__lt=datetime.datetime.now()).delete()
transaction.commit() transaction.commit()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(crontab(hour=6, minute=0), cron.s())

View file

@ -1,7 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import copy
from datetime import datetime from datetime import datetime
import base64
import copy
from django.shortcuts import render, redirect from django.shortcuts import render, redirect
from django.conf import settings from django.conf import settings
@ -53,17 +53,18 @@ def embed(request, id):
}) })
def redirect_url(request, url): def redirect_url(request, url):
if request.META['QUERY_STRING']: try:
url += "?" + request.META['QUERY_STRING'] url = base64.decodebytes(url.encode()).decode()
except:
pass
if settings.CONFIG['site'].get('sendReferrer', False): if settings.CONFIG['site'].get('sendReferrer', False):
return redirect(url) return redirect(url)
else: else:
return HttpResponse('<script>document.location.href=%s;</script>'%json.dumps(url)) return HttpResponse('<script>document.location.href=%s;</script>' % json.dumps(url))
def opensearch_xml(request): def opensearch_xml(request):
osd = ET.Element('OpenSearchDescription') osd = ET.Element('OpenSearchDescription')
osd.attrib['xmlns']="http://a9.com/-/spec/opensearch/1.1/" osd.attrib['xmlns'] = "http://a9.com/-/spec/opensearch/1.1/"
e = ET.SubElement(osd, 'ShortName') e = ET.SubElement(osd, 'ShortName')
e.text = settings.SITENAME e.text = settings.SITENAME
e = ET.SubElement(osd, 'Description') e = ET.SubElement(osd, 'Description')
@ -162,7 +163,7 @@ def init(request, data):
del config['keys'] del config['keys']
if 'HTTP_ACCEPT_LANGUAGE' in request.META: if 'HTTP_ACCEPT_LANGUAGE' in request.META:
response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0] response['data']['locale'] = request.META['HTTP_ACCEPT_LANGUAGE'].split(';')[0].split('-')[0].split(',')[0]
if request.META.get('HTTP_X_PREFIX') == 'NO': if request.META.get('HTTP_X_PREFIX') == 'NO':
config['site']['videoprefix'] = '' config['site']['videoprefix'] = ''
@ -183,6 +184,7 @@ def init(request, data):
except: except:
pass pass
config['site']['oidc'] = bool(getattr(settings, 'OIDC_RP_CLIENT_ID', False))
response['data']['site'] = config response['data']['site'] = config
response['data']['user'] = init_user(request.user, request) response['data']['user'] = init_user(request.user, request)
request.session['last_init'] = str(datetime.now()) request.session['last_init'] = str(datetime.now())
@ -245,7 +247,7 @@ def getEmbedDefaults(request, data):
i = qs[0].cache i = qs[0].cache
response['data']['item'] = i['id'] response['data']['item'] = i['id']
response['data']['itemDuration'] = i['duration'] response['data']['itemDuration'] = i['duration']
response['data']['itemRatio'] = i['videoRatio'] response['data']['itemRatio'] = i.get('videoRatio', settings.CONFIG['video']['previewRatio'])
qs = List.objects.exclude(status='private').order_by('name') qs = List.objects.exclude(status='private').order_by('name')
if qs.exists(): if qs.exists():
i = qs[0].json() i = qs[0].json()

7
pandora/archive/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ArchiveConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'archive'

View file

@ -1,10 +1,11 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import json import json
import subprocess import logging
import shutil
import tempfile
import os import os
import shutil
import subprocess
import tempfile
import ox import ox
from django.conf import settings from django.conf import settings
@ -14,6 +15,9 @@ from item.tasks import load_subtitles
from . import models from . import models
logger = logging.getLogger('pandora.' + __name__)
info_keys = [ info_keys = [
'title', 'title',
'description', 'description',
@ -36,8 +40,14 @@ info_key_map = {
'display_id': 'id', 'display_id': 'id',
} }
def get_info(url): YT_DLP = ['yt-dlp']
cmd = ['youtube-dl', '-j', '--all-subs', url] if settings.YT_DLP_EXTRA:
YT_DLP += settings.YT_DLP_EXTRA
def get_info(url, referer=None):
cmd = YT_DLP + ['-j', '--all-subs', url]
if referer:
cmd += ['--referer', referer]
p = subprocess.Popen(cmd, p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True) stderr=subprocess.PIPE, close_fds=True)
@ -57,6 +67,8 @@ def get_info(url):
info[-1]['tags'] = [] info[-1]['tags'] = []
if 'upload_date' in i and i['upload_date']: if 'upload_date' in i and i['upload_date']:
info[-1]['date'] = '-'.join([i['upload_date'][:4], i['upload_date'][4:6], i['upload_date'][6:]]) info[-1]['date'] = '-'.join([i['upload_date'][:4], i['upload_date'][4:6], i['upload_date'][6:]])
if 'referer' not in info[-1]:
info[-1]['referer'] = url
return info return info
def add_subtitles(item, media, tmp): def add_subtitles(item, media, tmp):
@ -84,9 +96,18 @@ def add_subtitles(item, media, tmp):
sub.selected = True sub.selected = True
sub.save() sub.save()
def download(item_id, url): def load_formats(url):
cmd = YT_DLP + ['-q', url, '-j', '-F']
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
formats = stdout.decode().strip().split('\n')[-1]
return json.loads(formats)
def download(item_id, url, referer=None):
item = Item.objects.get(public_id=item_id) item = Item.objects.get(public_id=item_id)
info = get_info(url) info = get_info(url, referer)
if not len(info): if not len(info):
return '%s contains no videos' % url return '%s contains no videos' % url
media = info[0] media = info[0]
@ -95,13 +116,19 @@ def download(item_id, url):
if isinstance(tmp, bytes): if isinstance(tmp, bytes):
tmp = tmp.decode('utf-8') tmp = tmp.decode('utf-8')
os.chdir(tmp) os.chdir(tmp)
cmd = ['youtube-dl', '-q', media['url']] cmd = YT_DLP + ['-q', media['url']]
if settings.CONFIG['video'].get('reuseUload', False): if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
cmd += ['-o', '%(title)80s.%(ext)s']
if settings.CONFIG['video'].get('reuseUpload', False):
max_resolution = max(settings.CONFIG['video']['resolutions']) max_resolution = max(settings.CONFIG['video']['resolutions'])
format = settings.CONFIG['video']['formats'][0] format = settings.CONFIG['video']['formats'][0]
if format == 'mp4': if format == 'mp4':
cmd += [ cmd += [
'-f', 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio', '-f', 'bestvideo[height<=%s][ext=mp4]+bestaudio[ext=m4a]' % max_resolution,
'--merge-output-format', 'mp4' '--merge-output-format', 'mp4'
] ]
elif format == 'webm': elif format == 'webm':
@ -111,6 +138,50 @@ def download(item_id, url):
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True) stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
if stderr and b'Requested format is not available.' in stderr:
formats = load_formats(url)
has_audio = bool([fmt for fmt in formats['formats'] if fmt['resolution'] == 'audio only'])
has_video = bool([fmt for fmt in formats['formats'] if 'x' in fmt['resolution']])
cmd = [
'yt-dlp', '-q', url,
'-o', '%(title)80s.%(ext)s'
]
if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
if has_video and not has_audio:
cmd += [
'-f', 'bestvideo[height<=%s][ext=mp4]' % max_resolution,
]
elif not has_video and has_audio:
cmd += [
'bestaudio[ext=m4a]'
]
else:
cmd = []
if cmd:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if stderr and b'Requested format is not available.' in stderr:
cmd = [
'yt-dlp', '-q', url,
'-o', '%(title)80s.%(ext)s'
]
if referer:
cmd += ['--referer', referer]
elif 'referer' in media:
cmd += ['--referer', media['referer']]
if cmd:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if stdout or stderr:
logger.error("import failed:\n%s\n%s\n%s", " ".join(cmd), stdout.decode(), stderr.decode())
parts = list(os.listdir(tmp)) parts = list(os.listdir(tmp))
if parts: if parts:
part = 1 part = 1
@ -138,6 +209,7 @@ def download(item_id, url):
f.extract_stream() f.extract_stream()
status = True status = True
else: else:
logger.error("failed to import %s file already exists %s", url, oshash)
status = 'file exists' status = 'file exists'
if len(parts) == 1: if len(parts) == 1:
add_subtitles(f.item, media, tmp) add_subtitles(f.item, media, tmp)

View file

@ -1,26 +1,30 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import os from distutils.spawn import find_executable
from glob import glob
from os.path import exists from os.path import exists
import fractions import fractions
import logging
import math
import os
import re
import shutil
import subprocess import subprocess
import tempfile import tempfile
import time import time
import math
import shutil
from distutils.spawn import find_executable
from glob import glob
import numpy as np import numpy as np
import ox import ox
import ox.image import ox.image
from ox.utils import json from ox.utils import json
from django.conf import settings from django.conf import settings
from PIL import Image from PIL import Image, ImageOps
from .chop import Chop, make_keyframe_index from .chop import Chop, make_keyframe_index
logger = logging.getLogger('pandora.' + __name__)
img_extension = 'jpg' img_extension = 'jpg'
MAX_DISTANCE = math.sqrt(3 * pow(255, 2)) MAX_DISTANCE = math.sqrt(3 * pow(255, 2))
@ -57,14 +61,15 @@ def supported_formats():
stdout = stdout.decode('utf-8') stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8') stderr = stderr.decode('utf-8')
version = stderr.split('\n')[0].split(' ')[2] version = stderr.split('\n')[0].split(' ')[2]
mp4 = 'libx264' in stdout and bool(re.compile('DEA.L. aac').findall(stdout))
return { return {
'version': version.split('.'), 'version': version.split('.'),
'ogg': 'libtheora' in stdout and 'libvorbis' in stdout, 'ogg': 'libtheora' in stdout and 'libvorbis' in stdout,
'webm': 'libvpx' in stdout and 'libvorbis' in stdout, 'webm': 'libvpx' in stdout and 'libvorbis' in stdout,
'vp8': 'libvpx' in stdout and 'libvorbis' in stdout, 'vp8': 'libvpx' in stdout and 'libvorbis' in stdout,
'vp9': 'libvpx-vp9' in stdout and 'libopus' in stdout, 'vp9': 'libvpx-vp9' in stdout and 'libopus' in stdout,
'mp4': 'libx264' in stdout and 'DEA.L. aac' in stdout, 'mp4': mp4,
'h264': 'libx264' in stdout and 'DEA.L. aac' in stdout, 'h264': mp4,
} }
def stream(video, target, profile, info, audio_track=0, flags={}): def stream(video, target, profile, info, audio_track=0, flags={}):
@ -145,10 +150,17 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
audioquality = -1 audioquality = -1
audiobitrate = '22k' audiobitrate = '22k'
audiochannels = 1 audiochannels = 1
elif profile == '0p':
info['video'] = []
audiorate = 48000
audioquality = 6
audiobitrate = None
audiochannels = None
audio_codec = 'libopus'
else: else:
height = 96 height = 96
if settings.FFMPEG_SUPPORTS_VP9: if settings.USE_VP9 and settings.FFMPEG_SUPPORTS_VP9:
audio_codec = 'libopus' audio_codec = 'libopus'
video_codec = 'libvpx-vp9' video_codec = 'libvpx-vp9'
@ -211,7 +223,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
bitrate = height*width*fps*bpp/1000 bitrate = height*width*fps*bpp/1000
video_settings = trim + [ video_settings = trim + [
'-vb', '%dk' % bitrate, '-b:v', '%dk' % bitrate,
'-aspect', aspect, '-aspect', aspect,
# '-vf', 'yadif', # '-vf', 'yadif',
'-max_muxing_queue_size', '512', '-max_muxing_queue_size', '512',
@ -239,6 +251,8 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
'-level', '4.0', '-level', '4.0',
'-pix_fmt', 'yuv420p', '-pix_fmt', 'yuv420p',
] ]
if info['video'][0].get("force_framerate"):
video_settings += ['-r:v', str(fps)]
video_settings += ['-map', '0:%s,0:0' % info['video'][0]['id']] video_settings += ['-map', '0:%s,0:0' % info['video'][0]['id']]
audio_only = False audio_only = False
else: else:
@ -278,7 +292,7 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
ac = min(ac, audiochannels) ac = min(ac, audiochannels)
audio_settings += ['-ac', str(ac)] audio_settings += ['-ac', str(ac)]
if audiobitrate: if audiobitrate:
audio_settings += ['-ab', audiobitrate] audio_settings += ['-b:a', audiobitrate]
if format == 'mp4': if format == 'mp4':
audio_settings += ['-c:a', 'aac', '-strict', '-2'] audio_settings += ['-c:a', 'aac', '-strict', '-2']
elif audio_codec == 'libopus': elif audio_codec == 'libopus':
@ -311,14 +325,15 @@ def stream(video, target, profile, info, audio_track=0, flags={}):
pass1_post = post[:] pass1_post = post[:]
pass1_post[-1] = '/dev/null' pass1_post[-1] = '/dev/null'
if format == 'webm': if format == 'webm':
pass1_post = ['-speed', '4'] + pass1_post if video_codec != 'libvpx-vp9':
pass1_post = ['-speed', '4'] + pass1_post
post = ['-speed', '1'] + post post = ['-speed', '1'] + post
cmds.append(base + ['-an', '-pass', '1', '-passlogfile', '%s.log' % target] cmds.append(base + ['-pass', '1', '-passlogfile', '%s.log' % target]
+ video_settings + pass1_post) + video_settings + ['-an'] + pass1_post)
cmds.append(base + ['-pass', '2', '-passlogfile', '%s.log' % target] cmds.append(base + ['-pass', '2', '-passlogfile', '%s.log' % target]
+ audio_settings + video_settings + post) + video_settings + audio_settings + post)
else: else:
cmds.append(base + audio_settings + video_settings + post) cmds.append(base + video_settings + audio_settings + post)
if settings.FFMPEG_DEBUG: if settings.FFMPEG_DEBUG:
print('\n'.join([' '.join(cmd) for cmd in cmds])) print('\n'.join([' '.join(cmd) for cmd in cmds]))
@ -426,10 +441,15 @@ def frame_direct(video, target, position):
r = run_command(cmd) r = run_command(cmd)
return r == 0 return r == 0
def open_image_rgb(image_source):
source = Image.open(image_source)
source = ImageOps.exif_transpose(source)
source = source.convert('RGB')
return source
def resize_image(image_source, image_output, width=None, size=None): def resize_image(image_source, image_output, width=None, size=None):
if exists(image_source): if exists(image_source):
source = Image.open(image_source).convert('RGB') source = open_image_rgb(image_source)
source_width = source.size[0] source_width = source.size[0]
source_height = source.size[1] source_height = source.size[1]
if size: if size:
@ -450,7 +470,7 @@ def resize_image(image_source, image_output, width=None, size=None):
height = max(height, 1) height = max(height, 1)
if width < source_width: if width < source_width:
resize_method = Image.ANTIALIAS resize_method = Image.LANCZOS
else: else:
resize_method = Image.BICUBIC resize_method = Image.BICUBIC
output = source.resize((width, height), resize_method) output = source.resize((width, height), resize_method)
@ -464,7 +484,7 @@ def timeline(video, prefix, modes=None, size=None):
size = [64, 16] size = [64, 16]
if isinstance(video, str): if isinstance(video, str):
video = [video] video = [video]
cmd = ['../bin/oxtimelines', cmd = [os.path.normpath(os.path.join(settings.BASE_DIR, '../bin/oxtimelines')),
'-s', ','.join(map(str, reversed(sorted(size)))), '-s', ','.join(map(str, reversed(sorted(size)))),
'-m', ','.join(modes), '-m', ','.join(modes),
'-o', prefix, '-o', prefix,
@ -596,7 +616,7 @@ def timeline_strip(item, cuts, info, prefix):
print(frame, 'cut', c, 'frame', s, frame, 'width', widths[s], box) print(frame, 'cut', c, 'frame', s, frame, 'width', widths[s], box)
# FIXME: why does this have to be frame+1? # FIXME: why does this have to be frame+1?
frame_image = Image.open(item.frame((frame+1)/fps)) frame_image = Image.open(item.frame((frame+1)/fps))
frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.ANTIALIAS) frame_image = frame_image.crop(box).resize((widths[s], timeline_height), Image.LANCZOS)
for x_ in range(widths[s]): for x_ in range(widths[s]):
line_image.append(frame_image.crop((x_, 0, x_ + 1, timeline_height))) line_image.append(frame_image.crop((x_, 0, x_ + 1, timeline_height)))
frame += widths[s] frame += widths[s]
@ -724,19 +744,24 @@ def remux_stream(src, dst):
cmd = [ cmd = [
settings.FFMPEG, settings.FFMPEG,
'-nostats', '-loglevel', 'error', '-nostats', '-loglevel', 'error',
'-map_metadata', '-1', '-sn',
'-i', src, '-i', src,
'-map_metadata', '-1', '-sn',
] + video + [ ] + video + [
] + audio + [ ] + audio + [
'-movflags', '+faststart', '-movflags', '+faststart',
dst dst
] ]
print(cmd)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=open('/dev/null', 'w'), stdout=subprocess.PIPE,
stderr=open('/dev/null', 'w'), stderr=subprocess.STDOUT,
close_fds=True) close_fds=True)
p.wait() stdout, stderr = p.communicate()
return True, None if stderr:
logger.error("failed to remux %s %s", cmd, stderr)
return False, stderr
else:
return True, None
def ffprobe(path, *args): def ffprobe(path, *args):

View file

@ -0,0 +1,100 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('archive', '0005_auto_20180804_1554'),
]
operations = [
migrations.AlterField(
model_name='file',
name='extension',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='file',
name='info',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='file',
name='language',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='part',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='part_title',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='file',
name='path',
field=models.CharField(default='', max_length=2048),
),
migrations.AlterField(
model_name='file',
name='sort_path',
field=models.CharField(default='', max_length=2048),
),
migrations.AlterField(
model_name='file',
name='type',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='file',
name='version',
field=models.CharField(default='', max_length=255, null=True),
),
migrations.AlterField(
model_name='frame',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='instance',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='stream',
name='error',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='stream',
name='format',
field=models.CharField(default='webm', max_length=255),
),
migrations.AlterField(
model_name='stream',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='stream',
name='info',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='volume',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -0,0 +1,17 @@
# Generated by Django 4.2.3 on 2023-08-18 12:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archive', '0006_alter_file_extension_alter_file_id_alter_file_info_and_more'),
]
operations = [
migrations.AddIndex(
model_name='stream',
index=models.Index(fields=['file', 'source', 'available'], name='archive_str_file_id_69a542_idx'),
),
]

View file

@ -151,8 +151,10 @@ class File(models.Model):
self.sampleate = 0 self.sampleate = 0
self.channels = 0 self.channels = 0
if self.framerate: if self.framerate and self.duration > 0:
self.pixels = int(self.width * self.height * float(utils.parse_decimal(self.framerate)) * self.duration) self.pixels = int(self.width * self.height * float(utils.parse_decimal(self.framerate)) * self.duration)
else:
self.pixels = 0
def get_path_info(self): def get_path_info(self):
data = {} data = {}
@ -181,6 +183,13 @@ class File(models.Model):
for type in ox.movie.EXTENSIONS: for type in ox.movie.EXTENSIONS:
if data['extension'] in ox.movie.EXTENSIONS[type]: if data['extension'] in ox.movie.EXTENSIONS[type]:
data['type'] = type data['type'] = type
if data['extension'] == 'ogg' and self.info.get('video'):
data['type'] = 'video'
if data['type'] == 'unknown':
if self.info.get('video'):
data['type'] = 'video'
elif self.info.get('audio'):
data['type'] = 'audio'
if 'part' in data and isinstance(data['part'], int): if 'part' in data and isinstance(data['part'], int):
data['part'] = str(data['part']) data['part'] = str(data['part'])
return data return data
@ -268,7 +277,7 @@ class File(models.Model):
if self.type not in ('audio', 'video'): if self.type not in ('audio', 'video'):
self.duration = None self.duration = None
else: elif self.id:
duration = sum([s.info.get('duration', 0) duration = sum([s.info.get('duration', 0)
for s in self.streams.filter(source=None)]) for s in self.streams.filter(source=None)])
if duration: if duration:
@ -276,7 +285,7 @@ class File(models.Model):
if self.is_subtitle: if self.is_subtitle:
self.available = self.data and True or False self.available = self.data and True or False
else: elif self.id:
self.available = not self.uploading and \ self.available = not self.uploading and \
self.streams.filter(source=None, available=True).count() self.streams.filter(source=None, available=True).count()
super(File, self).save(*args, **kwargs) super(File, self).save(*args, **kwargs)
@ -336,7 +345,9 @@ class File(models.Model):
def done_cb(): def done_cb():
if done: if done:
self.info.update(ox.avinfo(self.data.path)) info = ox.avinfo(self.data.path)
del info['path']
self.info.update(info)
self.parse_info() self.parse_info()
# reject invalid uploads # reject invalid uploads
if self.info.get('oshash') != self.oshash: if self.info.get('oshash') != self.oshash:
@ -363,8 +374,8 @@ class File(models.Model):
self.info.update(stream.info) self.info.update(stream.info)
self.parse_info() self.parse_info()
self.save() self.save()
if stream.info.get('video'): #if stream.info.get('video'):
extract.make_keyframe_index(stream.media.path) # extract.make_keyframe_index(stream.media.path)
return True, stream.media.size return True, stream.media.size
return save_chunk(stream, stream.media, chunk, offset, name, done_cb) return save_chunk(stream, stream.media, chunk, offset, name, done_cb)
return False, 0 return False, 0
@ -393,7 +404,7 @@ class File(models.Model):
config = settings.CONFIG['video'] config = settings.CONFIG['video']
height = self.info['video'][0]['height'] if self.info.get('video') else None height = self.info['video'][0]['height'] if self.info.get('video') else None
max_resolution = max(config['resolutions']) max_resolution = max(config['resolutions'])
if height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'): if height and height <= max_resolution and self.extension in ('mov', 'mkv', 'mp4', 'm4v'):
vcodec = self.get_codec('video') vcodec = self.get_codec('video')
acodec = self.get_codec('audio') acodec = self.get_codec('audio')
if vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS: if vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:
@ -404,7 +415,7 @@ class File(models.Model):
config = settings.CONFIG['video'] config = settings.CONFIG['video']
height = self.info['video'][0]['height'] if self.info.get('video') else None height = self.info['video'][0]['height'] if self.info.get('video') else None
max_resolution = max(config['resolutions']) max_resolution = max(config['resolutions'])
if height <= max_resolution and config['formats'][0] == self.extension: if height and height <= max_resolution and config['formats'][0] == self.extension:
vcodec = self.get_codec('video') vcodec = self.get_codec('video')
acodec = self.get_codec('audio') acodec = self.get_codec('audio')
if self.extension in ['mp4', 'm4v'] and vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS: if self.extension in ['mp4', 'm4v'] and vcodec in self.MP4_VCODECS and acodec in self.MP4_ACODECS:
@ -481,6 +492,13 @@ class File(models.Model):
user.is_staff or \ user.is_staff or \
self.item.user == user or \ self.item.user == user or \
self.item.groups.filter(id__in=user.groups.all()).count() > 0 self.item.groups.filter(id__in=user.groups.all()).count() > 0
if 'instances' in data and 'filename' in self.info and self.data:
data['instances'].append({
'ignore': False,
'path': self.info['filename'],
'user': self.item.user.username if self.item and self.item.user else 'system',
'volume': 'Direct Upload'
})
if not can_see_media: if not can_see_media:
if 'instances' in data: if 'instances' in data:
data['instances'] = [] data['instances'] = []
@ -716,6 +734,9 @@ class Stream(models.Model):
class Meta: class Meta:
unique_together = ("file", "resolution", "format") unique_together = ("file", "resolution", "format")
indexes = [
models.Index(fields=['file', 'source', 'available'])
]
file = models.ForeignKey(File, related_name='streams', on_delete=models.CASCADE) file = models.ForeignKey(File, related_name='streams', on_delete=models.CASCADE)
resolution = models.IntegerField(default=96) resolution = models.IntegerField(default=96)
@ -795,9 +816,15 @@ class Stream(models.Model):
shutil.move(self.file.data.path, target) shutil.move(self.file.data.path, target)
self.file.data.name = '' self.file.data.name = ''
self.file.save() self.file.save()
self.available = True
self.save()
done = True
elif self.file.can_remux(): elif self.file.can_remux():
ok, error = extract.remux_stream(media, target) ok, error = extract.remux_stream(media, target)
done = True if ok:
self.available = True
self.save()
done = True
if not done: if not done:
ok, error = extract.stream(media, target, self.name(), info, flags=self.flags) ok, error = extract.stream(media, target, self.name(), info, flags=self.flags)
@ -805,7 +832,7 @@ class Stream(models.Model):
# get current version from db and update # get current version from db and update
try: try:
self.refresh_from_db() self.refresh_from_db()
except archive.models.DoesNotExist: except Stream.DoesNotExist:
pass pass
else: else:
self.update_status(ok, error) self.update_status(ok, error)

View file

@ -1,11 +1,9 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from datetime import datetime from datetime import datetime
from time import time from time import time, monotonic
import celery.task.control
import kombu.five
from app.celery import app
from .models import File from .models import File
@ -18,7 +16,7 @@ def parse_job(job):
'file': f.oshash 'file': f.oshash
} }
if job['time_start']: if job['time_start']:
start_time = datetime.fromtimestamp(time() - (kombu.five.monotonic() - job['time_start'])) start_time = datetime.fromtimestamp(time() - (monotonic() - job['time_start']))
r.update({ r.update({
'started': start_time, 'started': start_time,
'running': (datetime.now() - start_time).total_seconds() 'running': (datetime.now() - start_time).total_seconds()
@ -30,7 +28,7 @@ def parse_job(job):
def status(): def status():
status = [] status = []
encoding_jobs = ('archive.tasks.extract_stream', 'archive.tasks.process_stream') encoding_jobs = ('archive.tasks.extract_stream', 'archive.tasks.process_stream')
c = celery.task.control.inspect() c = app.control.inspect()
for job in c.active(safe=True).get('celery@pandora-encoding', []): for job in c.active(safe=True).get('celery@pandora-encoding', []):
if job['name'] in encoding_jobs: if job['name'] in encoding_jobs:
status.append(parse_job(job)) status.append(parse_job(job))
@ -67,7 +65,7 @@ def fill_queue():
def get_celery_worker_status(): def get_celery_worker_status():
ERROR_KEY = "ERROR" ERROR_KEY = "ERROR"
try: try:
insp = celery.task.control.inspect() insp = app.control.inspect()
d = insp.stats() d = insp.stats()
if not d: if not d:
d = {ERROR_KEY: 'No running Celery workers were found.'} d = {ERROR_KEY: 'No running Celery workers were found.'}

View file

@ -2,13 +2,14 @@
from glob import glob from glob import glob
from celery.task import task
from django.conf import settings from django.conf import settings
from django.db import transaction
from django.db.models import Q from django.db.models import Q
from item.models import Item from item.models import Item
from item.tasks import update_poster, update_timeline from item.tasks import update_poster, update_timeline
from taskqueue.models import Task from taskqueue.models import Task
from app.celery import app
from . import models from . import models
from . import extract from . import extract
@ -68,7 +69,7 @@ def update_or_create_instance(volume, f):
instance.file.item.update_wanted() instance.file.item.update_wanted()
return instance return instance
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_files(user, volume, files): def update_files(user, volume, files):
user = models.User.objects.get(username=user) user = models.User.objects.get(username=user)
volume, created = models.Volume.objects.get_or_create(user=user, name=volume) volume, created = models.Volume.objects.get_or_create(user=user, name=volume)
@ -100,7 +101,7 @@ def update_files(user, volume, files):
Task.start(i, user) Task.start(i, user)
update_timeline.delay(i.public_id) update_timeline.delay(i.public_id)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_info(user, info): def update_info(user, info):
user = models.User.objects.get(username=user) user = models.User.objects.get(username=user)
files = models.File.objects.filter(oshash__in=list(info)) files = models.File.objects.filter(oshash__in=list(info))
@ -114,7 +115,7 @@ def update_info(user, info):
Task.start(i, user) Task.start(i, user)
update_timeline.delay(i.public_id) update_timeline.delay(i.public_id)
@task(queue="encoding") @app.task(queue="encoding")
def process_stream(fileId): def process_stream(fileId):
''' '''
process uploaded stream process uploaded stream
@ -140,7 +141,7 @@ def process_stream(fileId):
Task.finish(file.item) Task.finish(file.item)
return True return True
@task(queue="encoding") @app.task(queue="encoding")
def extract_stream(fileId): def extract_stream(fileId):
''' '''
extract stream from direct upload extract stream from direct upload
@ -169,7 +170,7 @@ def extract_stream(fileId):
models.File.objects.filter(id=fileId).update(encoding=False) models.File.objects.filter(id=fileId).update(encoding=False)
Task.finish(file.item) Task.finish(file.item)
@task(queue="encoding") @app.task(queue="encoding")
def extract_derivatives(fileId, rebuild=False): def extract_derivatives(fileId, rebuild=False):
file = models.File.objects.get(id=fileId) file = models.File.objects.get(id=fileId)
streams = file.streams.filter(source=None) streams = file.streams.filter(source=None)
@ -177,7 +178,7 @@ def extract_derivatives(fileId, rebuild=False):
streams[0].extract_derivatives(rebuild) streams[0].extract_derivatives(rebuild)
return True return True
@task(queue="encoding") @app.task(queue="encoding")
def update_stream(id): def update_stream(id):
s = models.Stream.objects.get(pk=id) s = models.Stream.objects.get(pk=id)
if not glob("%s*" % s.timeline_prefix): if not glob("%s*" % s.timeline_prefix):
@ -199,11 +200,11 @@ def update_stream(id):
c.update_calculated_values() c.update_calculated_values()
c.save() c.save()
@task(queue="encoding") @app.task(queue="encoding")
def download_media(item_id, url): def download_media(item_id, url, referer=None):
return external.download(item_id, url) return external.download(item_id, url, referer)
@task(queue='default') @app.task(queue='default')
def move_media(data, user): def move_media(data, user):
from changelog.models import add_changelog from changelog.models import add_changelog
from item.models import get_item, Item, ItemSort from item.models import get_item, Item, ItemSort
@ -248,7 +249,8 @@ def move_media(data, user):
if old_item and old_item.files.count() == 0 and i.files.count() == len(data['ids']): if old_item and old_item.files.count() == 0 and i.files.count() == len(data['ids']):
for a in old_item.annotations.all().order_by('id'): for a in old_item.annotations.all().order_by('id'):
a.item = i a.item = i
a.set_public_id() with transaction.atomic():
a.set_public_id()
Annotation.objects.filter(id=a.id).update(item=i, public_id=a.public_id) Annotation.objects.filter(id=a.id).update(item=i, public_id=a.public_id)
old_item.clips.all().update(item=i, sort=i.sort) old_item.clips.all().update(item=i, sort=i.sort)

View file

@ -103,7 +103,7 @@ def update(request, data):
file__available=False, file__available=False,
file__wanted=True)] file__wanted=True)]
if list(filter(lambda l: l['id'] == 'subtitles', settings.CONFIG['layers'])): if utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True):
qs = files.filter( qs = files.filter(
file__is_subtitle=True, file__is_subtitle=True,
file__available=False file__available=False
@ -195,7 +195,9 @@ def addMedia(request, data):
response['data']['item'] = f.item.public_id response['data']['item'] = f.item.public_id
response['data']['itemUrl'] = request.build_absolute_uri('/%s' % f.item.public_id) response['data']['itemUrl'] = request.build_absolute_uri('/%s' % f.item.public_id)
if not f.available: if not f.available:
add_changelog(request, data, f.item.public_id) changelog_data = data.copy()
changelog_data['oshash'] = oshash
add_changelog(request, changelog_data, f.item.public_id)
else: else:
if 'item' in data: if 'item' in data:
i = Item.objects.get(public_id=data['item']) i = Item.objects.get(public_id=data['item'])
@ -220,11 +222,15 @@ def addMedia(request, data):
if 'info' in data and data['info'] and isinstance(data['info'], dict): if 'info' in data and data['info'] and isinstance(data['info'], dict):
f.info = data['info'] f.info = data['info']
f.info['extension'] = extension f.info['extension'] = extension
if 'filename' in data:
f.info['filename'] = data['filename']
f.parse_info() f.parse_info()
f.save() f.save()
response['data']['item'] = i.public_id response['data']['item'] = i.public_id
response['data']['itemUrl'] = request.build_absolute_uri('/%s' % i.public_id) response['data']['itemUrl'] = request.build_absolute_uri('/%s' % i.public_id)
add_changelog(request, data, i.public_id) changelog_data = data.copy()
changelog_data['oshash'] = oshash
add_changelog(request, changelog_data, i.public_id)
return render_to_json_response(response) return render_to_json_response(response)
actions.register(addMedia, cache=False) actions.register(addMedia, cache=False)
@ -739,6 +745,7 @@ def addMediaUrl(request, data):
takes { takes {
url: string, // url url: string, // url
referer: string // optional referer url
item: string // item item: string // item
} }
returns { returns {
@ -751,7 +758,7 @@ def addMediaUrl(request, data):
response = json_response() response = json_response()
i = Item.objects.get(public_id=data['item']) i = Item.objects.get(public_id=data['item'])
Task.start(i, request.user) Task.start(i, request.user)
t = tasks.download_media.delay(data['item'], data['url']) t = tasks.download_media.delay(data['item'], data['url'], data.get('referer'))
response['data']['taskId'] = t.task_id response['data']['taskId'] = t.task_id
add_changelog(request, data, data['item']) add_changelog(request, data, data['item'])
return render_to_json_response(response) return render_to_json_response(response)

View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ChangelogConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'changelog'

View file

@ -0,0 +1,35 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
dependencies = [
('changelog', '0002_jsonfield'),
]
operations = [
migrations.AlterField(
model_name='changelog',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='changelog',
name='value',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='log',
name='data',
field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
),
migrations.AlterField(
model_name='log',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

7
pandora/clip/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ClipConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'clip'

View file

@ -17,6 +17,7 @@ keymap = {
'place': 'annotations__places__id', 'place': 'annotations__places__id',
'text': 'findvalue', 'text': 'findvalue',
'annotations': 'findvalue', 'annotations': 'findvalue',
'layer': 'annotations__layer',
'user': 'annotations__user__username', 'user': 'annotations__user__username',
} }
case_insensitive_keys = ('annotations__user__username', ) case_insensitive_keys = ('annotations__user__username', )

View file

@ -0,0 +1,18 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clip', '0003_auto_20160219_1805'),
]
operations = [
migrations.AlterField(
model_name='clip',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]

View file

@ -8,6 +8,7 @@ import ox
from archive import extract from archive import extract
from . import managers from . import managers
from .utils import add_cuts
def get_layers(item, interval=None, user=None): def get_layers(item, interval=None, user=None):
@ -59,9 +60,7 @@ class MetaClip(object):
self.hue = self.saturation = self.lightness = 0 self.hue = self.saturation = self.lightness = 0
self.volume = 0 self.volume = 0
def save(self, *args, **kwargs): def update_findvalue(self):
if self.duration != self.end - self.start:
self.update_calculated_values()
if not self.aspect_ratio and self.item: if not self.aspect_ratio and self.item:
streams = self.item.streams() streams = self.item.streams()
if streams: if streams:
@ -89,6 +88,11 @@ class MetaClip(object):
self.findvalue = '\n'.join(list(filter(None, [a.findvalue for a in anns]))) self.findvalue = '\n'.join(list(filter(None, [a.findvalue for a in anns])))
for l in [k['id'] for k in settings.CONFIG['layers']]: for l in [k['id'] for k in settings.CONFIG['layers']]:
setattr(self, l, l in anns_by_layer and bool(len(anns_by_layer[l]))) setattr(self, l, l in anns_by_layer and bool(len(anns_by_layer[l])))
def save(self, *args, **kwargs):
if self.duration != self.end - self.start:
self.update_calculated_values()
self.update_findvalue()
models.Model.save(self, *args, **kwargs) models.Model.save(self, *args, **kwargs)
clip_keys = ('id', 'in', 'out', 'position', 'created', 'modified', clip_keys = ('id', 'in', 'out', 'position', 'created', 'modified',
@ -111,8 +115,7 @@ class MetaClip(object):
del j[key] del j[key]
#needed here to make item find with clips work #needed here to make item find with clips work
if 'annotations' in keys: if 'annotations' in keys:
#annotations = self.annotations.filter(layer__in=settings.CONFIG['clipLayers']) annotations = self.annotations.all().exclude(value='')
annotations = self.annotations.all()
if qs: if qs:
for q in qs: for q in qs:
annotations = annotations.filter(q) annotations = annotations.filter(q)
@ -150,12 +153,12 @@ class MetaClip(object):
data['annotation'] = qs[0].public_id data['annotation'] = qs[0].public_id
data['parts'] = self.item.cache['parts'] data['parts'] = self.item.cache['parts']
data['durations'] = self.item.cache['durations'] data['durations'] = self.item.cache['durations']
for key in ('title', 'director', 'year', 'videoRatio'): for key in settings.CONFIG['itemTitleKeys'] + ['videoRatio']:
value = self.item.cache.get(key) value = self.item.cache.get(key)
if value: if value:
data[key] = value data[key] = value
data['duration'] = data['out'] - data['in'] data['duration'] = data['out'] - data['in']
data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end]) add_cuts(data, self.item, self.start, self.end)
data['layers'] = self.get_layers(user) data['layers'] = self.get_layers(user)
data['streams'] = [s.file.oshash for s in self.item.streams()] data['streams'] = [s.file.oshash for s in self.item.streams()]
return data return data
@ -186,6 +189,7 @@ class MetaClip(object):
def __str__(self): def __str__(self):
return self.public_id return self.public_id
class Meta: class Meta:
unique_together = ("item", "start", "end") unique_together = ("item", "start", "end")

22
pandora/clip/utils.py Normal file
View file

@ -0,0 +1,22 @@
def add_cuts(data, item, start, end):
    """
    Annotate `data` with the cuts of `item` that relate to the clip [start, end].

    Sets two keys on the `data` dict:
      data['cuts']      -- tuple of cut positions strictly inside (start, end)
      data['outerCuts'] -- tuple (last cut at or before start, first cut at or
                           after end); may be shorter/empty if a side is missing.

    `item` only needs to support item.get('cuts', []) returning a sorted
    iterable of cut positions.
    """
    inner = []
    outer = []
    # most recent cut seen at or before the clip start (0 if none yet)
    before = 0
    for cut in item.get('cuts', []):
        if start < cut < end:
            if not inner:
                # first inner cut: record the cut just before the clip
                outer.append(before)
            inner.append(cut)
        elif cut <= start:
            before = cut
        else:  # cut >= end
            if not outer:
                outer.append(before)
            if len(outer) == 1:
                # first cut at/after the clip end closes the outer pair
                outer.append(cut)
    data['cuts'] = tuple(inner)
    data['outerCuts'] = tuple(outer)

View file

@ -1009,7 +1009,7 @@
{ {
"id": "tags", "id": "tags",
"title": "Tags", "title": "Tags",
"canAddAnnotations": {"member": true, "staff": true, "admin": true}, "canAddAnnotations": {"member": true, "friend": true, "staff": true, "admin": true},
"item": "Tag", "item": "Tag",
"autocomplete": true, "autocomplete": true,
"overlap": true, "overlap": true,
@ -1399,10 +1399,8 @@
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"torrent": false,
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
// fixme: this should be named "ratio" or "defaultRatio", // fixme: this should be named "ratio" or "defaultRatio",
// as it also applies to clip lists (on load) // as it also applies to clip lists (on load)

View file

@ -73,13 +73,14 @@
"canSeeAccessed": {"researcher": true, "staff": true, "admin": true}, "canSeeAccessed": {"researcher": true, "staff": true, "admin": true},
"canSeeAllTasks": {"staff": true, "admin": true}, "canSeeAllTasks": {"staff": true, "admin": true},
"canSeeDebugMenu": {"researcher": true, "staff": true, "admin": true}, "canSeeDebugMenu": {"researcher": true, "staff": true, "admin": true},
"canSeeExtraItemViews": {"researcher": true, "staff": true, "admin": true},
"canSeeMedia": {"researcher": true, "staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3}, "canSeeDocument": {"guest": 1, "member": 1, "researcher": 2, "staff": 3, "admin": 3},
"canSeeExtraItemViews": {"researcher": true, "staff": true, "admin": true},
"canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3}, "canSeeItem": {"guest": 2, "member": 2, "researcher": 2, "staff": 3, "admin": 3},
"canSeeMedia": {"researcher": true, "staff": true, "admin": true},
"canSeeSize": {"researcher": true, "staff": true, "admin": true}, "canSeeSize": {"researcher": true, "staff": true, "admin": true},
"canSeeSoftwareVersion": {"researcher": true, "staff": true, "admin": true}, "canSeeSoftwareVersion": {"researcher": true, "staff": true, "admin": true},
"canSendMail": {"staff": true, "admin": true} "canSendMail": {"staff": true, "admin": true},
"canShare": {"staff": true, "admin": true}
}, },
/* /*
"clipKeys" are the properties that clips can be sorted by (the values are "clipKeys" are the properties that clips can be sorted by (the values are
@ -312,6 +313,14 @@
"autocomplete": true, "autocomplete": true,
"columnWidth": 128 "columnWidth": 128
}, },
{
"id": "fulltext",
"operator": "+",
"title": "Fulltext",
"type": "text",
"fulltext": true,
"find": true
},
{ {
"id": "created", "id": "created",
"operator": "-", "operator": "-",
@ -1494,6 +1503,7 @@
"hasEvents": true, "hasEvents": true,
"hasPlaces": true, "hasPlaces": true,
"item": "Keyword", "item": "Keyword",
"autocomplete": true,
"overlap": true, "overlap": true,
"type": "string" "type": "string"
}, },
@ -1875,10 +1885,8 @@
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"torrent": false,
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
"previewRatio": 1.375, "previewRatio": 1.375,
"resolutions": [240, 480] "resolutions": [240, 480]

View file

@ -71,13 +71,14 @@
"canSeeAccessed": {"staff": true, "admin": true}, "canSeeAccessed": {"staff": true, "admin": true},
"canSeeAllTasks": {"staff": true, "admin": true}, "canSeeAllTasks": {"staff": true, "admin": true},
"canSeeDebugMenu": {"staff": true, "admin": true}, "canSeeDebugMenu": {"staff": true, "admin": true},
"canSeeExtraItemViews": {"staff": true, "admin": true},
"canSeeMedia": {"staff": true, "admin": true},
"canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4}, "canSeeDocument": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeExtraItemViews": {"staff": true, "admin": true},
"canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4}, "canSeeItem": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
"canSeeMedia": {"staff": true, "admin": true},
"canSeeSize": {"staff": true, "admin": true}, "canSeeSize": {"staff": true, "admin": true},
"canSeeSoftwareVersion": {"staff": true, "admin": true}, "canSeeSoftwareVersion": {"staff": true, "admin": true},
"canSendMail": {"staff": true, "admin": true} "canSendMail": {"staff": true, "admin": true},
"canShare": {"staff": true, "admin": true}
}, },
/* /*
"clipKeys" are the properties that clips can be sorted by (the values are "clipKeys" are the properties that clips can be sorted by (the values are
@ -246,6 +247,28 @@
"filter": true, "filter": true,
"find": true "find": true
}, },
{
"id": "source",
"title": "Source",
"type": "string",
"autocomplete": true,
"description": true,
"columnWidth": 180,
"filter": true,
"find": true,
"sort": true
},
{
"id": "project",
"title": "Project",
"type": "string",
"autocomplete": true,
"description": true,
"columnWidth": 120,
"filter": true,
"find": true,
"sort": true
},
{ {
"id": "id", "id": "id",
"operator": "+", "operator": "+",
@ -291,6 +314,24 @@
"sort": true, "sort": true,
"columnWidth": 256 "columnWidth": 256
}, },
{
"id": "content",
"operator": "+",
"title": "Content",
"type": "text",
"find": true,
"sort": true,
"columnWidth": 256
},
{
"id": "translation",
"operator": "+",
"title": "Translation",
"type": "text",
"find": true,
"sort": true,
"columnWidth": 256
},
{ {
"id": "matches", "id": "matches",
"operator": "-", "operator": "-",
@ -310,6 +351,20 @@
"autocomplete": true, "autocomplete": true,
"columnWidth": 128 "columnWidth": 128
}, },
{
"id": "notes",
"title": "Notes",
"type": "text",
"capability": "canEditMetadata"
},
{
"id": "fulltext",
"operator": "+",
"title": "Fulltext",
"type": "text",
"fulltext": true,
"find": true
},
{ {
"id": "created", "id": "created",
"operator": "-", "operator": "-",
@ -545,7 +600,6 @@
"title": "Director", "title": "Director",
"type": ["string"], "type": ["string"],
"autocomplete": true, "autocomplete": true,
"columnRequired": true,
"columnWidth": 180, "columnWidth": 180,
"sort": true, "sort": true,
"sortType": "person" "sortType": "person"
@ -564,7 +618,6 @@
"title": "Featuring", "title": "Featuring",
"type": ["string"], "type": ["string"],
"autocomplete": true, "autocomplete": true,
"columnRequired": true,
"columnWidth": 180, "columnWidth": 180,
"filter": true, "filter": true,
"sort": true, "sort": true,
@ -620,7 +673,7 @@
{ {
"id": "annotations", "id": "annotations",
"title": "Annotations", "title": "Annotations",
"type": "string", // fixme: not the best type for this magic key "type": "text", // fixme: not the best type for this magic key
"find": true "find": true
}, },
{ {
@ -658,7 +711,7 @@
}, },
{ {
"id": "numberofannotations", "id": "numberofannotations",
"title": "Annotations", "title": "Number of Annotations",
"type": "integer", "type": "integer",
"columnWidth": 60, "columnWidth": 60,
"sort": true "sort": true
@ -794,12 +847,16 @@
"id": "user", "id": "user",
"title": "User", "title": "User",
"type": "string", "type": "string",
"columnWidth": 90,
"capability": "canSeeMedia", "capability": "canSeeMedia",
"sort": true,
"find": true "find": true
}, },
{ {
"id": "groups", "id": "groups",
"title": "Group", "title": "Group",
"columnWidth": 90,
"sort": true,
"type": ["string"] "type": ["string"]
}, },
{ {
@ -1332,10 +1389,8 @@
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"torrent": true,
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
"previewRatio": 1.3333333333, "previewRatio": 1.3333333333,
//supported resolutions are //supported resolutions are

View file

@ -29,7 +29,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"text": Text shown on mouseover "text": Text shown on mouseover
*/ */
"cantPlay": { "cantPlay": {
"icon": "noCopyright", "icon": "NoCopyright",
"link": "", "link": "",
"text": "" "text": ""
}, },
@ -67,7 +67,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"canManageEntities": {"member": true, "staff": true, "admin": true}, "canManageEntities": {"member": true, "staff": true, "admin": true},
"canManageHome": {"staff": true, "admin": true}, "canManageHome": {"staff": true, "admin": true},
"canManagePlacesAndEvents": {"member": true, "staff": true, "admin": true}, "canManagePlacesAndEvents": {"member": true, "staff": true, "admin": true},
"canManageTitlesAndNames": {"member": true, "staff": true, "admin": true}, "canManageTitlesAndNames": {"member": false, "staff": true, "admin": true},
"canManageTranslations": {"admin": true}, "canManageTranslations": {"admin": true},
"canManageUsers": {"staff": true, "admin": true}, "canManageUsers": {"staff": true, "admin": true},
"canPlayClips": {"guest": 1, "member": 1, "staff": 4, "admin": 4}, "canPlayClips": {"guest": 1, "member": 1, "staff": 4, "admin": 4},
@ -102,8 +102,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
], ],
/* /*
"clipLayers" is the ordered list of public layers that will appear as the "clipLayers" is the ordered list of public layers that will appear as the
text of clips (in grid view, below the icon). Excluding a layer from this text of clips (in grid view, below the icon).
list means it will not be included in find annotations.
*/ */
"clipLayers": ["publicnotes", "keywords", "subtitles"], "clipLayers": ["publicnotes", "keywords", "subtitles"],
/* /*
@ -351,11 +350,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"type": "enum", "type": "enum",
"columnWidth": 90, "columnWidth": 90,
"format": {"type": "ColorLevel", "args": [ "format": {"type": "ColorLevel", "args": [
["Public", "Out of Copyright", "Under Copyright", "Private"] ["Public", "Restricted", "Private"]
]}, ]},
"sort": true, "sort": true,
"sortOperator": "+", "sortOperator": "+",
"values": ["Public", "Out of Copyright", "Under Copyright", "Private", "Unknown"] "values": ["Public", "Restricted", "Private", "Unknown"]
} }
], ],
/* /*
@ -753,6 +752,13 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"capability": "canSeeMedia", "capability": "canSeeMedia",
"find": true "find": true
}, },
{
"id": "filename",
"title": "Filename",
"type": ["string"],
"capability": "canSeeMedia",
"find": true
},
{ {
"id": "created", "id": "created",
"title": "Date Created", "title": "Date Created",
@ -1159,6 +1165,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
"findDocuments": {"conditions": [], "operator": "&"}, "findDocuments": {"conditions": [], "operator": "&"},
"followPlayer": true, "followPlayer": true,
"help": "", "help": "",
"hidden": {
"collections": [],
"edits": [],
"lists": []
},
"icons": "posters", "icons": "posters",
"infoIconSize": 256, "infoIconSize": 256,
"item": "", "item": "",
@ -1267,13 +1278,11 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
corner of the screen corner of the screen
"resolutions": List of video resolutions. Supported values are 96, 144, "resolutions": List of video resolutions. Supported values are 96, 144,
240, 288, 360, 432, 480, 720 and 1080. 240, 288, 360, 432, 480, 720 and 1080.
"torrent": If true, video downloads are offered via BitTorrent
*/ */
"video": { "video": {
"downloadFormat": "webm", "downloadFormat": "webm",
"formats": ["webm", "mp4"], "formats": ["webm", "mp4"],
"previewRatio": 1.3333333333, "previewRatio": 1.3333333333,
"resolutions": [240, 480], "resolutions": [240, 480]
"torrent": false
} }
} }

7
pandora/document/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class DocumentConfig(AppConfig):
    """Django app configuration for the document app."""

    name = 'document'
    default_auto_field = "django.db.models.BigAutoField"

View file

@ -1,14 +1,37 @@
import logging
import os
import subprocess import subprocess
import tempfile
from django.conf import settings from django.conf import settings
def extract_text(pdf): logger = logging.getLogger('pandora.' + __name__)
cmd = ['pdftotext', pdf, '-']
def extract_text(pdf, page=None):
if page is not None:
page = str(page)
cmd = ['pdftotext', '-f', page, '-l', page, pdf, '-']
else:
cmd = ['pdftotext', pdf, '-']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate() stdout, stderr = p.communicate()
stdout = stdout.decode() stdout = stdout.decode().strip()
return stdout.strip() if not stdout:
if page:
# split page from pdf and ocr
fd, page_pdf = tempfile.mkstemp('.pdf')
cmd = ['pdfseparate', '-f', page, '-l', page, pdf, page_pdf]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
text = ocr_image(page_pdf)
os.unlink(page_pdf)
os.close(fd)
return text
else:
return ocr_image(pdf)
return stdout
def ocr_image(path): def ocr_image(path):
cmd = ['tesseract', path, '-', 'txt'] cmd = ['tesseract', path, '-', 'txt']
@ -43,9 +66,11 @@ class FulltextMixin:
if self.has_fulltext_key(): if self.has_fulltext_key():
from elasticsearch.exceptions import NotFoundError from elasticsearch.exceptions import NotFoundError
try: try:
res = self.elasticsearch().delete(index=self._ES_INDEX, doc_type='document', id=self.id) res = self.elasticsearch().delete(index=self._ES_INDEX, id=self.id)
except NotFoundError: except NotFoundError:
pass pass
except:
logger.error('failed to delete fulltext document', exc_info=True)
def update_fulltext(self): def update_fulltext(self):
if self.has_fulltext_key(): if self.has_fulltext_key():
@ -54,7 +79,7 @@ class FulltextMixin:
doc = { doc = {
'text': text.lower() 'text': text.lower()
} }
res = self.elasticsearch().index(index=self._ES_INDEX, doc_type='document', id=self.id, body=doc) res = self.elasticsearch().index(index=self._ES_INDEX, id=self.id, body=doc)
@classmethod @classmethod
def find_fulltext(cls, query): def find_fulltext(cls, query):
@ -95,3 +120,69 @@ class FulltextMixin:
ids += [int(r['_id']) for r in res['hits']['hits']] ids += [int(r['_id']) for r in res['hits']['hits']]
from_ += len(res['hits']['hits']) from_ += len(res['hits']['hits'])
return ids return ids
def highlight_page(self, page, query, size):
    """
    Render page `page` (1-indexed) of this document's PDF with every
    occurrence of `query` highlighted in translucent yellow, scaled so the
    longer edge is `size` pixels.

    Returns an open NamedTemporaryFile containing the JPEG; the file is
    removed when the returned object is closed/garbage collected.
    """
    import pypdfium2 as pdfium
    from PIL import Image
    from PIL import ImageDraw
    pdfpath = self.file.path
    pagenumber = int(page) - 1  # pdfium pages are 0-indexed
    jpg = tempfile.NamedTemporaryFile(suffix='.jpg')
    output = jpg.name
    TINT_COLOR = (255, 255, 0)  # yellow
    TRANSPARENCY = .45
    OPACITY = int(255 * TRANSPARENCY)
    scale = 150/72  # render at 150 dpi (PDF user space is 72 dpi)
    pdf = pdfium.PdfDocument(pdfpath)
    page = pdf[pagenumber]
    bitmap = page.render(scale=scale, rotation=0)
    img = bitmap.to_pil().convert('RGBA')
    overlay = Image.new('RGBA', img.size, TINT_COLOR+(0,))
    draw = ImageDraw.Draw(overlay)
    textpage = page.get_textpage()
    search = textpage.search(query)
    result = search.get_next()
    while result:
        pos, steps = result
        steps += 1
        while steps:
            # character boxes come in PDF coordinates (origin bottom-left);
            # PIL uses a top-left origin, so flip the y axis
            box = textpage.get_charbox(pos)
            box = [b*scale for b in box]
            tl = (box[0], img.size[1] - box[3])
            br = (box[2], img.size[1] - box[1])
            draw.rectangle((tl, br), fill=TINT_COLOR+(OPACITY,))
            pos += 1
            steps -= 1
        result = search.get_next()
    img = Image.alpha_composite(img, overlay)
    img = img.convert("RGB")
    aspect = img.size[0] / img.size[1]
    resize_method = Image.LANCZOS
    if img.size[0] >= img.size[1]:
        # landscape (or square): clamp width to `size`
        width = size
        height = int(size / aspect)
    else:
        # portrait: clamp height to `size`
        # (was int(size / aspect), which made width LARGER than `size`
        # since aspect < 1 for portrait pages)
        width = int(size * aspect)
        height = size
    img = img.resize((width, height), resize_method)
    img.save(output, quality=72)
    return jpg
class FulltextPageMixin(FulltextMixin):
    """Fulltext (elasticsearch) indexing for a single page of a document."""

    _ES_INDEX = "document-page-index"

    def extract_fulltext(self):
        """Return the plain text of this page ('' if nothing can be extracted)."""
        if self.document.file:
            if self.document.extension == 'pdf':
                return extract_text(self.document.file.path, self.page)
            elif self.document.extension in ('png', 'jpg'):
                # Page itself has no `extension` attribute; the original
                # checked self.extension here, which would raise AttributeError
                return ocr_image(self.document.file.path)
            elif self.document.extension == 'html':
                # FIXME: is there a nice way to split that into pages
                return self.data.get('text', '')
        return ''

View file

@ -5,7 +5,6 @@ from django.db import connection, transaction
from django.db.models import fields from django.db.models import fields
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models

View file

@ -5,7 +5,6 @@ from django.db import connection, transaction
from django.db.models import fields from django.db.models import fields
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch

View file

@ -1,4 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from datetime import datetime
import unicodedata import unicodedata
from django.db.models import Q, Manager from django.db.models import Q, Manager
@ -14,6 +15,7 @@ from documentcollection.models import Collection
from item import utils from item import utils
from user.models import Group from user.models import Group
from .pages import PageManager
keymap = { keymap = {
'item': 'items__public_id', 'item': 'items__public_id',
@ -61,7 +63,7 @@ def parseCondition(condition, user, item=None, owner=None):
def buildCondition(k, op, v, user, exclude=False, owner=None): def buildCondition(k, op, v, user, exclude=False, owner=None):
import entity.models import entity.models
from . import models from .. import models
# fixme: frontend should never call with list # fixme: frontend should never call with list
if k == 'list': if k == 'list':
@ -297,5 +299,8 @@ class DocumentManager(Manager):
q |= Q(groups__in=user.groups.all()) q |= Q(groups__in=user.groups.all())
rendered_q |= Q(groups__in=user.groups.all()) rendered_q |= Q(groups__in=user.groups.all())
qs = qs.filter(q) qs = qs.filter(q)
max_level = len(settings.CONFIG['documentRightsLevels'])
qs = qs.filter(rightslevel__lte=max_level)
return qs return qs

View file

@ -0,0 +1,302 @@
# -*- coding: utf-8 -*-
from datetime import datetime
import unicodedata
from six import string_types
from django.db.models import Q, Manager
from django.conf import settings
import ox
from oxdjango.query import QuerySet
import entity.managers
from oxdjango.managers import get_operator
from documentcollection.models import Collection
from item import utils
from user.models import Group
# Map frontend find keys to the ORM lookup paths used by parseCondition /
# buildCondition below.
keymap = {
    'item': 'items__public_id',
}
# Key assumed when a condition does not name one.
default_key = 'title'
def get_key_type(k):
    """Return the normalized find type for document key `k` ('string' default)."""
    config = utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'}
    kind = config.get('type')
    if isinstance(kind, list):
        kind = kind[0]
    # collapse the string-like key types onto 'string'
    aliases = {
        'title': 'string',
        'person': 'string',
        'text': 'string',
        'year': 'string',
        'length': 'string',
        'layer': 'string',
        'list': 'list',
    }
    return aliases.get(kind, kind)
def parseCondition(condition, user, item=None, owner=None):
    """
    Translate one find condition ({key, value, operator}) into a Q object.

    A leading '!' on the operator negates the condition. When `item` is given,
    'description' conditions also match the per-item description.
    """
    key = condition.get('key', default_key)
    key = keymap.get(key, key) or default_key
    if item and key == 'description':
        item_conditions = condition.copy()
        item_conditions['key'] = 'items__itemproperties__description'
        return parseCondition(condition, user) | parseCondition(item_conditions, user)
    value = condition['value']
    operator = condition.get('operator') or '='
    negated = operator.startswith('!')
    if negated:
        operator = operator[1:]
    return buildCondition(key, operator, value, user, negated, owner=owner)
def buildCondition(k, op, v, user, exclude=False, owner=None):
    """
    Build a Q object for one (key, operator, value) find condition on Pages.

    k: find key (already mapped through keymap); op: comparison operator
    string; v: value; exclude: negate the condition; owner: list/collection
    owner used for '$my' group queries.

    NOTE(review): this module appears to be derived from the Document
    managers; several exclude branches filter on Document ids
    (Q(id__in=models.Document.objects...)) — verify they are correct for the
    Page queryset this manager operates on.
    """
    import entity.models
    from .. import models
    # fixme: frontend should never call with list
    if k == 'list':
        print('fixme: frontend should never call with list', k, op, v)
        k = 'collection'
    key_type = get_key_type(k)
    key_config = (utils.get_by_id(settings.CONFIG['documentKeys'], k) or {'type': 'string'})
    facet_keys = models.Document.facet_keys
    if k == 'document':
        # match by document public id; '&' with a list matches all given ids
        k = 'document__id'
        if op == '&' and isinstance(v, list):
            v = [ox.fromAZ(id_) for id_ in v]
            k += get_operator(op)
        else:
            v = ox.fromAZ(v)
        q = Q(**{k: v})
        if exclude:
            q = ~Q(document__id__in=models.Document.objects.filter(q))
        return q
    elif k == 'rightslevel':
        q = Q(document__rightslevel=v)
        if exclude:
            q = ~Q(document__rightslevel=v)
        return q
    elif k == 'groups':
        # '$my' matches the owner's (or requesting user's) own groups
        if op == '==' and v == '$my':
            if not owner:
                owner = user
            groups = owner.groups.all()
        else:
            key = 'name' + get_operator(op)
            groups = Group.objects.filter(**{key: v})
        if not groups.count():
            # no matching group: match nothing
            return Q(id=0)
        q = Q(document__groups__in=groups)
        if exclude:
            q = ~q
        return q
    elif k in ('oshash', 'items__public_id'):
        q = Q(**{k: v})
        if exclude:
            q = ~Q(id__in=models.Document.objects.filter(q))
        return q
    elif isinstance(v, bool):
        # boolean values are matched directly on the model field (below)
        key = k
    elif k == 'entity':
        entity_key, entity_v = entity.managers.namePredicate(op, v)
        key = 'id__in'
        v = entity.models.DocumentProperties.objects.filter(**{
            'entity__' + entity_key: entity_v
        }).values_list('document_id', flat=True)
    elif k == 'collection':
        # value has the form "username:collectionname"
        q = Q(id=0)
        l = v.split(":", 1)
        if len(l) >= 2:
            lqs = list(Collection.objects.filter(name=l[1], user__username=l[0]))
            if len(lqs) == 1 and lqs[0].accessible(user):
                l = lqs[0]
                if l.query.get('static', False) is False:
                    # smart collection: expand its stored query recursively
                    data = l.query
                    q = parseConditions(data.get('conditions', []),
                                        data.get('operator', '&'),
                                        user, owner=l.user)
                else:
                    # static collection: match its member documents
                    # NOTE(review): l.documents yields Document ids; confirm
                    # this is intended against a Page queryset
                    q = Q(id__in=l.documents.all())
            else:
                q = Q(id=0)
        return q
    elif key_config.get('fulltext'):
        # fulltext keys are resolved via the elasticsearch page index
        qs = models.Page.find_fulltext_ids(v)
        q = Q(id__in=qs)
        if exclude:
            q = ~Q(id__in=qs)
        return q
    elif key_type == 'boolean':
        q = Q(**{'find__key': k, 'find__value': v})
        if exclude:
            q = ~Q(id__in=models.Document.objects.filter(q))
        return q
    elif key_type == "string":
        in_find = True
        if in_find:
            value_key = 'find__value'
        else:
            value_key = k
        if isinstance(v, string_types):
            # normalize/lowercase so matching is accent- and case-insensitive
            v = unicodedata.normalize('NFKD', v).lower()
        if k in facet_keys:
            # faceted keys are matched against the facets table instead
            in_find = False
            facet_value = 'facets__value' + get_operator(op, 'istr')
            v = models.Document.objects.filter(**{'facets__key': k, facet_value: v})
            value_key = 'id__in'
        else:
            value_key = value_key + get_operator(op)
        k = str(k)
        value_key = str(value_key)
        if k == '*':
            # wildcard key: search both find values and facet values
            q = Q(**{'find__value' + get_operator(op): v}) | \
                Q(**{'facets__value' + get_operator(op, 'istr'): v})
        elif in_find:
            q = Q(**{'find__key': k, value_key: v})
        else:
            q = Q(**{value_key: v})
        if exclude:
            q = ~Q(id__in=models.Document.objects.filter(q))
        return q
    elif key_type == 'date':
        def parse_date(d):
            # pad missing month/day with 1 so partial dates parse
            while len(d) < 3:
                d.append(1)
            return datetime(*[int(i) for i in d])
        #using sort here since find only contains strings
        v = parse_date(v.split('-'))
        vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
        vk = str(vk)
        q = Q(**{vk: v})
        if exclude:
            q = ~q
        return q
    else:  # integer, float, list, time
        #use sort table here
        if key_type == 'time':
            v = int(utils.parse_time(v))
        vk = 'sort__%s%s' % (k, get_operator(op, 'int'))
        vk = str(vk)
        q = Q(**{vk: v})
        if exclude:
            q = ~q
        return q
    # only reached from the bool / entity branches above
    key = str(key)
    q = Q(**{key: v})
    if exclude:
        q = ~q
    return q
def parseConditions(conditions, operator, user, item=None, owner=None):
    """
    Combine a list of find conditions into one Q object, or None if empty.

    conditions: [
        {
            value: "war"
        }
        {
            key: "year",
            value: "1970-1980,
            operator: "!="
        },
        {
            key: "country",
            value: "f",
            operator: "^"
        }
    ],
    operator: "&"

    Entries containing their own 'conditions' list are nested groups and are
    resolved recursively with the group's own operator.
    """
    conn = []
    for condition in conditions:
        if 'conditions' in condition:
            # nested group: recurse with the group's operator
            q = parseConditions(condition['conditions'],
                                condition.get('operator', '&'), user, item, owner=owner)
            if q:
                conn.append(q)
        else:
            conn.append(parseCondition(condition, user, item, owner=owner))
    if not conn:
        return None
    # fold the collected Q objects with the requested operator
    q = conn[0]
    for c in conn[1:]:
        q = (q | c) if operator == '|' else (q & c)
    return q
class PageManager(Manager):
    # Manager for document Page objects: resolves find queries and restricts
    # results to pages of documents the requesting user may see.

    def get_query_set(self):
        return QuerySet(self.model)

    def find(self, data, user, item=None):
        '''
        Return a queryset of pages matching data['query'], filtered by the
        user's document access level.

        query: {
            conditions: [
                {
                    value: "war"
                }
                {
                    key: "year",
                    value: "1970-1980,
                    operator: "!="
                },
                {
                    key: "country",
                    value: "f",
                    operator: "^"
                }
            ],
            operator: "&"
        }
        '''
        #join query with operator
        qs = self.get_query_set()
        query = data.get('query', {})
        conditions = parseConditions(query.get('conditions', []),
                                     query.get('operator', '&'),
                                     user, item)
        if conditions:
            qs = qs.filter(conditions)
        qs = qs.distinct()
        #anonymous can only see public items
        if not user or user.is_anonymous:
            level = 'guest'
            allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
            qs = qs.filter(document__rightslevel__lte=allowed_level)
            # NOTE(review): rendered_q is built here and below but never
            # applied to qs — confirm whether a .filter(rendered_q) is missing
            rendered_q = Q(rendered=True)
        #users can see public items, there own items and items of there groups
        else:
            level = user.profile.get_level()
            allowed_level = settings.CONFIG['capabilities']['canSeeDocument'][level]
            q = Q(document__rightslevel__lte=allowed_level) | Q(document__user=user)
            rendered_q = Q(rendered=True) | Q(document__user=user)
            if user.groups.count():
                q |= Q(document__groups__in=user.groups.all())
                rendered_q |= Q(document__groups__in=user.groups.all())
            qs = qs.filter(q)
        return qs

View file

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2020-05-13 00:01
from __future__ import unicode_literals
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import document.fulltext
import oxdjango.fields
class Migration(migrations.Migration):
    # Adds the Page model (one row per page of a document) with a JSON data
    # field, then links each Page to its Document via the 'pages_set'
    # reverse relation.

    dependencies = [
        ('document', '0011_jsonfield'),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('page', models.IntegerField(default=1)),
                ('data', oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder)),
            ],
            # mix in fulltext page indexing alongside the plain model base
            bases=(models.Model, document.fulltext.FulltextPageMixin),
        ),
        migrations.AddField(
            model_name='page',
            name='document',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pages_set', to='document.Document'),
        ),
    ]

View file

@ -0,0 +1,55 @@
# Generated by Django 4.2.3 on 2023-07-27 21:24
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    # Django 4.2 housekeeping: switches primary keys to BigAutoField across
    # the document app's models and gives ItemProperties.description a
    # non-null '' default.

    dependencies = [
        ('document', '0012_auto_20200513_0001'),
    ]

    operations = [
        migrations.AlterField(
            model_name='access',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='document',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='document',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='facet',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='find',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='itemproperties',
            name='description',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='itemproperties',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='page',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -6,11 +6,12 @@ import os
import re import re
import unicodedata import unicodedata
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models, transaction from django.db import models, transaction
from django.db.models import Q, Sum, Max from django.db.models import Q, Sum, Max
from django.contrib.auth import get_user_model
from django.db.models.signals import pre_delete from django.db.models.signals import pre_delete
from django.conf import settings from django.utils import datetime_safe
from oxdjango.fields import JSONField from oxdjango.fields import JSONField
from PIL import Image from PIL import Image
@ -21,7 +22,7 @@ from oxdjango.sortmodel import get_sort_field
from person.models import get_name_sort from person.models import get_name_sort
from item.models import Item from item.models import Item
from annotation.models import Annotation from annotation.models import Annotation
from archive.extract import resize_image from archive.extract import resize_image, open_image_rgb
from archive.chunk import save_chunk from archive.chunk import save_chunk
from user.models import Group from user.models import Group
from user.utils import update_groups from user.utils import update_groups
@ -29,7 +30,7 @@ from user.utils import update_groups
from . import managers from . import managers
from . import utils from . import utils
from . import tasks from . import tasks
from .fulltext import FulltextMixin from .fulltext import FulltextMixin, FulltextPageMixin
User = get_user_model() User = get_user_model()
@ -79,7 +80,7 @@ class Document(models.Model, FulltextMixin):
current_values = [] current_values = []
for k in settings.CONFIG['documentKeys']: for k in settings.CONFIG['documentKeys']:
if k.get('sortType') == 'person': if k.get('sortType') == 'person':
current_values += self.get(k['id'], []) current_values += self.get_value(k['id'], [])
if not isinstance(current_values, list): if not isinstance(current_values, list):
if not current_values: if not current_values:
current_values = [] current_values = []
@ -327,6 +328,9 @@ class Document(models.Model, FulltextMixin):
def editable(self, user, item=None): def editable(self, user, item=None):
if not user or user.is_anonymous: if not user or user.is_anonymous:
return False return False
max_level = len(settings.CONFIG['rightsLevels'])
if self.rightslevel > max_level:
return False
if self.user == user or \ if self.user == user or \
self.groups.filter(id__in=user.groups.all()).count() > 0 or \ self.groups.filter(id__in=user.groups.all()).count() > 0 or \
user.is_staff or \ user.is_staff or \
@ -346,6 +350,8 @@ class Document(models.Model, FulltextMixin):
groups = data.pop('groups') groups = data.pop('groups')
update_groups(self, groups) update_groups(self, groups)
for key in data: for key in data:
if key == "id":
continue
k = list(filter(lambda i: i['id'] == key, settings.CONFIG['documentKeys'])) k = list(filter(lambda i: i['id'] == key, settings.CONFIG['documentKeys']))
ktype = k and k[0].get('type') or '' ktype = k and k[0].get('type') or ''
if key == 'text' and self.extension == 'html': if key == 'text' and self.extension == 'html':
@ -546,10 +552,10 @@ class Document(models.Model, FulltextMixin):
if len(crop) == 4: if len(crop) == 4:
path = os.path.join(folder, '%dp%d,%s.jpg' % (1024, page, ','.join(map(str, crop)))) path = os.path.join(folder, '%dp%d,%s.jpg' % (1024, page, ','.join(map(str, crop))))
if not os.path.exists(path): if not os.path.exists(path):
img = Image.open(src).crop(crop) img = open_image_rgb(src).crop(crop)
img.save(path) img.save(path)
else: else:
img = Image.open(path) img = open_image_rgb(path)
src = path src = path
if size < max(img.size): if size < max(img.size):
path = os.path.join(folder, '%dp%d,%s.jpg' % (size, page, ','.join(map(str, crop)))) path = os.path.join(folder, '%dp%d,%s.jpg' % (size, page, ','.join(map(str, crop))))
@ -562,10 +568,10 @@ class Document(models.Model, FulltextMixin):
if len(crop) == 4: if len(crop) == 4:
path = os.path.join(folder, '%s.jpg' % ','.join(map(str, crop))) path = os.path.join(folder, '%s.jpg' % ','.join(map(str, crop)))
if not os.path.exists(path): if not os.path.exists(path):
img = Image.open(src).crop(crop) img = open_image_rgb(src).convert('RGB').crop(crop)
img.save(path) img.save(path)
else: else:
img = Image.open(path) img = open_image_rgb(path)
src = path src = path
if size < max(img.size): if size < max(img.size):
path = os.path.join(folder, '%sp%s.jpg' % (size, ','.join(map(str, crop)))) path = os.path.join(folder, '%sp%s.jpg' % (size, ','.join(map(str, crop))))
@ -574,7 +580,7 @@ class Document(models.Model, FulltextMixin):
if os.path.exists(src) and not os.path.exists(path): if os.path.exists(src) and not os.path.exists(path):
image_size = max(self.width, self.height) image_size = max(self.width, self.height)
if image_size == -1: if image_size == -1:
image_size = max(*Image.open(src).size) image_size = max(*open_image_rgb(src).size)
if size > image_size: if size > image_size:
path = src path = src
else: else:
@ -586,6 +592,11 @@ class Document(models.Model, FulltextMixin):
image = os.path.join(os.path.dirname(pdf), '1024p%d.jpg' % page) image = os.path.join(os.path.dirname(pdf), '1024p%d.jpg' % page)
utils.extract_pdfpage(pdf, image, page) utils.extract_pdfpage(pdf, image, page)
def create_pages(self):
for page in range(self.pages):
page += 1
p, c = Page.objects.get_or_create(document=self, page=page)
def get_info(self): def get_info(self):
if self.extension == 'pdf': if self.extension == 'pdf':
self.thumbnail(1024) self.thumbnail(1024)
@ -595,7 +606,7 @@ class Document(models.Model, FulltextMixin):
self.pages = utils.pdfpages(self.file.path) self.pages = utils.pdfpages(self.file.path)
elif self.width == -1: elif self.width == -1:
self.pages = -1 self.pages = -1
self.width, self.height = Image.open(self.file.path).size self.width, self.height = open_image_rgb(self.file.path).size
def get_ratio(self): def get_ratio(self):
if self.extension == 'pdf': if self.extension == 'pdf':
@ -702,6 +713,41 @@ class ItemProperties(models.Model):
super(ItemProperties, self).save(*args, **kwargs) super(ItemProperties, self).save(*args, **kwargs)
class Page(models.Model, FulltextPageMixin):
    # One page of a Document, carrying per-page data; fulltext extraction
    # comes from FulltextPageMixin (extract_fulltext — TODO confirm it is
    # defined there, the mixin is outside this view).
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    document = models.ForeignKey(Document, related_name='pages_set', on_delete=models.CASCADE)
    page = models.IntegerField(default=1)
    data = JSONField(default=dict, editable=False)

    objects = managers.PageManager()

    def __str__(self):
        return u"%s:%s" % (self.document, self.page)

    def json(self, keys=None, user=None):
        """Serialize this page for the API.

        Always computes 'document' (public id), 'page' and the composite
        'id' ("<document>/<page>"); if *keys* is given, drops everything
        not requested, resolves any '*fulltext*' key via the mixin, and
        delegates all remaining keys to the parent document's json().
        """
        data = {}
        data['document'] = ox.toAZ(self.document.id)
        data['page'] = self.page
        # composite id built from the two values just stored above
        data['id'] = '{document}/{page}'.format(**data)
        document_keys = []
        if keys:
            # iterate over a copy since we delete while filtering
            for key in list(data):
                if key not in keys:
                    del data[key]
            for key in keys:
                if 'fulltext' in key:
                    data['fulltext'] = self.extract_fulltext()
                elif key in ('document', 'page', 'id'):
                    pass
                else:
                    # anything else is a document-level key
                    document_keys.append(key)
        if document_keys:
            data.update(self.document.json(document_keys, user))
        return data
class Access(models.Model): class Access(models.Model):
class Meta: class Meta:
unique_together = ("document", "user") unique_together = ("document", "user")

View file

@ -0,0 +1,135 @@
# -*- coding: utf-8 -*-
import os
import re
from glob import glob
import unicodedata
import ox
from ox.utils import json
from oxdjango.api import actions
from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
from django import forms
from django.db.models import Count, Sum
from django.conf import settings
from item import utils
from item.models import Item
from itemlist.models import List
from entity.models import Entity
from archive.chunk import process_chunk
from changelog.models import add_changelog
from . import models
from . import tasks
def parse_query(data, user):
    """Build the internal page-query dict from an API request payload.

    Starts from the default range and sort, copies over the whitelisted
    keys from *data*, and attaches the permission-filtered Page queryset
    under 'qs'.
    """
    allowed = ('keys', 'group', 'file', 'range', 'position', 'positions', 'sort')
    query = {
        'range': [0, 100],
        'sort': [{'key': 'page', 'operator': '+'}, {'key': 'title', 'operator': '+'}],
    }
    query.update({key: data[key] for key in allowed if key in data})
    query['qs'] = models.Page.objects.find(data, user)
    return query
def _order_query(qs, sort):
    """Apply an API sort spec to a Page queryset.

    Each entry in *sort* is {'key': ..., 'operator': '+'|'-'}.  Keys that
    are not page-local ('created', 'modified', 'page') and contain no
    lookup separator are resolved against the parent document's sort
    table; 'resolution' expands to width and height.
    """
    prefix = 'document__sort__'
    aliases = {
        'index': 'document__items__itemproperties__index',
        'position': 'id',
        'name': 'title',
    }
    order_by = []
    for entry in sort:
        direction = '-' if entry['operator'] == '-' else ''
        key = aliases.get(entry['key'], entry['key'])
        if key == 'resolution':
            order_by.append('%swidth' % direction)
            order_by.append('%sheight' % direction)
        else:
            if '__' not in key and key not in ('created', 'modified', 'page'):
                key = prefix + key
            order_by.append('%s%s' % (direction, key))
    if order_by:
        qs = qs.order_by(*order_by, nulls_last=True)
    return qs.distinct()
def _order_by_group(query):
prefix = 'document__sort__'
if 'sort' in query:
op = '-' if query['sort'][0]['operator'] == '-' else ''
if len(query['sort']) == 1 and query['sort'][0]['key'] == 'items':
order_by = op + prefix + 'items'
if query['group'] == "year":
secondary = op + prefix + 'sortvalue'
order_by = (order_by, secondary)
elif query['group'] != "keyword":
order_by = (order_by, prefix + 'sortvalue')
else:
order_by = (order_by, 'value')
else:
order_by = op + prefix + 'sortvalue'
order_by = (order_by, prefix + 'items')
else:
order_by = ('-' + prefix + 'sortvalue', prefix + 'items')
return order_by
def findPages(request, data):
    '''
    Finds documents pages for a given query
    takes {
        query: object, // query object, see `find`
        sort: [object], // list of sort objects, see `find`
        range: [int, int], // range of results, per current sort order
        keys: [string] // list of keys to return
    }
    returns {
        items: [{ // list of pages
            id: string
            page: int
        }]
    }
    '''
    query = parse_query(data, request.user)
    # order
    qs = _order_query(query['qs'], query['sort'])
    response = json_response()
    if 'group' in query:
        # grouped request: aggregate facet values over the matching pages
        response['data']['items'] = []
        items = 'items'
        document_qs = query['qs']
        order_by = _order_by_group(query)
        qs = models.Facet.objects.filter(key=query['group']).filter(document__id__in=document_qs)
        qs = qs.values('value').annotate(items=Count('id')).order_by(*order_by)
        if 'positions' in query:
            # positions of specific facet values within the full group list
            ids = [j['value'] for j in qs]
            response['data']['positions'] = utils.get_positions(ids, query['positions'])
        elif 'range' in data:
            qs = qs[query['range'][0]:query['range'][1]]
            response['data']['items'] = [{'name': i['value'], 'items': i[items]} for i in qs]
        else:
            response['data']['items'] = qs.count()
    elif 'keys' in data:
        # item request: serialize the requested keys for the given range
        qs = qs[query['range'][0]:query['range'][1]]
        response['data']['items'] = [page.json(data['keys'], request.user) for page in qs]
    elif 'position' in data:
        # FIXME: actually implement position requests
        response['data']['position'] = 0
    elif 'positions' in data:
        ids = list(qs.values_list('id', flat=True))
        response['data']['positions'] = utils.get_positions(ids, query['positions'], decode_id=True)
    else:
        response['data']['items'] = qs.count()
    return render_to_json_response(response)
actions.register(findPages)

View file

@ -1,8 +1,30 @@
# -*- coding: utf-8 -*- import ox
from celery.task import task from app.celery import app
@app.task(queue="encoding")
def extract_fulltext(id):
    """Async task: refresh fulltext for one document and all of its pages."""
    from . import models
    document = models.Document.objects.get(id=id)
    document.update_fulltext()
    document.create_pages()
    for page in document.pages_set.all():
        page.update_fulltext()
@app.task(queue='default')
def bulk_edit(data, username):
    """Async task: apply one edit payload to many documents.

    data: edit payload as accepted by Document.edit(); must contain
          'id' (list of public document ids) and may contain 'item'
          (public item id used for the per-item editable check).
    username: user performing the edit; editability is checked per document.
    Returns an empty dict (task result placeholder).
    """
    from django.db import transaction
    from . import models
    from item.models import Item
    user = models.User.objects.get(username=username)
    item = Item.objects.get(public_id=data['item']) if 'item' in data else None
    # remove 'id' so the remaining payload is a plain key->value edit dict
    ids = data.pop('id')
    documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, ids))
    for document in documents:
        if document.editable(user, item):
            # re-read inside a transaction so concurrent edits are not clobbered
            with transaction.atomic():
                document.refresh_from_db()
                document.edit(data, user, item)
                document.save()
    return {}

View file

@ -12,8 +12,10 @@ from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response, HttpErrorJson
from django import forms from django import forms
from django.db.models import Count, Sum
from django.conf import settings from django.conf import settings
from django.db.models import Count, Sum
from django.http import HttpResponse
from django.shortcuts import render
from item import utils from item import utils
from item.models import Item from item.models import Item
@ -23,6 +25,8 @@ from archive.chunk import process_chunk
from changelog.models import add_changelog from changelog.models import add_changelog
from . import models from . import models
from . import tasks
from . import page_views
def get_document_or_404_json(request, id): def get_document_or_404_json(request, id):
response = {'status': {'code': 404, response = {'status': {'code': 404,
@ -131,13 +135,13 @@ def editDocument(request, data):
item = 'item' in data and Item.objects.get(public_id=data['item']) or None item = 'item' in data and Item.objects.get(public_id=data['item']) or None
if data['id']: if data['id']:
if isinstance(data['id'], list): if isinstance(data['id'], list):
documents = models.Document.objects.filter(pk__in=map(ox.fromAZ, data['id'])) add_changelog(request, data)
t = tasks.bulk_edit.delay(data, request.user.username)
response['data']['taskId'] = t.task_id
else: else:
documents = [models.Document.get(data['id'])] document = models.Document.get(data['id'])
for document in documents:
if document.editable(request.user, item): if document.editable(request.user, item):
if document == documents[0]: add_changelog(request, data)
add_changelog(request, data)
document.edit(data, request.user, item) document.edit(data, request.user, item)
document.save() document.save()
response['data'] = document.json(user=request.user, item=item) response['data'] = document.json(user=request.user, item=item)
@ -379,8 +383,12 @@ def file(request, id, name=None):
def thumbnail(request, id, size=256, page=None): def thumbnail(request, id, size=256, page=None):
size = int(size) size = int(size)
document = get_document_or_404_json(request, id) document = get_document_or_404_json(request, id)
if "q" in request.GET and page:
img = document.highlight_page(page, request.GET["q"], size)
return HttpResponse(img, content_type="image/jpeg")
return HttpFileResponse(document.thumbnail(size, page=page)) return HttpFileResponse(document.thumbnail(size, page=page))
@login_required_json @login_required_json
def upload(request): def upload(request):
if 'id' in request.GET: if 'id' in request.GET:
@ -505,3 +513,37 @@ def autocompleteDocuments(request, data):
response['data']['items'] = [i['value'] for i in qs] response['data']['items'] = [i['value'] for i in qs]
return render_to_json_response(response) return render_to_json_response(response)
actions.register(autocompleteDocuments) actions.register(autocompleteDocuments)
def document(request, fragment):
    """Render share/embed metadata (title, description, preview) for a
    document URL fragment of the form "<id>", "<id>/<page>" or
    "<id>/<x,y,w,h>".
    """
    context = {}
    parts = fragment.split('/')
    # FIXME: parse collection urls and return the right metadata for those
    id = parts[0]
    page = None
    crop = None
    if len(parts) == 2:
        # a single number selects a page, a comma list selects a crop rect
        rect = parts[1].split(',')
        if len(rect) == 1:
            page = rect[0]
        else:
            crop = rect
    try:
        document = models.Document.objects.filter(id=ox.fromAZ(id)).first()
    except Exception:
        # invalid public id (ox.fromAZ failed) -> treat as unknown document
        document = None
    if document and document.access(request.user):
        context['title'] = document.data['title']
        if document.data.get('description'):
            context['description'] = document.data['description']
        public_id = ox.toAZ(document.id)
        preview = '/documents/%s/512p.jpg' % public_id
        if page:
            preview = '/documents/%s/512p%s.jpg' % (public_id, page)
        if crop:
            preview = '/documents/%s/512p%s.jpg' % (public_id, ','.join(crop))
        context['preview'] = request.build_absolute_uri(preview)
    context['url'] = request.build_absolute_uri('/documents/' + fragment)
    context['settings'] = settings
    return render(request, "document.html", context)

View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class DocumentcollectionConfig(AppConfig):
    """App configuration for the documentcollection app."""
    # 64-bit auto primary keys (Django 3.2+ per-app default)
    default_auto_field = "django.db.models.BigAutoField"
    name = 'documentcollection'

View file

@ -0,0 +1,61 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import documentcollection.models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated by Django 4.2.3: normalize field declarations after
    the framework upgrade (BigAutoField primary keys, JSONField with
    DjangoJSONEncoder, named callables instead of lambda defaults).
    Schema-neutral for most backends; do not edit by hand."""

    dependencies = [
        ('documentcollection', '0004_jsonfield'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collection',
            name='description',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='collection',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='collection',
            name='poster_frames',
            field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='collection',
            name='query',
            field=oxdjango.fields.JSONField(default=documentcollection.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='collection',
            name='sort',
            field=oxdjango.fields.JSONField(default=documentcollection.models.get_collectionsort, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='collection',
            name='status',
            field=models.CharField(default='private', max_length=20),
        ),
        migrations.AlterField(
            model_name='collection',
            name='type',
            field=models.CharField(default='static', max_length=255),
        ),
        migrations.AlterField(
            model_name='collectiondocument',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='position',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -34,6 +34,9 @@ def get_collectionview():
def get_collectionsort(): def get_collectionsort():
return tuple(settings.CONFIG['user']['ui']['collectionSort']) return tuple(settings.CONFIG['user']['ui']['collectionSort'])
def default_query():
    """Default collection query: a static (manually curated) collection.

    Named module-level callable so migrations can serialize it (a lambda
    default cannot be referenced by migrations).
    """
    return dict(static=True)
class Collection(models.Model): class Collection(models.Model):
class Meta: class Meta:
@ -46,7 +49,7 @@ class Collection(models.Model):
name = models.CharField(max_length=255) name = models.CharField(max_length=255)
status = models.CharField(max_length=20, default='private') status = models.CharField(max_length=20, default='private')
_status = ['private', 'public', 'featured'] _status = ['private', 'public', 'featured']
query = JSONField(default=lambda: {"static": True}, editable=False) query = JSONField(default=default_query, editable=False)
type = models.CharField(max_length=255, default='static') type = models.CharField(max_length=255, default='static')
description = models.TextField(default='') description = models.TextField(default='')

View file

@ -86,6 +86,11 @@ def findCollections(request, data):
for x in data.get('query', {}).get('conditions', []) for x in data.get('query', {}).get('conditions', [])
) )
is_personal = request.user.is_authenticated and any(
(x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
for x in data.get('query', {}).get('conditions', [])
)
if is_section_request: if is_section_request:
qs = query['qs'] qs = query['qs']
if not is_featured and not request.user.is_anonymous: if not is_featured and not request.user.is_anonymous:
@ -94,6 +99,9 @@ def findCollections(request, data):
else: else:
qs = _order_query(query['qs'], query['sort']) qs = _order_query(query['qs'], query['sort'])
if is_personal and request.user.profile.ui.get('hidden', {}).get('collections'):
qs = qs.exclude(name__in=request.user.profile.ui['hidden']['collections'])
response = json_response() response = json_response()
if 'keys' in data: if 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]] qs = qs[query['range'][0]:query['range'][1]]
@ -238,7 +246,7 @@ def addCollection(request, data):
'type' and 'view'. 'type' and 'view'.
see: editCollection, findCollections, getCollection, removeCollection, sortCollections see: editCollection, findCollections, getCollection, removeCollection, sortCollections
''' '''
data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip() data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
name = data['name'] name = data['name']
if not name: if not name:
name = "Untitled" name = "Untitled"

7
pandora/edit/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class EditConfig(AppConfig):
    """App configuration for the edit app."""
    # 64-bit auto primary keys (Django 3.2+ per-app default)
    default_auto_field = "django.db.models.BigAutoField"
    name = 'edit'

View file

@ -0,0 +1,41 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import edit.models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated by Django 4.2.3: normalize field declarations after
    the framework upgrade (BigAutoField primary keys, JSONField with
    DjangoJSONEncoder, named callable default for 'query').
    Do not edit by hand."""

    dependencies = [
        ('edit', '0005_jsonfield'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clip',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='edit',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='edit',
            name='poster_frames',
            field=oxdjango.fields.JSONField(default=list, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='edit',
            name='query',
            field=oxdjango.fields.JSONField(default=edit.models.default_query, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='position',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -13,6 +13,7 @@ from django.conf import settings
from django.db import models, transaction from django.db import models, transaction
from django.db.models import Max from django.db.models import Max
from django.contrib.auth import get_user_model from django.contrib.auth import get_user_model
from django.core.cache import cache
from oxdjango.fields import JSONField from oxdjango.fields import JSONField
@ -24,6 +25,7 @@ import clip.models
from archive import extract from archive import extract
from user.utils import update_groups from user.utils import update_groups
from user.models import Group from user.models import Group
from clip.utils import add_cuts
from . import managers from . import managers
@ -33,6 +35,9 @@ User = get_user_model()
def get_path(f, x): return f.path(x) def get_path(f, x): return f.path(x)
def get_icon_path(f, x): return get_path(f, 'icon.jpg') def get_icon_path(f, x): return get_path(f, 'icon.jpg')
def default_query():
    """Default edit query: a static (manually ordered) edit.

    Named module-level callable so migrations can serialize it (a lambda
    default cannot be referenced by migrations).
    """
    return dict(static=True)
class Edit(models.Model): class Edit(models.Model):
class Meta: class Meta:
@ -51,7 +56,7 @@ class Edit(models.Model):
description = models.TextField(default='') description = models.TextField(default='')
rightslevel = models.IntegerField(db_index=True, default=0) rightslevel = models.IntegerField(db_index=True, default=0)
query = JSONField(default=lambda: {"static": True}, editable=False) query = JSONField(default=default_query, editable=False)
type = models.CharField(max_length=255, default='static') type = models.CharField(max_length=255, default='static')
icon = models.ImageField(default=None, blank=True, null=True, upload_to=get_icon_path) icon = models.ImageField(default=None, blank=True, null=True, upload_to=get_icon_path)
@ -93,6 +98,8 @@ class Edit(models.Model):
# dont add clip if in/out are invalid # dont add clip if in/out are invalid
if not c.annotation: if not c.annotation:
duration = c.item.sort.duration duration = c.item.sort.duration
if c.start is None or c.end is None:
return False
if c.start > c.end \ if c.start > c.end \
or round(c.start, 3) >= round(duration, 3) \ or round(c.start, 3) >= round(duration, 3) \
or round(c.end, 3) > round(duration, 3): or round(c.end, 3) > round(duration, 3):
@ -507,7 +514,7 @@ class Clip(models.Model):
if value: if value:
data[key] = value data[key] = value
data['duration'] = data['out'] - data['in'] data['duration'] = data['out'] - data['in']
data['cuts'] = tuple([c for c in self.item.get('cuts', []) if c > self.start and c < self.end]) add_cuts(data, self.item, self.start, self.end)
data['layers'] = self.get_layers(user) data['layers'] = self.get_layers(user)
data['streams'] = [s.file.oshash for s in self.item.streams()] data['streams'] = [s.file.oshash for s in self.item.streams()]
return data return data

View file

@ -3,14 +3,16 @@
import os import os
import re import re
import ox from oxdjango.api import actions
from oxdjango.decorators import login_required_json from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
import ox
from django.conf import settings
from django.db import transaction from django.db import transaction
from django.db.models import Max from django.db.models import Max
from oxdjango.http import HttpFileResponse from django.db.models import Sum
from oxdjango.api import actions
from django.conf import settings
from item import utils from item import utils
from changelog.models import add_changelog from changelog.models import add_changelog
@ -190,7 +192,7 @@ def _order_clips(edit, sort):
'in': 'start', 'in': 'start',
'out': 'end', 'out': 'end',
'text': 'sortvalue', 'text': 'sortvalue',
'volume': 'sortvolume', 'volume': 'volume' if edit.type == 'smart' else 'sortvolume',
'item__sort__item': 'item__sort__public_id', 'item__sort__item': 'item__sort__public_id',
}.get(key, key) }.get(key, key)
order = '%s%s' % (operator, key) order = '%s%s' % (operator, key)
@ -260,7 +262,7 @@ def addEdit(request, data):
} }
see: editEdit, findEdit, getEdit, removeEdit, sortEdits see: editEdit, findEdit, getEdit, removeEdit, sortEdits
''' '''
data['name'] = re.sub(' \[\d+\]$', '', data.get('name', 'Untitled')).strip() data['name'] = re.sub(r' \[\d+\]$', '', data.get('name', 'Untitled')).strip()
name = data['name'] name = data['name']
if not name: if not name:
name = "Untitled" name = "Untitled"
@ -412,6 +414,11 @@ def findEdits(request, data):
is_featured = any(filter(is_featured_condition, data.get('query', {}).get('conditions', []))) is_featured = any(filter(is_featured_condition, data.get('query', {}).get('conditions', [])))
is_personal = request.user.is_authenticated and any(
(x['key'] == 'user' and x['value'] == request.user.username and x['operator'] == '==')
for x in data.get('query', {}).get('conditions', [])
)
if is_section_request: if is_section_request:
qs = query['qs'] qs = query['qs']
if not is_featured and not request.user.is_anonymous: if not is_featured and not request.user.is_anonymous:
@ -420,6 +427,9 @@ def findEdits(request, data):
else: else:
qs = _order_query(query['qs'], query['sort']) qs = _order_query(query['qs'], query['sort'])
if is_personal and request.user.profile.ui.get('hidden', {}).get('edits'):
qs = qs.exclude(name__in=request.user.profile.ui['hidden']['edits'])
response = json_response() response = json_response()
if 'keys' in data: if 'keys' in data:
qs = qs[query['range'][0]:query['range'][1]] qs = qs[query['range'][0]:query['range'][1]]

3
pandora/encoding.conf.in Normal file
View file

@ -0,0 +1,3 @@
LOGLEVEL=info
MAX_TASKS_PER_CHILD=500
CONCURRENCY=1

6
pandora/entity/apps.py Normal file
View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class EntityConfig(AppConfig):
    """App configuration for the entity app."""
    # 64-bit auto primary keys (Django 3.2+ per-app default)
    default_auto_field = "django.db.models.BigAutoField"
    name = 'entity'

View file

@ -0,0 +1,50 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated by Django 4.2.3: normalize field declarations after
    the framework upgrade (BigAutoField primary keys, JSONField with
    DjangoJSONEncoder, explicit TextField default). Do not edit by hand."""

    dependencies = [
        ('entity', '0006_auto_20180918_0903'),
    ]

    operations = [
        migrations.AlterField(
            model_name='documentproperties',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='documentproperties',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='entity',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='entity',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='entity',
            name='name_find',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='find',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='link',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

6
pandora/event/apps.py Normal file
View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class EventConfig(AppConfig):
    """App configuration for the event app."""
    # 64-bit auto primary keys (Django 3.2+ per-app default)
    default_auto_field = "django.db.models.BigAutoField"
    name = 'event'

View file

@ -0,0 +1,43 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 4.2.3: normalize field declarations after
    the framework upgrade (BigAutoField primary key, explicit '' defaults
    for the CharField/TextField columns). Do not edit by hand."""

    dependencies = [
        ('event', '0003_auto_20160304_1644'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='duration',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='event',
            name='end',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='event',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='event',
            name='name_find',
            field=models.TextField(default=''),
        ),
        migrations.AlterField(
            model_name='event',
            name='start',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='event',
            name='type',
            field=models.CharField(default='', max_length=255),
        ),
    ]

View file

@ -1,20 +1,26 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from celery.task import task from app.celery import app
from .models import Event from .models import Event
''' '''
@periodic_task(run_every=crontab(hour=7, minute=30), queue='encoding') from celery.schedules import crontab
@app.task(ignore_results=True, queue='encoding')
def update_all_matches(**kwargs): def update_all_matches(**kwargs):
ids = [e['id'] for e in Event.objects.all().values('id')] ids = [e['id'] for e in Event.objects.all().values('id')]
for i in ids: for i in ids:
e = Event.objects.get(pk=i) e = Event.objects.get(pk=i)
e.update_matches() e.update_matches()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(crontab(hour=7, minute=30), update_all_matches.s())
''' '''
@app.task(ignore_results=True, queue='default')
def update_matches(eventId):
    """Async task: recompute annotation matches for a single event."""
    Event.objects.get(pk=eventId).update_matches()

View file

@ -2,4 +2,5 @@ from django.apps import AppConfig
class HomeConfig(AppConfig): class HomeConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'home' name = 'home'

View file

@ -0,0 +1,30 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    """Auto-generated by Django 4.2.3: normalize field declarations after
    the framework upgrade (BigAutoField primary key, JSONField with
    DjangoJSONEncoder, explicit -1 default for 'index').
    Do not edit by hand."""

    dependencies = [
        ('home', '0002_jsonfield'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='item',
            name='index',
            field=models.IntegerField(default=-1),
        ),
    ]

6
pandora/item/apps.py Normal file
View file

@ -0,0 +1,6 @@
from django.apps import AppConfig
class ItemConfig(AppConfig):
    """App configuration for the item app."""
    # 64-bit auto primary keys (Django 3.2+ per-app default)
    default_auto_field = "django.db.models.BigAutoField"
    name = 'item'

View file

@ -4,7 +4,6 @@ from django.core.management.base import BaseCommand
from django.conf import settings from django.conf import settings
from django.db import transaction from django.db import transaction
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models

View file

@ -6,7 +6,6 @@ from django.db import connection, transaction
from django.db.models import fields from django.db.models import fields
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models
import clip.models import clip.models

View file

@ -5,7 +5,6 @@ from django.db import connection, transaction
from django.db.models import fields from django.db.models import fields
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models
import clip.models import clip.models

View file

@ -5,7 +5,6 @@ from django.db import connection, transaction
from django.db.models import fields from django.db.models import fields
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models
import clip.models import clip.models

View file

@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-
import os
from glob import glob
from django.core.management.base import BaseCommand
import app.monkey_patch
from ... import models
from ... import tasks
class Command(BaseCommand):
    """
    rebuild posters for all items.
    """
    help = 'rebuild all posters for all items.'
    args = ''

    def handle(self, **options):
        """Walk all items in id order, chunked to bound memory use, and
        regenerate each poster (deleting any existing one first)."""
        chunk_size = 100
        total = models.Item.objects.count()
        offset = 0
        while offset <= total:
            batch = models.Item.objects.all().order_by('id')[offset:offset + chunk_size]
            for item in batch:
                print(item)
                if item.poster:
                    item.poster.delete()
                item.make_poster()
            offset += chunk_size

View file

@ -6,7 +6,6 @@ from django.db import connection, transaction
from django.db.models import fields from django.db.models import fields
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models

View file

@ -5,7 +5,6 @@ from django.core.management.base import BaseCommand
from django.db import connection, transaction from django.db import connection, transaction
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models

View file

@ -5,7 +5,6 @@ from django.db import connection, transaction
from django.db.models import fields from django.db.models import fields
from django.conf import settings from django.conf import settings
settings.RELOAD_CONFIG = False
import app.monkey_patch import app.monkey_patch
from ... import models from ... import models
import clip.models import clip.models

View file

@ -33,7 +33,7 @@ def parseCondition(condition, user, owner=None):
k = {'id': 'public_id'}.get(k, k) k = {'id': 'public_id'}.get(k, k)
if not k: if not k:
k = '*' k = '*'
v = condition['value'] v = condition.get('value', '')
op = condition.get('operator') op = condition.get('operator')
if not op: if not op:
op = '=' op = '='
@ -62,6 +62,9 @@ def parseCondition(condition, user, owner=None):
if k == 'list': if k == 'list':
key_type = '' key_type = ''
if k in ('width', 'height'):
key_type = 'integer'
if k == 'groups': if k == 'groups':
if op == '==' and v == '$my': if op == '==' and v == '$my':
if not owner: if not owner:
@ -86,8 +89,11 @@ def parseCondition(condition, user, owner=None):
elif k == 'rendered': elif k == 'rendered':
return Q(rendered=v) return Q(rendered=v)
elif k == 'resolution': elif k == 'resolution':
q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \ if isinstance(v, list) and len(v) == 2:
& parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user) q = parseCondition({'key': 'width', 'value': v[0], 'operator': op}, user) \
& parseCondition({'key': 'height', 'value': v[1], 'operator': op}, user)
else:
q = Q(id=0)
if exclude: if exclude:
q = ~q q = ~q
return q return q
@ -318,6 +324,8 @@ class ItemManager(Manager):
q |= Q(groups__in=user.groups.all()) q |= Q(groups__in=user.groups.all())
rendered_q |= Q(groups__in=user.groups.all()) rendered_q |= Q(groups__in=user.groups.all())
qs = qs.filter(q) qs = qs.filter(q)
max_level = len(settings.CONFIG['rightsLevels'])
qs = qs.filter(level__lte=max_level)
if settings.CONFIG.get('itemRequiresVideo') and level != 'admin': if settings.CONFIG.get('itemRequiresVideo') and level != 'admin':
qs = qs.filter(rendered_q) qs = qs.filter(rendered_q)
return qs return qs

View file

@ -71,7 +71,7 @@ class Migration(migrations.Migration):
('poster_width', models.IntegerField(default=0)), ('poster_width', models.IntegerField(default=0)),
('poster_frame', models.FloatField(default=-1)), ('poster_frame', models.FloatField(default=-1)),
('icon', models.ImageField(blank=True, default=None, upload_to=item.models.get_icon_path)), ('icon', models.ImageField(blank=True, default=None, upload_to=item.models.get_icon_path)),
('torrent', models.FileField(blank=True, default=None, max_length=1000, upload_to=item.models.get_torrent_path)), ('torrent', models.FileField(blank=True, default=None, max_length=1000)),
('stream_info', oxdjango.fields.DictField(default={}, editable=False)), ('stream_info', oxdjango.fields.DictField(default={}, editable=False)),
('stream_aspect', models.FloatField(default=1.3333333333333333)), ('stream_aspect', models.FloatField(default=1.3333333333333333)),
], ],

View file

@ -0,0 +1,19 @@
# Generated by Django 3.0.10 on 2023-07-10 08:52
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    # Removes the ``torrent`` FileField from Item; torrent support was
    # dropped from the application (model field, urls and views deleted
    # in the same changeset).

    dependencies = [
        ('item', '0004_json_cache'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='item',
            name='torrent',
        ),
    ]

View file

@ -0,0 +1,65 @@
# Generated by Django 4.2.3 on 2023-07-27 21:28
import django.core.serializers.json
from django.db import migrations, models
import oxdjango.fields
class Migration(migrations.Migration):
    # Django 4.2 housekeeping migration: switches all auto-created primary
    # keys in the item app to BigAutoField and re-declares the JSON-backed
    # fields on Item with an explicit DjangoJSONEncoder.

    dependencies = [
        ('item', '0005_auto_20230710_0852'),
    ]

    operations = [
        migrations.AlterField(
            model_name='access',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='annotationsequence',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='description',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='facet',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='item',
            name='cache',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='external_data',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='item',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='item',
            name='stream_info',
            field=oxdjango.fields.JSONField(default=dict, editable=False, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        migrations.AlterField(
            model_name='itemfind',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import json import json
import logging
import os import os
import re import re
import shutil import shutil
@ -42,6 +43,7 @@ from user.utils import update_groups
from user.models import Group from user.models import Group
import archive.models import archive.models
logger = logging.getLogger('pandora.' + __name__)
User = get_user_model() User = get_user_model()
@ -155,9 +157,6 @@ def get_icon_path(f, x):
def get_poster_path(f, x): def get_poster_path(f, x):
return get_path(f, 'poster.jpg') return get_path(f, 'poster.jpg')
def get_torrent_path(f, x):
return get_path(f, 'torrent.torrent')
class Item(models.Model): class Item(models.Model):
created = models.DateTimeField(auto_now_add=True) created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True) modified = models.DateTimeField(auto_now=True)
@ -183,7 +182,6 @@ class Item(models.Model):
icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path) icon = models.ImageField(default=None, blank=True, upload_to=get_icon_path)
torrent = models.FileField(default=None, blank=True, max_length=1000, upload_to=get_torrent_path)
stream_info = JSONField(default=dict, editable=False) stream_info = JSONField(default=dict, editable=False)
# stream related fields # stream related fields
@ -231,6 +229,9 @@ class Item(models.Model):
def editable(self, user): def editable(self, user):
if user.is_anonymous: if user.is_anonymous:
return False return False
max_level = len(settings.CONFIG['rightsLevels'])
if self.level > max_level:
return False
if user.profile.capability('canEditMetadata') or \ if user.profile.capability('canEditMetadata') or \
user.is_staff or \ user.is_staff or \
self.user == user or \ self.user == user or \
@ -238,7 +239,7 @@ class Item(models.Model):
return True return True
return False return False
def edit(self, data): def edit(self, data, is_task=False):
data = data.copy() data = data.copy()
# FIXME: how to map the keys to the right place to write them to? # FIXME: how to map the keys to the right place to write them to?
if 'id' in data: if 'id' in data:
@ -255,11 +256,12 @@ class Item(models.Model):
description = data.pop(key) description = data.pop(key)
if isinstance(description, dict): if isinstance(description, dict):
for value in description: for value in description:
value = ox.sanitize_html(value)
d, created = Description.objects.get_or_create(key=k, value=value) d, created = Description.objects.get_or_create(key=k, value=value)
d.description = ox.sanitize_html(description[value]) d.description = ox.sanitize_html(description[value])
d.save() d.save()
else: else:
value = data.get(k, self.get(k, '')) value = ox.sanitize_html(data.get(k, self.get(k, '')))
if not description: if not description:
description = '' description = ''
d, created = Description.objects.get_or_create(key=k, value=value) d, created = Description.objects.get_or_create(key=k, value=value)
@ -294,7 +296,10 @@ class Item(models.Model):
self.data[key] = ox.escape_html(data[key]) self.data[key] = ox.escape_html(data[key])
p = self.save() p = self.save()
if not settings.USE_IMDB and list(filter(lambda k: k in self.poster_keys, data)): if not settings.USE_IMDB and list(filter(lambda k: k in self.poster_keys, data)):
p = tasks.update_poster.delay(self.public_id) if is_task:
tasks.update_poster(self.public_id)
else:
p = tasks.update_poster.delay(self.public_id)
return p return p
def update_external(self): def update_external(self):
@ -473,7 +478,8 @@ class Item(models.Model):
for a in self.annotations.all().order_by('id'): for a in self.annotations.all().order_by('id'):
a.item = other a.item = other
a.set_public_id() with transaction.atomic():
a.set_public_id()
Annotation.objects.filter(id=a.id).update(item=other, public_id=a.public_id) Annotation.objects.filter(id=a.id).update(item=other, public_id=a.public_id)
try: try:
other_sort = other.sort other_sort = other.sort
@ -517,6 +523,7 @@ class Item(models.Model):
cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True) cmd, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'), close_fds=True)
p.wait() p.wait()
os.unlink(tmp_output_txt) os.unlink(tmp_output_txt)
os.close(fd)
return True return True
else: else:
return None return None
@ -634,11 +641,11 @@ class Item(models.Model):
if self.poster_height: if self.poster_height:
i['posterRatio'] = self.poster_width / self.poster_height i['posterRatio'] = self.poster_width / self.poster_height
if keys and 'source' in keys: if keys and 'hasSource' in keys:
i['source'] = self.streams().exclude(file__data='').exists() i['hasSource'] = self.streams().exclude(file__data='').exists()
streams = self.streams() streams = self.streams()
i['durations'] = [s.duration for s in streams] i['durations'] = [s[0] for s in streams.values_list('duration')]
i['duration'] = sum(i['durations']) i['duration'] = sum(i['durations'])
i['audioTracks'] = self.audio_tracks() i['audioTracks'] = self.audio_tracks()
if not i['audioTracks']: if not i['audioTracks']:
@ -694,10 +701,12 @@ class Item(models.Model):
else: else:
values = self.get(key) values = self.get(key)
if values: if values:
values = [ox.sanitize_html(value) for value in values]
for d in Description.objects.filter(key=key, value__in=values): for d in Description.objects.filter(key=key, value__in=values):
i['%sdescription' % key][d.value] = d.description i['%sdescription' % key][d.value] = d.description
else: else:
qs = Description.objects.filter(key=key, value=self.get(key, '')) value = ox.sanitize_html(self.get(key, ''))
qs = Description.objects.filter(key=key, value=value)
i['%sdescription' % key] = '' if qs.count() == 0 else qs[0].description i['%sdescription' % key] = '' if qs.count() == 0 else qs[0].description
if keys: if keys:
info = {} info = {}
@ -855,7 +864,7 @@ class Item(models.Model):
values = list(set(values)) values = list(set(values))
else: else:
values = self.get(key, '') values = self.get(key, '')
if isinstance(values, list): if values and isinstance(values, list) and isinstance(values[0], str):
save(key, '\n'.join(values)) save(key, '\n'.join(values))
else: else:
save(key, values) save(key, values)
@ -1017,12 +1026,16 @@ class Item(models.Model):
set_value(s, name, value) set_value(s, name, value)
elif sort_type == 'person': elif sort_type == 'person':
value = sortNames(self.get(source, [])) value = sortNames(self.get(source, []))
if value is None:
value = ''
value = utils.sort_string(value)[:955] value = utils.sort_string(value)[:955]
set_value(s, name, value) set_value(s, name, value)
elif sort_type == 'string': elif sort_type == 'string':
value = self.get(source, '') value = self.get(source, '')
if value is None:
value = ''
if isinstance(value, list): if isinstance(value, list):
value = ','.join(value) value = ','.join([str(v) for v in value])
value = utils.sort_string(value)[:955] value = utils.sort_string(value)[:955]
set_value(s, name, value) set_value(s, name, value)
elif sort_type == 'words': elif sort_type == 'words':
@ -1099,7 +1112,11 @@ class Item(models.Model):
_current_values.append(value[0]) _current_values.append(value[0])
current_values = _current_values current_values = _current_values
current_values = list(set(current_values)) try:
current_values = list(set(current_values))
except:
logger.error('invalid facet data for %s: %s', key, current_values)
current_values = []
current_values = [ox.decode_html(ox.strip_tags(v)) for v in current_values] current_values = [ox.decode_html(ox.strip_tags(v)) for v in current_values]
current_values = [unicodedata.normalize('NFKD', v) for v in current_values] current_values = [unicodedata.normalize('NFKD', v) for v in current_values]
self.update_facet_values(key, current_values) self.update_facet_values(key, current_values)
@ -1192,7 +1209,7 @@ class Item(models.Model):
if not r: if not r:
return False return False
path = video.name path = video.name
duration = sum(item.cache['durations']) duration = sum(self.item.cache['durations'])
else: else:
path = stream.media.path path = stream.media.path
duration = stream.info['duration'] duration = stream.info['duration']
@ -1288,90 +1305,6 @@ class Item(models.Model):
self.files.filter(selected=True).update(selected=False) self.files.filter(selected=True).update(selected=False)
self.save() self.save()
def get_torrent(self, request):
if self.torrent:
self.torrent.seek(0)
data = ox.torrent.bdecode(self.torrent.read())
url = request.build_absolute_uri("%s/torrent/" % self.get_absolute_url())
if url.startswith('https://'):
url = 'http' + url[5:]
data['url-list'] = ['%s%s' % (url, u.split('torrent/')[1]) for u in data['url-list']]
return ox.torrent.bencode(data)
def make_torrent(self):
if not settings.CONFIG['video'].get('torrent'):
return
streams = self.streams()
if streams.count() == 0:
return
base = self.path('torrent')
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
if not isinstance(base, bytes):
base = base.encode('utf-8')
if os.path.exists(base):
shutil.rmtree(base)
ox.makedirs(base)
filename = utils.safe_filename(ox.decode_html(self.get('title')))
base = self.path('torrent/%s' % filename)
base = os.path.abspath(os.path.join(settings.MEDIA_ROOT, base))
size = 0
duration = 0.0
if streams.count() == 1:
v = streams[0]
media_path = v.media.path
extension = media_path.split('.')[-1]
url = "%s/torrent/%s.%s" % (self.get_absolute_url(),
quote(filename.encode('utf-8')),
extension)
video = "%s.%s" % (base, extension)
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size = v.media.size
duration = v.duration
else:
url = "%s/torrent/" % self.get_absolute_url()
part = 1
ox.makedirs(base)
for v in streams:
media_path = v.media.path
extension = media_path.split('.')[-1]
video = "%s/%s.Part %d.%s" % (base, filename, part, extension)
part += 1
if not isinstance(media_path, bytes):
media_path = media_path.encode('utf-8')
if not isinstance(video, bytes):
video = video.encode('utf-8')
media_path = os.path.relpath(media_path, os.path.dirname(video))
os.symlink(media_path, video)
size += v.media.size
duration += v.duration
video = base
torrent = '%s.torrent' % base
url = "http://%s%s" % (settings.CONFIG['site']['url'], url)
meta = {
'filesystem_encoding': 'utf-8',
'target': torrent,
'url-list': url,
}
if duration:
meta['playtime'] = ox.format_duration(duration*1000)[:-4]
# slightly bigger torrent file but better for streaming
piece_size_pow2 = 15 # 1 mbps -> 32KB pieces
if size / duration >= 1000000:
piece_size_pow2 = 16 # 2 mbps -> 64KB pieces
meta['piece_size_pow2'] = piece_size_pow2
ox.torrent.create_torrent(video, settings.TRACKER_URL, meta)
self.torrent.name = torrent[len(settings.MEDIA_ROOT)+1:]
self.save()
def audio_tracks(self): def audio_tracks(self):
tracks = [f['language'] tracks = [f['language']
for f in self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True)).values('language') for f in self.files.filter(selected=True).filter(Q(is_video=True) | Q(is_audio=True)).values('language')
@ -1379,11 +1312,10 @@ class Item(models.Model):
return sorted(set(tracks)) return sorted(set(tracks))
def streams(self, track=None): def streams(self, track=None):
files = self.files.filter(selected=True).filter(Q(is_audio=True) | Q(is_video=True))
qs = archive.models.Stream.objects.filter( qs = archive.models.Stream.objects.filter(
source=None, available=True, file__item=self, file__selected=True file__in=files, source=None, available=True
).filter( ).select_related()
Q(file__is_audio=True) | Q(file__is_video=True)
)
if not track: if not track:
tracks = self.audio_tracks() tracks = self.audio_tracks()
if len(tracks) > 1: if len(tracks) > 1:
@ -1422,7 +1354,6 @@ class Item(models.Model):
self.select_frame() self.select_frame()
self.make_poster() self.make_poster()
self.make_icon() self.make_icon()
self.make_torrent()
self.rendered = streams.count() > 0 self.rendered = streams.count() > 0
self.save() self.save()
if self.rendered: if self.rendered:
@ -1608,8 +1539,15 @@ class Item(models.Model):
cmd += ['-l', timeline] cmd += ['-l', timeline]
if frame: if frame:
cmd += ['-f', frame] cmd += ['-f', frame]
p = subprocess.Popen(cmd, close_fds=True) if settings.ITEM_ICON_DATA:
p.wait() cmd += '-d', '-'
data = self.json()
data = utils.normalize_dict('NFC', data)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, close_fds=True)
p.communicate(json.dumps(data, default=to_json).encode('utf-8'))
else:
p = subprocess.Popen(cmd, close_fds=True)
p.wait()
# remove cached versions # remove cached versions
icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon)) icon = os.path.abspath(os.path.join(settings.MEDIA_ROOT, icon))
for f in glob(icon.replace('.jpg', '*.jpg')): for f in glob(icon.replace('.jpg', '*.jpg')):
@ -1621,11 +1559,13 @@ class Item(models.Model):
return icon return icon
def add_empty_clips(self): def add_empty_clips(self):
if not settings.EMPTY_CLIPS:
return
subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True) subtitles = utils.get_by_key(settings.CONFIG['layers'], 'isSubtitles', True)
if not subtitles: if not subtitles:
return return
# otherwise add empty 5 seconds annotation every minute # otherwise add empty 5 seconds annotation every minute
duration = sum([s.duration for s in self.streams()]) duration = sum([s[0] for s in self.streams().values_list('duration')])
layer = subtitles['id'] layer = subtitles['id']
# FIXME: allow annotations from no user instead? # FIXME: allow annotations from no user instead?
user = User.objects.all().order_by('id')[0] user = User.objects.all().order_by('id')[0]
@ -1874,6 +1814,8 @@ class Description(models.Model):
value = models.CharField(max_length=1000, db_index=True) value = models.CharField(max_length=1000, db_index=True)
description = models.TextField() description = models.TextField()
def __str__(self):
return "%s=%s" % (self.key, self.value)
class AnnotationSequence(models.Model): class AnnotationSequence(models.Model):
item = models.OneToOneField('Item', related_name='_annotation_sequence', on_delete=models.CASCADE) item = models.OneToOneField('Item', related_name='_annotation_sequence', on_delete=models.CASCADE)
@ -1889,13 +1831,12 @@ class AnnotationSequence(models.Model):
@classmethod @classmethod
def nextid(cls, item): def nextid(cls, item):
with transaction.atomic(): s, created = cls.objects.get_or_create(item=item)
s, created = cls.objects.get_or_create(item=item) if created:
if created: nextid = s.value
nextid = s.value else:
else: cursor = connection.cursor()
cursor = connection.cursor() sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id)
sql = "UPDATE %s SET value = value + 1 WHERE item_id = %s RETURNING value" % (cls._meta.db_table, item.id) cursor.execute(sql)
cursor.execute(sql) nextid = cursor.fetchone()[0]
nextid = cursor.fetchone()[0]
return "%s/%s" % (item.public_id, ox.toAZ(nextid)) return "%s/%s" % (item.public_id, ox.toAZ(nextid))

View file

@ -24,10 +24,6 @@ urls = [
re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<format>webm|ogv|mp4)$', views.video), re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<format>webm|ogv|mp4)$', views.video),
re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<track>.+)\.(?P<format>webm|ogv|mp4)$', views.video), re_path(r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<track>.+)\.(?P<format>webm|ogv|mp4)$', views.video),
#torrent
re_path(r'^(?P<id>[A-Z0-9].*)/torrent$', views.torrent),
re_path(r'^(?P<id>[A-Z0-9].*)/torrent/(?P<filename>.*?)$', views.torrent),
#export #export
re_path(r'^(?P<id>[A-Z0-9].*)/json$', views.item_json), re_path(r'^(?P<id>[A-Z0-9].*)/json$', views.item_json),
re_path(r'^(?P<id>[A-Z0-9].*)/xml$', views.item_xml), re_path(r'^(?P<id>[A-Z0-9].*)/xml$', views.item_xml),

View file

@ -2,27 +2,35 @@
from datetime import timedelta, datetime from datetime import timedelta, datetime
from urllib.parse import quote from urllib.parse import quote
import xml.etree.ElementTree as ET
import gzip import gzip
import os import os
import random import random
import logging
from celery.task import task, periodic_task from app.celery import app
from celery.schedules import crontab
from django.conf import settings from django.conf import settings
from django.db import connection, transaction from django.db import connection, transaction
from django.db.models import Q from django.db.models import Q
from ox.utils import ET
from app.utils import limit_rate from app.utils import limit_rate
from taskqueue.models import Task from taskqueue.models import Task
@periodic_task(run_every=timedelta(days=1), queue='encoding') logger = logging.getLogger('pandora.' + __name__)
@app.task(queue='encoding')
def cronjob(**kwargs): def cronjob(**kwargs):
if limit_rate('item.tasks.cronjob', 8 * 60 * 60): if limit_rate('item.tasks.cronjob', 8 * 60 * 60):
update_random_sort() update_random_sort()
update_random_clip_sort() update_random_clip_sort()
clear_cache.delay() clear_cache.delay()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(timedelta(days=1), cronjob.s())
def update_random_sort(): def update_random_sort():
from . import models from . import models
if list(filter(lambda f: f['id'] == 'random', settings.CONFIG['itemKeys'])): if list(filter(lambda f: f['id'] == 'random', settings.CONFIG['itemKeys'])):
@ -50,7 +58,7 @@ def update_random_clip_sort():
cursor.execute(row) cursor.execute(row)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_clips(public_id): def update_clips(public_id):
from . import models from . import models
try: try:
@ -59,7 +67,7 @@ def update_clips(public_id):
return return
item.clips.all().update(user=item.user.id) item.clips.all().update(user=item.user.id)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_poster(public_id): def update_poster(public_id):
from . import models from . import models
try: try:
@ -77,7 +85,7 @@ def update_poster(public_id):
icon=item.icon.name icon=item.icon.name
) )
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_file_paths(public_id): def update_file_paths(public_id):
from . import models from . import models
try: try:
@ -86,7 +94,7 @@ def update_file_paths(public_id):
return return
item.update_file_paths() item.update_file_paths()
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_external(public_id): def update_external(public_id):
from . import models from . import models
try: try:
@ -95,7 +103,7 @@ def update_external(public_id):
return return
item.update_external() item.update_external()
@task(queue="encoding") @app.task(queue="encoding")
def update_timeline(public_id): def update_timeline(public_id):
from . import models from . import models
try: try:
@ -105,7 +113,7 @@ def update_timeline(public_id):
item.update_timeline(async_=False) item.update_timeline(async_=False)
Task.finish(item) Task.finish(item)
@task(queue="encoding") @app.task(queue="encoding")
def rebuild_timeline(public_id): def rebuild_timeline(public_id):
from . import models from . import models
i = models.Item.objects.get(public_id=public_id) i = models.Item.objects.get(public_id=public_id)
@ -113,7 +121,7 @@ def rebuild_timeline(public_id):
s.make_timeline() s.make_timeline()
i.update_timeline(async_=False) i.update_timeline(async_=False)
@task(queue="encoding") @app.task(queue="encoding")
def load_subtitles(public_id): def load_subtitles(public_id):
from . import models from . import models
try: try:
@ -126,7 +134,7 @@ def load_subtitles(public_id):
item.update_facets() item.update_facets()
@task(queue="encoding") @app.task(queue="encoding")
def extract_clip(public_id, in_, out, resolution, format, track=None): def extract_clip(public_id, in_, out, resolution, format, track=None):
from . import models from . import models
try: try:
@ -138,7 +146,7 @@ def extract_clip(public_id, in_, out, resolution, format, track=None):
return False return False
@task(queue="encoding") @app.task(queue="encoding")
def clear_cache(days=60): def clear_cache(days=60):
import subprocess import subprocess
path = os.path.join(settings.MEDIA_ROOT, 'media') path = os.path.join(settings.MEDIA_ROOT, 'media')
@ -152,7 +160,7 @@ def clear_cache(days=60):
subprocess.check_output(cmd) subprocess.check_output(cmd)
@task(ignore_results=True, queue='default') @app.task(ignore_results=True, queue='default')
def update_sitemap(base_url): def update_sitemap(base_url):
from . import models from . import models
sitemap = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'sitemap.xml.gz')) sitemap = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'sitemap.xml.gz'))
@ -350,3 +358,18 @@ def update_sitemap(base_url):
f.write(data) f.write(data)
with gzip.open(sitemap, 'wb') as f: with gzip.open(sitemap, 'wb') as f:
f.write(data) f.write(data)
@app.task(queue='default')
def bulk_edit(data, username):
from django.db import transaction
from . import models
from .views import edit_item
user = models.User.objects.get(username=username)
items = models.Item.objects.filter(public_id__in=data['id'])
for item in items:
if item.editable(user):
with transaction.atomic():
item.refresh_from_db()
response = edit_item(user, item, data, is_task=True)
return {}

View file

@ -71,7 +71,7 @@ def join_tiles(source_paths, durations, target_path):
if not w or large_tile_i < large_tile_n - 1: if not w or large_tile_i < large_tile_n - 1:
w = 60 w = 60
data['target_images']['large'] = data['target_images']['large'].resize( data['target_images']['large'] = data['target_images']['large'].resize(
(w, small_tile_h), Image.ANTIALIAS (w, small_tile_h), Image.LANCZOS
) )
if data['target_images']['small']: if data['target_images']['small']:
data['target_images']['small'].paste( data['target_images']['small'].paste(
@ -90,7 +90,7 @@ def join_tiles(source_paths, durations, target_path):
if data['full_tile_widths'][0]: if data['full_tile_widths'][0]:
resized = data['target_images']['large'].resize(( resized = data['target_images']['large'].resize((
data['full_tile_widths'][0], large_tile_h data['full_tile_widths'][0], large_tile_h
), Image.ANTIALIAS) ), Image.LANCZOS)
data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0)) data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0))
data['full_tile_offset'] += data['full_tile_widths'][0] data['full_tile_offset'] += data['full_tile_widths'][0]
data['full_tile_widths'] = data['full_tile_widths'][1:] data['full_tile_widths'] = data['full_tile_widths'][1:]
@ -196,7 +196,7 @@ def join_tiles(source_paths, durations, target_path):
#print(image_file) #print(image_file)
image_file = '%stimeline%s%dp.jpg' % (target_path, full_tile_mode, small_tile_h) image_file = '%stimeline%s%dp.jpg' % (target_path, full_tile_mode, small_tile_h)
data['target_images']['full'].resize( data['target_images']['full'].resize(
(full_tile_w, small_tile_h), Image.ANTIALIAS (full_tile_w, small_tile_h), Image.LANCZOS
).save(image_file) ).save(image_file)
#print(image_file) #print(image_file)

View file

@ -61,7 +61,7 @@ def sort_title(title):
title = sort_string(title) title = sort_string(title)
#title #title
title = re.sub('[\'!¿¡,\.;\-"\:\*\[\]]', '', title) title = re.sub(r'[\'!¿¡,\.;\-"\:\*\[\]]', '', title)
return title.strip() return title.strip()
def get_positions(ids, pos, decode_id=False): def get_positions(ids, pos, decode_id=False):

View file

@ -16,12 +16,14 @@ from wsgiref.util import FileWrapper
from django.conf import settings from django.conf import settings
from ox.utils import json, ET from ox.utils import json, ET
from oxdjango.decorators import login_required_json
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
from oxdjango.http import HttpFileResponse
import ox import ox
from oxdjango.api import actions
from oxdjango.decorators import login_required_json
from oxdjango.http import HttpFileResponse
from oxdjango.shortcuts import render_to_json_response, get_object_or_404_json, json_response
import oxdjango
from . import models from . import models
from . import utils from . import utils
from . import tasks from . import tasks
@ -32,7 +34,6 @@ from clip.models import Clip
from user.models import has_capability from user.models import has_capability
from changelog.models import add_changelog from changelog.models import add_changelog
from oxdjango.api import actions
def _order_query(qs, sort, prefix='sort__'): def _order_query(qs, sort, prefix='sort__'):
@ -308,7 +309,7 @@ def find(request, data):
responsive UI: First leave out `keys` to get totals as fast as possible, responsive UI: First leave out `keys` to get totals as fast as possible,
then pass `positions` to get the positions of previously selected items, then pass `positions` to get the positions of previously selected items,
finally make the query with the `keys` you need and an appropriate `range`. finally make the query with the `keys` you need and an appropriate `range`.
For more examples, see https://wiki.0x2620.org/wiki/pandora/QuerySyntax. For more examples, see https://code.0x2620.org/0x2620/pandora/wiki/QuerySyntax.
see: add, edit, get, lookup, remove, upload see: add, edit, get, lookup, remove, upload
''' '''
if settings.JSON_DEBUG: if settings.JSON_DEBUG:
@ -533,17 +534,18 @@ def get(request, data):
return render_to_json_response(response) return render_to_json_response(response)
actions.register(get) actions.register(get)
def edit_item(request, item, data): def edit_item(user, item, data, is_task=False):
data = data.copy()
update_clips = False update_clips = False
response = json_response(status=200, text='ok') response = json_response(status=200, text='ok')
if 'rightslevel' in data: if 'rightslevel' in data:
if request.user.profile.capability('canEditRightsLevel'): if user.profile.capability('canEditRightsLevel'):
item.level = int(data['rightslevel']) item.level = int(data['rightslevel'])
else: else:
response = json_response(status=403, text='permission denied') response = json_response(status=403, text='permission denied')
del data['rightslevel'] del data['rightslevel']
if 'user' in data: if 'user' in data:
if request.user.profile.get_level() in ('admin', 'staff') and \ if user.profile.get_level() in ('admin', 'staff') and \
models.User.objects.filter(username=data['user']).exists(): models.User.objects.filter(username=data['user']).exists():
new_user = models.User.objects.get(username=data['user']) new_user = models.User.objects.get(username=data['user'])
if new_user != item.user: if new_user != item.user:
@ -551,13 +553,13 @@ def edit_item(request, item, data):
update_clips = True update_clips = True
del data['user'] del data['user']
if 'groups' in data: if 'groups' in data:
if not request.user.profile.capability('canManageUsers'): if not user.profile.capability('canManageUsers'):
# Users wihtout canManageUsers can only add/remove groups they are not in # Users wihtout canManageUsers can only add/remove groups they are not in
groups = set([g.name for g in item.groups.all()]) groups = set([g.name for g in item.groups.all()])
user_groups = set([g.name for g in request.user.groups.all()]) user_groups = set([g.name for g in user.groups.all()])
other_groups = list(groups - user_groups) other_groups = list(groups - user_groups)
data['groups'] = [g for g in data['groups'] if g in user_groups] + other_groups data['groups'] = [g for g in data['groups'] if g in user_groups] + other_groups
r = item.edit(data) r = item.edit(data, is_task=is_task)
if r: if r:
r.wait() r.wait()
if update_clips: if update_clips:
@ -594,10 +596,10 @@ def add(request, data):
if p: if p:
p.wait() p.wait()
else: else:
i.make_poster() item.make_poster()
del data['title'] del data['title']
if data: if data:
response = edit_item(request, item, data) response = edit_item(request.user, item, data)
response['data'] = item.json() response['data'] = item.json()
add_changelog(request, request_data, item.public_id) add_changelog(request, request_data, item.public_id)
return render_to_json_response(response) return render_to_json_response(response)
@ -619,16 +621,16 @@ def edit(request, data):
see: add, find, get, lookup, remove, upload see: add, find, get, lookup, remove, upload
''' '''
if isinstance(data['id'], list): if isinstance(data['id'], list):
items = models.Item.objects.filter(public_id__in=data['id']) add_changelog(request, data)
t = tasks.bulk_edit.delay(data, request.user.username)
response = json_response(status=200, text='ok')
response['data']['taskId'] = t.task_id
else: else:
items = [get_object_or_404_json(models.Item, public_id=data['id'])] item = get_object_or_404_json(models.Item, public_id=data['id'])
for item in items:
if item.editable(request.user): if item.editable(request.user):
request_data = data.copy() add_changelog(request, data)
response = edit_item(request, item, data) response = edit_item(request.user, item, data)
response['data'] = item.json() response['data'] = item.json()
if item == items[0]:
add_changelog(request, request_data)
else: else:
response = json_response(status=403, text='permission denied') response = json_response(status=403, text='permission denied')
return render_to_json_response(response) return render_to_json_response(response)
@ -947,9 +949,11 @@ def timeline(request, id, size, position=-1, format='jpg', mode=None):
if not item.access(request.user): if not item.access(request.user):
return HttpResponseForbidden() return HttpResponseForbidden()
modes = [t['id'] for t in settings.CONFIG['timelines']]
if not mode: if not mode:
mode = 'antialias' mode = 'antialias'
modes = [t['id'] for t in settings.CONFIG['timelines']] if mode not in modes:
mode = modes[0]
if mode not in modes: if mode not in modes:
raise Http404 raise Http404
modes.pop(modes.index(mode)) modes.pop(modes.index(mode))
@ -1029,7 +1033,10 @@ def download(request, id, resolution=None, format='webm', part=None):
return HttpResponseForbidden() return HttpResponseForbidden()
elif r is True: elif r is True:
response = HttpResponse(FileWrapper(video), content_type=content_type) response = HttpResponse(FileWrapper(video), content_type=content_type)
response['Content-Length'] = os.path.getsize(video.name) try:
response['Content-Length'] = os.path.getsize(video.name)
except:
pass
else: else:
response = HttpFileResponse(r, content_type=content_type) response = HttpFileResponse(r, content_type=content_type)
else: else:
@ -1040,27 +1047,6 @@ def download(request, id, resolution=None, format='webm', part=None):
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8')) response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
return response return response
def torrent(request, id, filename=None):
item = get_object_or_404(models.Item, public_id=id)
if not item.access(request.user):
return HttpResponseForbidden()
if not item.torrent:
raise Http404
if not filename or filename.endswith('.torrent'):
response = HttpResponse(item.get_torrent(request),
content_type='application/x-bittorrent')
filename = utils.safe_filename("%s.torrent" % item.get('title'))
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % quote(filename.encode('utf-8'))
return response
while filename.startswith('/'):
filename = filename[1:]
filename = filename.replace('/../', '/')
filename = item.path('torrent/%s' % filename)
filename = os.path.abspath(os.path.join(settings.MEDIA_ROOT, filename))
response = HttpFileResponse(filename)
response['Content-Disposition'] = "attachment; filename*=UTF-8''%s" % \
quote(os.path.basename(filename.encode('utf-8')))
return response
def video(request, id, resolution, format, index=None, track=None): def video(request, id, resolution, format, index=None, track=None):
resolution = int(resolution) resolution = int(resolution)
@ -1282,12 +1268,6 @@ def atom_xml(request):
el.text = "1:1" el.text = "1:1"
if has_capability(request.user, 'canDownloadVideo'): if has_capability(request.user, 'canDownloadVideo'):
if item.torrent:
el = ET.SubElement(entry, "link")
el.attrib['rel'] = 'enclosure'
el.attrib['type'] = 'application/x-bittorrent'
el.attrib['href'] = '%s/torrent/' % page_link
el.attrib['length'] = '%s' % ox.get_torrent_size(item.torrent.path)
# FIXME: loop over streams # FIXME: loop over streams
# for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])): # for s in item.streams().filter(resolution=max(settings.CONFIG['video']['resolutions'])):
for s in item.streams().filter(source=None): for s in item.streams().filter(source=None):
@ -1310,12 +1290,15 @@ def atom_xml(request):
'application/atom+xml' 'application/atom+xml'
) )
def oembed(request): def oembed(request):
format = request.GET.get('format', 'json') format = request.GET.get('format', 'json')
maxwidth = int(request.GET.get('maxwidth', 640)) maxwidth = int(request.GET.get('maxwidth', 640))
maxheight = int(request.GET.get('maxheight', 480)) maxheight = int(request.GET.get('maxheight', 480))
url = request.GET['url'] url = request.GET.get('url')
if not url:
raise Http404
parts = urlparse(url).path.split('/') parts = urlparse(url).path.split('/')
if len(parts) < 2: if len(parts) < 2:
raise Http404 raise Http404

7
pandora/itemlist/apps.py Normal file
View file

@ -0,0 +1,7 @@
from django.apps import AppConfig
class ItemListConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = 'itemlist'

Some files were not shown because too many files have changed in this diff Show more