Compare commits
5 commits
187d853b3a ... 3f9280e0ba
| Author | SHA1 | Date |
|---|---|---|
| | 3f9280e0ba | |
| | 2a5d741ccf | |
| | ce51e8c2c4 | |
| | 790ae53095 | |
| | b1db77de53 | |
4 changed files with 300 additions and 184 deletions
@@ -1015,7 +1015,7 @@ examples (config.SITENAME.jsonc) that are part of this pan.do/ra distribution.
    the system (from).
    */
    "site": {
        "description": "This is a demo of pan.do/ra - a free, open source media archive. It allows you to manage large, decentralized collections of video, to collaboratively create metadata and time-based annotations, and to serve your archive as a cutting-edge web application.",
        "description": "T for Time - pan.do/ra",
        "email": {
            // E-mail address in contact form (to)
            "contact": "system@time.0x2620.org",
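The config.SITENAME.jsonc files allow // and /* */ comments, so they cannot be passed to json.loads directly. A minimal sketch of reading such a file, assuming comments only occur on their own lines or in block form and ignoring trailing commas (pan.do/ra has its own loader; this is only meant to show the shape of the file above):

```python
# Hedged sketch: strip JSONC-style comments before parsing.
import json
import re

def load_jsonc(path):
    with open(path) as fd:
        text = fd.read()
    text = re.sub(r'/\*.*?\*/', '', text, flags=re.S)   # /* ... */ blocks
    text = re.sub(r'^\s*//.*$', '', text, flags=re.M)   # whole-line // comments
    return json.loads(text)

config = load_jsonc('config.SITENAME.jsonc')
print(config['site']['email']['contact'])  # system@time.0x2620.org
```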
@@ -1,156 +1,13 @@
import json
import os
import re
import subprocess
from collections import defaultdict

from django.core.management.base import BaseCommand
from django.conf import settings

import item.models
import itemlist.models

from ...render import get_srt


def resolve_roman(s):
    extra = re.compile(r'^\d+(.*?)$').findall(s)
    if extra:
        extra = extra[0].lower()
        new = {
            'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
            'vi': '6', 'vii': 7, 'viii': '8', 'ix': '9', 'x': '10'
        }.get(extra, extra)
        return s.replace(extra, new)
    return s

def format_duration(duration, fps):
    return float('%0.5f' % (round(duration * fps) / fps))
from ...render import generate_clips


class Command(BaseCommand):
    help = 'generate symlinks to clips and clips.json'

    def add_arguments(self, parser):
        parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
        parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
        parser.add_argument('--censored', action='store', dest='censored', default=None, help='censor items from list')

    def handle(self, **options):
        prefix = options['prefix']
        lang = options["lang"]
        if lang:
            lang = lang.split(',')
            tlang = lang[1:]
            lang = lang[0]
        else:
            tlang = None
        if lang == "en":
            lang = None
        if options['censored']:
            censored_list = itemlist.models.List.get(options["censored"])
            censored = list(censored_list.get_items(censored_list.user).all().values_list('public_id', flat=True))
        clips = []
        for i in item.models.Item.objects.filter(sort__type='original'):
            original_target = ""
            qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
            if qs.count() >= 1:
                clip = {}
                durations = []
                for e in item.models.Item.objects.filter(data__title=i.data['title']):
                    if 'type' not in e.data:
                        print("ignoring invalid video %s (no type)" % e)
                        continue
                    if not e.files.filter(selected=True).exists():
                        continue
                    source = e.files.filter(selected=True)[0].data.path
                    ext = os.path.splitext(source)[1]
                    type_ = e.data['type'][0].lower()
                    target = os.path.join(prefix, type_, i.data['title'] + ext)
                    os.makedirs(os.path.dirname(target), exist_ok=True)
                    if os.path.islink(target):
                        os.unlink(target)
                    os.symlink(source, target)
                    if type_ == "original":
                        original_target = target
                    if options['censored'] and e.public_id in censored:
                        clip[type_ + "_censored"] = target
                        target = '/srv/t_for_time/censored.mp4'
                    clip[type_] = target
                    durations.append(e.files.filter(selected=True)[0].duration)
                clip["duration"] = min(durations)
                if not clip["duration"]:
                    print('!!', durations, clip)
                    continue
                cd = format_duration(clip["duration"], 24)
                #if cd != clip["duration"]:
                #    print(clip["duration"], '->', cd, durations, clip)
                clip["duration"] = cd
                clip['tags'] = i.data.get('tags', [])
                clip['editingtags'] = i.data.get('editingtags', [])
                name = os.path.basename(original_target)
                seqid = re.sub(r"Hotel Aporia_(\d+)", "S\\1_", name)
                seqid = re.sub(r"Night March_(\d+)", "S\\1_", seqid)
                seqid = re.sub(r"_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
                seqid = seqid.split('_')[:2]
                seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
                seqid[1] = resolve_roman(seqid[1])
                seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
                if not seqid[1]:
                    seqid[1] = '0'
                try:
                    clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
                except:
                    print(name, seqid, 'failed')
                    raise
                if "original" in clip and "foreground" in clip and "background" in clip:
                    clips.append(clip)
                elif "original" in clip and "animation" in clip:
                    clips.append(clip)
                else:
                    print("ignoring incomplete video", i)

        with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
            json.dump(clips, fd, indent=2, ensure_ascii=False)

        print("using", len(clips), "clips")

        voice_over = defaultdict(dict)
        for vo in item.models.Item.objects.filter(
            data__type__contains="Voice Over",
        ):
            fragment_id = int(vo.get('title').split('_')[0])
            source = vo.files.filter(selected=True)[0]
            batch = vo.get('batch')[0].replace('Text-', '')
            src = source.data.path
            target = os.path.join(prefix, 'voice_over', batch, '%s.wav' % fragment_id)
            os.makedirs(os.path.dirname(target), exist_ok=True)
            if os.path.islink(target):
                os.unlink(target)
            os.symlink(src, target)
            subs = []
            for sub in vo.annotations.filter(layer="subtitles", languages=lang).exclude(value="").order_by("start"):
                sdata = get_srt(sub, lang=tlang)
                subs.append(sdata)
            voice_over[fragment_id][batch] = {
                "src": target,
                "duration": format_duration(source.duration, 24),
                "subs": subs
            }
        with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
            json.dump(voice_over, fd, indent=2, ensure_ascii=False)

        if options['censored']:
            censored_mp4 = '/srv/t_for_time/censored.mp4'
            if not os.path.exists(censored_mp4):
                cmd = [
                    "ffmpeg",
                    "-nostats", "-loglevel", "error",
                    "-f", "lavfi",
                    "-i", "color=color=white:size=1920x1080:rate=24",
                    "-t", "3600",
                    "-c:v", "libx264",
                    "-pix_fmt", "yuv420p",
                    censored_mp4
                ]
                subprocess.call(cmd)
        return generate_clips(options)
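The clip and voice-over symlinking above is removed from the management command and moved into render.generate_clips(); the remaining 13-line command presumably looks close to the sketch below (argument definitions are copied from the diff, the exact surviving imports are an assumption):

```python
# Hedged sketch of the slimmed-down management command after this change.
from django.core.management.base import BaseCommand

from ...render import generate_clips


class Command(BaseCommand):
    help = 'generate symlinks to clips and clips.json'

    def add_arguments(self, parser):
        parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')
        parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
        parser.add_argument('--censored', action='store', dest='censored', default=None, help='censor items from list')

    def handle(self, **options):
        # all of the clip/voice-over symlinking now lives in render.generate_clips()
        return generate_clips(options)
```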
@@ -14,7 +14,6 @@ class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument('--prefix', action='store', dest='prefix', default="/srv/t_for_time", help='prefix to build clips in')
        parser.add_argument('--offset', action='store', dest='offset', default="1024", help='inital offset in pi')
        parser.add_argument('--lang', action='store', dest='lang', default=None, help='subtitle language')

    def handle(self, **options):
        update_subtitles(options)
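A command defined like this can also be driven from Python inside the Django project; a minimal sketch, assuming the command module is named update_subtitles (the filename is not shown in this compare view) and using only the options listed above:

```python
# Hypothetical invocation sketch; the command name is an assumption,
# the --prefix/--offset/--lang options are taken from the diff above.
from django.core.management import call_command

call_command(
    "update_subtitles",        # assumed command name
    prefix="/srv/t_for_time",  # default shown above
    offset="1024",             # initial offset in pi
    lang="fr,en",              # primary language plus translation languages
)
```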
render.py (334 changed lines)
@@ -371,6 +371,20 @@ def get_offset_duration(prefix):
        duration += get_scene_duration(scene)
    return duration

def write_subtitles(data, folder, options):
    data = fix_overlaps(data)
    path = folder / "front.srt"
    if options.get("subtitle_format") == "srt":
        srt = ox.srt.encode(data)
        write_if_new(str(path), srt, 'b')
    else:
        if os.path.exists(path):
            os.unlink(path)
        path = folder / "front.ass"
        ass = ass_encode(data, options)
        write_if_new(str(path), ass, '')


def render(root, scene, prefix='', options=None):
    if options is None: options = {}
    fps = 24
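write_subtitles() becomes the single entry point for subtitle output: with subtitle_format set to "srt" it writes folder/front.srt via ox.srt.encode, otherwise it removes any stale front.srt and writes folder/front.ass via ass_encode. A minimal call sketch, assuming the cue dicts follow the in/out/value/values shape produced by get_srt later in this diff and that the folder path is only an example:

```python
# Illustrative only: assumes this module's write_subtitles/ass_encode are importable.
from pathlib import Path

subs = [
    {"in": 0.0, "out": 2.5, "value": "first line", "values": {"en": "first line"}},
    {"in": 2.5, "out": 5.0, "value": "second line", "values": {"en": "second line"}},
]

folder = Path("/srv/t_for_time/render/1024")  # hypothetical scene folder
write_subtitles(subs, folder, {"subtitle_format": "srt"})  # -> folder/front.srt
write_subtitles(subs, folder, {})                          # -> folder/front.ass
```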
@@ -378,10 +392,8 @@ def render(root, scene, prefix='', options=None):
    scene_duration = int(get_scene_duration(scene) * fps)
    for timeline, data in scene.items():
        if timeline == "subtitles":
            path = os.path.join(root, prefix + "front.srt")
            data = fix_overlaps(data)
            srt = ox.srt.encode(data)
            write_if_new(path, srt, 'b')
            folder = Path(root) / prefix
            write_subtitles(data, folder, options)
            continue
        #print(timeline)
        project = KDEnliveProject(root)
@@ -480,8 +492,21 @@ def get_fragments(clips, voice_over, prefix):
    fragments.sort(key=lambda f: ox.sort_string(f['name']))
    return fragments

def parse_lang(lang):
    if lang and "," in lang:
        lang = lang.split(',')
    if isinstance(lang, list):
        tlang = lang[1:]
        lang = lang[0]
    else:
        tlang = None
    if lang == "en":
        lang = None
    return lang, tlang


def render_all(options):
    options = load_defaults(options)
    prefix = options['prefix']
    duration = int(options['duration'])
    base = int(options['offset'])
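parse_lang() centralizes the --lang handling that was previously repeated inline. Copying the function as-is, these are the values it returns for typical inputs:

```python
# parse_lang() as shown above, with worked examples of its behaviour.
def parse_lang(lang):
    if lang and "," in lang:
        lang = lang.split(',')
    if isinstance(lang, list):
        tlang = lang[1:]
        lang = lang[0]
    else:
        tlang = None
    if lang == "en":
        lang = None
    return lang, tlang

print(parse_lang(None))        # (None, None)
print(parse_lang("en"))        # (None, None) - "en" maps to the default (untagged) language
print(parse_lang("fr"))        # ('fr', None)
print(parse_lang("fr,en,de"))  # ('fr', ['en', 'de']) - primary plus translation languages
```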
@@ -771,7 +796,7 @@ def render_all(options):
        fn = base_prefix / fn
        if os.path.exists(fn):
            os.unlink(fn)
    join_subtitles(base_prefix)
    join_subtitles(base_prefix, options)

    print("Duration - Target: %s Actual: %s" % (target_position, position))
    print(json.dumps(dict(stats), sort_keys=True, indent=2))
@@ -794,11 +819,39 @@ def add_translations(sub, lang):
            value += '\n' + tvalue
    return value

def get_srt(sub, offset=0, lang=None):
def add_translations_dict(sub, langs):
    values = {}
    value = sub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if sub.languages:
        value = ox.strip_tags(value)
        values[sub.languages] = value
    else:
        values["en"] = value
    for slang in langs:
        slang_value = None if slang == "en" else slang
        if sub.languages == slang_value:
            continue

        for tsub in sub.item.annotations.filter(
            layer="subtitles", start=sub.start, end=sub.end,
            languages=slang_value
        ):
            tvalue = tsub.value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
            if tsub.languages:
                tvalue = ox.strip_tags(tvalue)
            values[slang] = tvalue
    return values


def get_srt(sub, offset, lang, tlang):
    sdata = sub.json(keys=['in', 'out', 'value'])
    sdata['value'] = sdata['value'].replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()
    if lang:
        sdata['value'] = add_translations(sub, lang)
    if tlang:
        sdata['value'] = add_translations(sub, tlang)
    langs = [lang]
    if tlang:
        langs += tlang
    sdata['values'] = add_translations_dict(sub, langs)
    if offset:
        sdata["in"] += offset
        sdata["out"] += offset
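Both add_translations_dict() and get_srt() normalize annotation values with the same chain of replace() calls before collecting them into a per-language dict. Pulled out as a standalone helper for illustration:

```python
# The <br> normalization used in add_translations_dict()/get_srt() above,
# extracted verbatim so it can be tried on its own.
def normalize_value(value):
    return value.replace('<br/>', '<br>').replace('<br>\n', '\n').replace('<br>', '\n').strip()

assert normalize_value('one<br/>two') == 'one\ntwo'
assert normalize_value('one<br>\ntwo  ') == 'one\ntwo'

# get_srt() now returns both a flat 'value' and a per-language 'values' dict, e.g.
# {"in": 12.0, "out": 14.5, "value": "...", "values": {"en": "...", "fr": "..."}}
```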
@@ -815,21 +868,55 @@ def fix_overlaps(data):
        previous = sub
    return data

def shift_clips(data, offset):
    for clip in data:
        clip['in'] += offset
        clip['out'] += offset

def scene_subtitles(scene, options):
    import item.models
    offset = 0
    subs = []
    lang, tlang = parse_lang(options["lang"])
    for clip in scene['audio-center']['A1']:
        if not clip.get("blank"):
            batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
            vo = item.models.Item.objects.filter(
                data__batch__icontains=batch, data__title__startswith=fragment_id + '_'
            ).first()
            if vo:
                #print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
                for sub in vo.annotations.filter(
                    layer="subtitles"
                ).filter(
                    languages=None if lang == "en" else lang
                ).exclude(value="").order_by("start"):
                    sdata = get_srt(sub, offset, lang, tlang)
                    subs.append(sdata)
            else:
                print("could not find vo for %s" % clip['src'])
        offset += clip['duration']
    return subs


def load_defaults(options):
    path = os.path.join(options["prefix"], "options.json")
    if os.path.exists(path):
        with open(path) as fd:
            defaults = json.loads(fd)
        for key in defaults:
            if key not in options:
                options[key] = defaults[key]
    return options


def update_subtitles(options):
    import item.models

    options = load_defaults(options)
    prefix = Path(options['prefix'])
    base = int(options['offset'])
    lang = options["lang"]
    if lang and "," in lang:
        lang = lang.split(',')
    if isinstance(lang, list):
        tlang = lang[1:]
        lang = lang[0]
    else:
        tlang = None
    if lang == "en":
        lang = None
    lang, tlang = parse_lang(options["lang"])

    _cache = os.path.join(prefix, "cache.json")
    if os.path.exists(_cache):
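load_defaults() lets <prefix>/options.json supply any option that was not passed on the command line. A standalone sketch of that merge, using json.load on the open file object and a hypothetical options.json:

```python
# Standalone sketch of the defaults merge in load_defaults(); values already
# present in options always win over values read from options.json.
import json
import os

def merge_defaults(options, prefix="/srv/t_for_time"):
    path = os.path.join(prefix, "options.json")
    if os.path.exists(path):
        with open(path) as fd:
            defaults = json.load(fd)
        for key in defaults:
            if key not in options:
                options[key] = defaults[key]
    return options

# e.g. with options.json containing {"duration": "3600", "lang": "en"}:
# merge_defaults({"lang": "fr,en"}) -> {"lang": "fr,en", "duration": "3600"}
```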
@@ -844,25 +931,59 @@ def update_subtitles(options):
            continue
        with open(scene_json) as fd:
            scene = json.load(fd)
        offset = 0
        subs = []
        for clip in scene['audio-center']['A1']:
            if not clip.get("blank"):
                batch, fragment_id = clip['src'].replace('.wav', '').split('/')[-2:]
                vo = item.models.Item.objects.filter(data__batch__icontains=batch, data__title__startswith=fragment_id + '_').first()
                if vo:
                    #print("%s => %s %s" % (clip['src'], vo, vo.get('batch')))
                    for sub in vo.annotations.filter(layer="subtitles").filter(languages=lang).exclude(value="").order_by("start"):
                        sdata = get_srt(sub, offset, tlang)
                        subs.append(sdata)
                else:
                    print("could not find vo for %s" % clip['src'])
            offset += clip['duration']
        path = folder / "front.srt"
        data = fix_overlaps(subs)
        srt = ox.srt.encode(subs)
        write_if_new(str(path), srt, 'b')
        subs = scene_subtitles(scene, options)
        write_subtitles(subs, folder, options)

def ass_encode(subs, options):
    if "lang" in options:
        langs = options["lang"].split(',')
    else:
        langs = list(subs[0]["values"])
    print('ass_encode', langs, options)
    print(subs)

    header = '''[Script Info]
ScriptType: v4.00+
PlayResX: 1920
PlayResY: 1080
ScaledBorderAndShadow: yes
YCbCr Matrix: None

[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
'''
    ass = header
    offset = options.get("sub_margin", 10)
    spacing = options.get("sub_spacing", 20)
    height = 42
    styles = []
    for lang in reversed(langs):
        if isinstance(options.get("font"), list) and lang in options["font"]:
            font = options["font"][lang]
        else:
            font = 'SimHei' if lang in ('zh', 'jp') else 'Menlo'
        if isinstance(options.get("font_size"), list) and lang in options["font_size"]:
            size = options["font_size"][lang]
        else:
            size = 46 if font == 'SimHei' else 42

        styles.append(
            f'Style: {lang},{font},{size},&Hffffff,&Hffffff,&H0,&H0,0,0,0,0,100,100,0,0,1,1,0,2,10,10,{offset},1'
        )
        offset += size + spacing
    ass += '\n'.join(reversed(styles)) + '\n'
    events = [
        'Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text'
    ]
    for sub in subs:
        start = ox.format_timecode(sub['in']).rstrip('0')
        stop = ox.format_timecode(sub['out']).rstrip('0')
        for lang in reversed(langs):
            value = sub['values'][lang]
            event = f'Dialogue: 0,{start},{stop},{lang},,0,0,0,,{value}'
            events.append(event)
    ass += '\n\n[Events]\n' + '\n'.join(events) + '\n'
    return ass

def update_m3u(render_prefix, exclude=[]):
    files = ox.sorted_strings(glob(render_prefix + "*/*/back.mp4"))
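ass_encode() emits one [V4+ Styles] line per language: MarginV starts at sub_margin and grows by (font size + sub_spacing) for each additional language, so the translations stack in separate rows above the bottom margin. A small sketch of just that margin math, using the defaults from the code above (42 is the non-SimHei font size):

```python
# How the per-language MarginV values stack in ass_encode() above.
def margin_stack(langs, sub_margin=10, sub_spacing=20, size=42):
    offset = sub_margin
    margins = {}
    for lang in reversed(langs):
        margins[lang] = offset        # this language's MarginV
        offset += size + sub_spacing  # next language sits one row higher
    return margins

print(margin_stack(["en", "fr"]))  # {'fr': 10, 'en': 72}
```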
@@ -935,7 +1056,8 @@ def render_infinity(options):
        shutil.move(state_f + "~", state_f)


def join_subtitles(base_prefix):
def join_subtitles(base_prefix, options):
    '''
    subtitles = list(sorted(glob('%s/*/front.srt' % base_prefix)))
    data = []
    position = 0
@@ -945,3 +1067,141 @@ def join_subtitles(base_prefix):
        position += get_scene_duration(scene)
    with open(base_prefix / 'front.srt', 'wb') as fd:
        fd.write(ox.srt.encode(data))
    '''
    scenes = list(sorted(glob('%s/*/scene.json' % base_prefix)))
    data = []
    position = 0
    for scene in scenes:
        subs = scene_subtitles(scene, options)
        data += shift_clips(subs, position)
        position += get_scene_duration(scene)
    write_subtitles(data, base_prefix, options)

def resolve_roman(s):
    extra = re.compile(r'^\d+(.*?)$').findall(s)
    if extra:
        extra = extra[0].lower()
        new = {
            'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
            'vi': '6', 'vii': 7, 'viii': '8', 'ix': '9', 'x': '10'
        }.get(extra, extra)
        return s.replace(extra, new)
    return s

def generate_clips(options):
    import item.models
    import itemlist.models

    prefix = options['prefix']
    lang, tlang = parse_lang(options["lang"])
    if options['censored']:
        censored_list = itemlist.models.List.get(options["censored"])
        censored = list(censored_list.get_items(
            censored_list.user
        ).all().values_list('public_id', flat=True))
    clips = []
    for i in item.models.Item.objects.filter(sort__type='original'):
        original_target = ""
        qs = item.models.Item.objects.filter(data__title=i.data['title']).exclude(id=i.id)
        if qs.count() >= 1:
            clip = {}
            durations = []
            for e in item.models.Item.objects.filter(data__title=i.data['title']):
                if 'type' not in e.data:
                    print("ignoring invalid video %s (no type)" % e)
                    continue
                if not e.files.filter(selected=True).exists():
                    continue
                source = e.files.filter(selected=True)[0].data.path
                ext = os.path.splitext(source)[1]
                type_ = e.data['type'][0].lower()
                target = os.path.join(prefix, type_, i.data['title'] + ext)
                os.makedirs(os.path.dirname(target), exist_ok=True)
                if os.path.islink(target):
                    os.unlink(target)
                os.symlink(source, target)
                if type_ == "original":
                    original_target = target
                if options['censored'] and e.public_id in censored:
                    clip[type_ + "_censored"] = target
                    target = '/srv/t_for_time/censored.mp4'
                clip[type_] = target
                durations.append(e.files.filter(selected=True)[0].duration)
            clip["duration"] = min(durations)
            if not clip["duration"]:
                print('!!', durations, clip)
                continue
            cd = format_duration(clip["duration"], 24)
            #if cd != clip["duration"]:
            #    print(clip["duration"], '->', cd, durations, clip)
            clip["duration"] = cd
            clip['tags'] = i.data.get('tags', [])
            clip['editingtags'] = i.data.get('editingtags', [])
            name = os.path.basename(original_target)
            seqid = re.sub(r"Hotel Aporia_(\d+)", "S\\1_", name)
            seqid = re.sub(r"Night March_(\d+)", "S\\1_", seqid)
            seqid = re.sub(r"_(\d+)H_(\d+)", "_S\\1\\2_", seqid)
            seqid = seqid.split('_')[:2]
            seqid = [b[1:] if b[0] in ('B', 'S') else '0' for b in seqid]
            seqid[1] = resolve_roman(seqid[1])
            seqid[1] = ''.join([b for b in seqid[1] if b.isdigit()])
            if not seqid[1]:
                seqid[1] = '0'
            try:
                clip['seqid'] = int(''.join(['%06d' % int(b) for b in seqid]))
            except:
                print(name, seqid, 'failed')
                raise
            if "original" in clip and "foreground" in clip and "background" in clip:
                clips.append(clip)
            elif "original" in clip and "animation" in clip:
                clips.append(clip)
            else:
                print("ignoring incomplete video", i)

    with open(os.path.join(prefix, 'clips.json'), 'w') as fd:
        json.dump(clips, fd, indent=2, ensure_ascii=False)

    print("using", len(clips), "clips")

    voice_over = defaultdict(dict)
    for vo in item.models.Item.objects.filter(
        data__type__contains="Voice Over",
    ):
        fragment_id = int(vo.get('title').split('_')[0])
        source = vo.files.filter(selected=True)[0]
        batch = vo.get('batch')[0].replace('Text-', '')
        src = source.data.path
        target = os.path.join(prefix, 'voice_over', batch, '%s.wav' % fragment_id)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        if os.path.islink(target):
            os.unlink(target)
        os.symlink(src, target)
        subs = []
        for sub in vo.annotations.filter(
            layer="subtitles", languages=lang
        ).exclude(value="").order_by("start"):
            sdata = get_srt(sub, 0, lang, tlang)
            subs.append(sdata)
        voice_over[fragment_id][batch] = {
            "src": target,
            "duration": format_duration(source.duration, 24),
            "subs": subs
        }
    with open(os.path.join(prefix, 'voice_over.json'), 'w') as fd:
        json.dump(voice_over, fd, indent=2, ensure_ascii=False)

    if options['censored']:
        censored_mp4 = '/srv/t_for_time/censored.mp4'
        if not os.path.exists(censored_mp4):
            cmd = [
                "ffmpeg",
                "-nostats", "-loglevel", "error",
                "-f", "lavfi",
                "-i", "color=color=white:size=1920x1080:rate=24",
                "-t", "3600",
                "-c:v", "libx264",
                "-pix_fmt", "yuv420p",
                censored_mp4
            ]
            subprocess.call(cmd)
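resolve_roman() and format_duration() are small enough to try in isolation; a runnable restatement with worked values follows (the roman-numeral map is written with string values throughout here):

```python
# Runnable restatement of the two helpers used by generate_clips() above.
import re

def format_duration(duration, fps):
    # snap a duration to the nearest frame boundary at the given fps
    return float('%0.5f' % (round(duration * fps) / fps))

def resolve_roman(s):
    # a trailing roman numeral after leading digits is replaced by its number
    extra = re.compile(r'^\d+(.*?)$').findall(s)
    if extra:
        extra = extra[0].lower()
        new = {
            'i': '1', 'ii': '2', 'iii': '3', 'iv': '4', 'v': '5',
            'vi': '6', 'vii': '7', 'viii': '8', 'ix': '9', 'x': '10'
        }.get(extra, extra)
        return s.replace(extra, new)
    return s

print(format_duration(12.345, 24))  # 12.33333 (296 frames at 24 fps)
print(resolve_roman('2ii'))         # '22'
print(resolve_roman('07'))          # '07' (no trailing suffix, unchanged)
```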