This commit is contained in:
j 2017-01-04 18:26:18 +01:00
parent 9076cf2d69
commit 32c9f66d8e
2 changed files with 294 additions and 0 deletions

197
render.py Executable file
View file

@@ -0,0 +1,197 @@
#!/usr/bin/python3
import os
import json
import subprocess
import string
from pi import random
from keywords import KEYWORDS
import ox
import ox.web.auth
base_url = 'https://cdosea.0x2620.org'

# Lazily-created, module-wide API client (see get_api).
api = None


def get_api():
    """Sign in to the pan.do/ra instance once and cache the client in `api`."""
    global api
    if api:
        return
    api = ox.API(base_url + '/api/')
    api.signin(**ox.web.auth.get('cdosea'))
# Persistent on-disk caches: video id -> local file path, and tag -> clip list.
# Loaded when present so repeated runs can skip API lookups.
PATHS = json.load(open('PATHS.json')) if os.path.exists('PATHS.json') else {}
CLIPS = json.load(open('CLIPS.json')) if os.path.exists('CLIPS.json') else {}
def get_path(id):
    """Return the local cache path for a video id, memoized in PATHS.json.

    Looks up the item's file extension via the API, derives
    cache/<id>.<extension>, records it in the module-level PATHS dict and
    persists that dict to PATHS.json on every new entry.
    """
    global PATHS
    if id not in PATHS:
        get_api()
        info = api.findMedia({
            'query': {
                'conditions': [
                    {'key': 'id', 'operator': '==', 'value': id}
                ]
            },
            'keys': ['id', 'extension'],
            'range': [0, 1]
        })['data']['items'][0]
        path = os.path.join('cache', '%s.%s' % (info['id'], info['extension']))
        if not os.path.exists(path):
            url = '%s/%s/download/source/' % (base_url, id)
            # NOTE(review): only the URL is printed — nothing here downloads
            # the file; presumably the fetch happens out of band. Confirm,
            # otherwise the cached path may point at a missing file.
            print('get video', url)
        PATHS[id] = path
        with open('PATHS.json', 'w') as fd:
            json.dump(PATHS, fd, indent=4, sort_keys=True)
    return PATHS[id]
def get_clips(tag):
    """Return clips annotated with the keyword `tag`, memoized in CLIPS.json.

    Each clip dict gains 'path', 'duration' and 'tag' keys; the list is
    sorted by annotation id for determinism.
    """
    global CLIPS
    if tag in CLIPS:
        return CLIPS[tag]
    get_api()
    query = {
        'query': {
            'conditions': [
                {'key': 'layer', 'operator': '==', 'value': 'keywords'},
                {'key': 'value', 'operator': '==', 'value': tag}
            ],
            'operator': '&'
        },
        'keys': ['id', 'in', 'out'],
        'range': [0, 10000]
    }
    items = api.findAnnotations(query)['data']['items']
    for item in items:
        item['path'] = get_path(item['id'].split('/')[0])
        item['duration'] = item['out'] - item['in']
        item['tag'] = tag
    CLIPS[tag] = sorted(items, key=lambda c: c['id'])
    with open('CLIPS.json', 'w') as fd:
        json.dump(CLIPS, fd, indent=4, sort_keys=True)
    return CLIPS[tag]
def random_choice(seq, items):
    """Pick a pseudo-random element of `items`, drawing digits from seq().

    seq() is expected to yield single digits (0-9); one extra digit is drawn
    per order of magnitude above ten, and the accumulated value is scaled
    onto the index range.
    """
    last = len(items) - 1
    n = last
    print('len', n)
    if n == 0:
        # Single item: nothing to choose.
        return items[0]
    r = seq()
    base = 10
    while n > 10:
        n /= 10
        print(r)
        r += seq()
        base += 10
    r = int(last * r / base)
    print('result', r, items)
    return items[r]
def splitint(number, by):
    """Split `number` into `by` integer parts that differ by at most 1.

    The parts always sum to `number`; the larger parts come last.
    Uses divmod instead of the original int(number/by), whose float
    division is imprecise for very large numbers and wrong for negatives.
    """
    base, extra = divmod(number, by)
    # The last `extra` slots each absorb one leftover unit.
    return [base + 1 if i >= by - extra else base for i in range(by)]
def filter_clips(clips, duration, max_duration=0):
    """Bucket all clips into 10 duration deciles and return one bucket by tag.

    clips: dict tag -> list of clip dicts (each carrying 'duration').
    duration: bucket index 1-10 (1 = shortest decile, 10 = longest).
    max_duration: currently unused; kept for interface compatibility.
    Returns a dict tag -> clips restricted to the selected bucket.

    Cleanup vs. original: removed dead blur/low/high assignments (written
    twice, never read) and the debug prints of the full clip lists.
    """
    flat = []
    for tag, tag_clips in clips.items():
        for clip in tag_clips:
            clip['tag'] = tag
            flat.append(clip)
    flat.sort(key=lambda c: c['duration'])
    # Partition the sorted clips into 10 near-equal buckets keyed 1..10.
    sizes = splitint(len(flat), 10)
    buckets = {}
    p = 0
    for i in range(10):
        buckets[i + 1] = flat[p:p + sizes[i]]
        p += sizes[i]
    grouped = {}
    for clip in buckets[duration]:
        grouped.setdefault(clip['tag'], []).append(clip)
    return grouped
def sequence(seq, letter):
    """Build a deterministic clip + text-overlay timeline for one letter.

    seq: zero-arg callable yielding digits 0-9 (the seeded pseudo-random
    source); letter: key into KEYWORDS selecting the tag pool.
    Returns {'clips': [...], 'text': [...]} where text entries are either
    {'blank': True, 'duration': d} or {'text': tag, 'duration': d}.
    """
    tags = KEYWORDS[letter]
    clips = {tag: get_clips(tag) for tag in tags}
    result = {
        'clips': [],
        'text': [],
    }
    duration = 0
    MAX_DURATION = 65 * 2
    MIN_DURATION = 56 * 2
    # Effectively loops until duration >= MIN_DURATION (MIN < MAX, so the
    # first conjunct only matters on the final iteration's overshoot).
    while duration < MAX_DURATION and not duration >= MIN_DURATION:
        # clip duration: 1-10
        n = seq()
        if n == 0:
            n = 10
        max_duration = MAX_DURATION - duration
        clips_n = filter_clips(clips, n, max_duration)
        tags_n = list(sorted(clips_n.keys()))
        if not tags_n:
            # NOTE(review): only warns — random_choice on an empty list would
            # still raise below; a `continue` may be missing here. Confirm.
            print('NO tags for', letter, n)
        tag = random_choice(seq, tags_n)
        #if 'tiger' in tags_n:
        #    tag = 'tiger'
        clip = random_choice(seq, clips_n[tag])
        duration += clip['duration']
        result['clips'].append(clip.copy())
    # Roughly 1-in-10 clips are replaced with black (decided per clip).
    for clip in result['clips']:
        if seq() == 0:
            clip['black'] = True
    # Lay text overlays along the timeline: a 0 digit starts a text block
    # (preceded by a blank filling the gap since the last text); any other
    # digit advances the cursor, leaving silence.
    position = last_text = 0
    while position < duration:
        n = seq()
        if n == 0:
            blank = {'blank': True, 'duration': position - last_text}
            result['text'].append(blank)
            n = seq()
            if n == 0:
                n = 10
            n = min(n, duration-position)
            text = {
                # NOTE(review): tags_n is the leftover value from the last
                # clip-loop iteration, not recomputed here — confirm intended.
                'text': random_choice(seq, tags_n),
                'duration': n
            }
            result['text'].append(text)
            position += n
            last_text = position
        else:
            position += n
    # Pad the tail with a final blank so overlay and video track align.
    if last_text < duration:
        blank = {'blank': True, 'duration': duration - last_text}
        result['text'].append(blank)
    return result
if __name__ == '__main__':
    # Fix: the script crashed with FileNotFoundError when output/ was absent.
    os.makedirs('output', exist_ok=True)
    for n in range(10):
        # Deterministic digit source seeded by n (presumably pi digits,
        # from the local `pi` module — confirm).
        seq = random(n)
        # Only two letters are rendered for now; switch to
        # string.ascii_uppercase for the full alphabet.
        for letter in ('T', 'W'):
            r = sequence(seq, letter)
            tjson = 'output/%s%d.json' % (letter, n)
            with open(tjson, 'w') as fd:
                json.dump(r, fd, indent=4, sort_keys=True)
            print(json.dumps(r, indent=4, sort_keys=True))
            #print(sum([c['duration'] for c in r['clips']]))
            subprocess.call(['./render_mlt.py', tjson])

97
render_mlt.py Executable file
View file

@@ -0,0 +1,97 @@
#!/usr/bin/python
import os
import time
import sys
import json
import mlt
from PyQt5 import QtWidgets
# Avoid segfault in webvfx
app = QtWidgets.QApplication(sys.argv)

mlt.mlt_log_set_level(40) # verbose
mlt.Factory.init()

# Top-level tractor holding the multitrack timeline.
tractor = mlt.Tractor()
tractor.mark_in = -1
tractor.mark_out = -1
multitrack = tractor.multitrack()

# Input: JSON timeline produced by render.py; output: matching .xml path.
source = sys.argv[1]
target = source.replace('.json', '.xml')
with open(source) as fd:
    data = json.load(fd)

# Track 0: video clips; track 1: text overlay.
video = mlt.Playlist()
overlay = mlt.Playlist()
fps = 60
profile = mlt.Profile("atsc_1080p_%d" % fps)
#profile.set_explicit(1)
# get profile from clip
#clip = mlt.Producer(profile, 'test.mp4')
#profile.from_producer(clip)
def add_color(playlist, color, duration):
    """Append `duration` frames of a solid `color` producer to the playlist."""
    producer = mlt.Producer(profile, 'color:' + color)
    producer.set_in_and_out(0, duration)
    playlist.append(producer)
def add_clip(playlist, file_, in_, duration):
    """Append `duration` frames of `file_`, starting at frame `in_`."""
    # Non-str paths are passed to MLT as UTF-8 bytes.
    path = file_ if isinstance(file_, str) else file_.encode('utf-8')
    producer = mlt.Producer(profile, path)
    producer.set_in_and_out(in_, in_ + duration - 1)
    playlist.append(producer)
def add_blank(playlist, length):
    # Insert a gap of `length` frames (empty track) into the playlist.
    playlist.blank(length)
def add_text(playlist, value, length):
    """Append a transparent webvfx text title of `length` frames."""
    if not isinstance(value, str):
        # MLT property values are passed as UTF-8 bytes when not str.
        value = value.encode('utf-8')
    title = mlt.Producer(profile, 'webvfx:text.html')
    title.set('transparent', 1)
    title.set('title', value)
    title.set('length', length)
    playlist.append(title)
# Video track: black color frames for clips flagged 'black', otherwise the
# referenced source file trimmed to its in-point and duration.
for clip in data['clips']:
    if clip.get('black'):
        # fixme seconds to fps! duration fame etc!!
        frames = int(clip['duration'] * fps)
        add_color(video, 'black', frames)
    else:
        print(clip['duration'], clip['path'])
        if not os.path.exists(clip['path']):
            # Abort early rather than render an incomplete timeline.
            print(clip['path'], 'is missing')
            sys.exit(1)
        # fixme seconds to fps!
        in_ = int(clip['in'] * fps)
        frames = int(clip['duration'] * fps)
        add_clip(video, clip['path'], in_, frames)

# Overlay track: gaps for 'blank' entries, uppercased webvfx titles otherwise.
for clip in data['text']:
    if clip.get('blank'):
        frames = int(clip['duration'] * fps)
        add_blank(overlay, frames)
    else:
        frames = int(clip['duration'] * fps)
        add_text(overlay, clip['text'].upper(), frames)

# Stack both playlists and composite the overlay (track 1) over the video.
multitrack.connect(video, 0)
multitrack.connect(overlay, 1)
composite = mlt.Transition(profile, "composite")
#composite.set('fill', 0)
tractor.plant_transition(composite)

# Serialize the timeline with the 'xml' consumer, which writes to `target`
# instead of rendering frames.
consumer = 'xml'
consumer = mlt.Consumer(profile, consumer, target)
consumer.connect(tractor)
#consumer.set("real_time", -2)
# NOTE(review): consumer.start() is not followed by a wait loop, and `time`
# is imported but unused — possibly a missing
# `while consumer.is_stopped() == 0: time.sleep(...)`. Confirm the xml
# consumer completes before interpreter exit.
consumer.start()