use oxtimelines

commit 96ec35e51a (parent f09e4205cf)
19 changed files with 393 additions and 164 deletions
README (2 changes)

@@ -29,7 +29,7 @@ Get code from bazzar
cd /srv/
bzr branch http://code.0x2620.org/pandora pandora
cd pandora
virtualenv .
virtualenv --system-site-packages .
pip -E . install -r requirements.txt

cd static
@@ -16,6 +16,7 @@ import numpy as np
import Image
import ox
import ox.image
from ox.utils import json

img_extension='jpg'

@@ -303,13 +304,27 @@ def resize_image(image_source, image_output, width=None, size=None):
    output.save(image_output)


def timeline(video, prefix):
    cmd = ['oxtimeline', '-i', video, '-o', prefix]
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def timeline(
    video, prefix,
    modes=['antialias', 'slitscan', 'keyframes', 'audio', 'data'],
    #modes=['antialias', 'slitscan', 'audio', 'data'],
    size=[64, 16]
):
    if isinstance(video, basestring):
        video = [video]
    cmd = ['../bin/oxtimelines',
        '-s', ','.join(map(str, reversed(sorted(size)))),
        '-m', ','.join(modes),
        '-o', prefix,
        '-c', os.path.join(prefix, 'cuts.json'),
    ] + video
    #p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print cmd
    p = subprocess.Popen(cmd)
    p.wait()


def average_color(prefix, start=0, end=0):
def average_color(prefix, start=0, end=0, mode='antialias'):
    height = 64
    frames = 0
    pixels = []

@@ -318,7 +333,9 @@ def average_color(prefix, start=0, end=0):
    if end:
        start = int(start * 25)
        end = int(end * 25)
    timelines = sorted(filter(lambda t: t!= '%s%sp.png'%(prefix,height), glob("%s%sp*.png"%(prefix, height))))
    mode = 'timeline' + mode
    timelines = sorted(filter(lambda t: t!= '%s%s%sp.jpg'%(prefix, mode, height),
        glob("%s%s%sp*.jpg"%(prefix, mode, height))))
    for image in timelines:
        start_offset = 0
        if start and frames + 1500 <= start:

@@ -352,8 +369,7 @@ def average_color(prefix, start=0, end=0):


def average_volume(prefix, start=0, end=0):
    #FIXME: actually compute volume
    return 0
    return average_color(prefix, start, end, 'audio')[2]


def get_distance(rgb0, rgb1):

@@ -363,31 +379,11 @@ def get_distance(rgb0, rgb1):


def cuts(prefix):
    cuts = []
    distances = [0]
    fps = 25
    frames = 0
    height = 64
    width = 1500
    pixels = []
    timelines = sorted(filter(lambda t: t!= '%s%sp.png'%(prefix,height), glob("%s%sp*.png"%(prefix, height))))
    for image in timelines:
        timeline = Image.open(image)
        frames += timeline.size[0]
        pixels.append(timeline.load())
    for frame in range(1, frames):
        x = frame % width
        distance = 0
        image0 = int((frame - 1) / width)
        image1 = int(frame / width)
        for y in range(height):
            rgb0 = pixels[image0][(x - 1) % width, y]
            rgb1 = pixels[image1][x, y]
            distance += get_distance(rgb0, rgb1)
        distance = distance / height
        distances.append(distance)
        if distance >= 0.025 and abs(distance - distances[frame - 1]) >= 0.05:
            cuts.append(frame / fps)
    fname = os.path.join(prefix, 'cuts.json')
    if not os.path.exists(fname):
        return []
    with open(fname) as f:
        cuts = json.load(f)
    return cuts
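The new timeline() above replaces the in-process tile rendering with a single call to the external oxtimelines tool (the commit invokes it via the relative path '../bin/oxtimelines'), and cuts() no longer measures per-frame pixel distances but simply reads back the cuts.json that oxtimelines writes. A rough usage sketch, assuming the oxtimelines script is on PATH and using the standard json module instead of ox.utils.json; the paths are illustrative, not part of the commit:

    import json
    import os
    import subprocess

    def render_timeline(video_path, prefix,
                        modes=('antialias', 'slitscan', 'keyframes', 'audio', 'data'),
                        sizes=(64, 16)):
        # one oxtimelines run renders every mode at every size and also
        # writes cuts.json under the prefix (mirrors extract.timeline above)
        cmd = ['oxtimelines',
            '-s', ','.join(map(str, sorted(sizes, reverse=True))),
            '-m', ','.join(modes),
            '-o', prefix,
            '-c', os.path.join(prefix, 'cuts.json'),
            video_path]
        subprocess.Popen(cmd).wait()

    def read_cuts(prefix):
        # mirrors the new extract.cuts(): no pixel math, just read the file
        fname = os.path.join(prefix, 'cuts.json')
        if not os.path.exists(fname):
            return []
        with open(fname) as f:
            return json.load(f)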
pandora/archive/management/commands/migrate_timelines.py (new file, 40 lines)

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import os
import re

from django.core.management.base import BaseCommand
from django.conf import settings
import monkey_patch.models
from ... import models


class Command(BaseCommand):
    """
    """
    help = 'migrate timelines to new path'
    args = ''

    def handle(self, **options):
        for root, folders, files in os.walk(settings.MEDIA_ROOT):
            for f in files:
                f = os.path.join(root, f)
                base, ext = os.path.splitext(os.path.basename(f))
                if base.startswith('timeline') and ext == '.png':
                    if base in ('timeline.overview', 'timeline.overview.8'):
                        print 'delete', f
                        os.unlink(f)
                    else:
                        n = re.compile('timeline(\d+)p(\d+)').findall(base)
                        if not n:
                            n = re.compile('timeline(\d+)p').findall(base)
                            target = 'timelineantialias%sp.jpg' % n[0]
                            print f, target
                            target = os.path.join(os.path.dirname(f), target)
                            os.rename(f, target)
                        else:
                            n = tuple(map(int, n[0]))
                            target = 'timelineantialias%dp%d.jpg' % n
                            print f, target
                            target = os.path.join(os.path.dirname(f), target)
                            os.rename(f, target)
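The migration walks MEDIA_ROOT once and maps the old PNG tile names onto the new antialias JPEG names, deleting the obsolete timeline.overview images along the way. A small sketch of the name mapping that the two regexes implement (the helper and the file names are illustrative, not part of the commit):

    import re

    def migrated_name(base):
        # base is the old file name without its .png extension
        n = re.compile(r'timeline(\d+)p(\d+)').findall(base)
        if not n:
            # overview tile without an index, e.g. 'timeline16p'
            n = re.compile(r'timeline(\d+)p').findall(base)
            return 'timelineantialias%sp.jpg' % n[0]
        # indexed tile; note that the old zero padding is dropped
        return 'timelineantialias%dp%d.jpg' % tuple(map(int, n[0]))

    assert migrated_name('timeline64p0012') == 'timelineantialias64p12.jpg'
    assert migrated_name('timeline16p') == 'timelineantialias16p.jpg'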
@@ -393,9 +393,11 @@ class Stream(models.Model):

    cuts = fields.TupleField(default=[])
    color = fields.TupleField(default=[])
    volume = models.FloatField(default=0)

    @property
    def timeline_prefix(self):
        return os.path.join(settings.MEDIA_ROOT, self.path())
        return os.path.join(settings.MEDIA_ROOT, self.path(), 'timeline')

    def name(self):

@@ -438,6 +440,7 @@ class Stream(models.Model):
        extract.timeline(self.video.path, self.timeline_prefix)
        self.cuts = tuple(extract.cuts(self.timeline_prefix))
        self.color = tuple(extract.average_color(self.timeline_prefix))
        self.volume= extract.average_volume(self.timeline_prefix)
        self.save()

    def save(self, *args, **kwargs):
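Everything downstream of this model (the migration above, the tile regexes in the new timelines code further down, the URL patterns and the JavaScript) relies on a single tile naming scheme: 'timeline' + mode + height + 'p', followed by an index for the per-segment tiles and by nothing for the single full-width overview, with a .jpg extension. A tiny helper, not part of the commit, that spells the convention out:

    def tile_name(mode, height, index=None):
        # e.g. tile_name('antialias', 64, 0) -> 'timelineantialias64p0.jpg'
        name = 'timeline%s%dp' % (mode, height)
        if index is not None:
            name += '%d' % index
        return name + '.jpg'

    assert tile_name('antialias', 64, 0) == 'timelineantialias64p0.jpg'
    assert tile_name('antialias', 16) == 'timelineantialias16p.jpg'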
pandora/item/management/commands/rebuild_timelines.py (new file, 34 lines)

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4

import os
from os.path import join, dirname, basename, splitext, exists
import time

from django.core.management.base import BaseCommand, CommandError
from django.conf import settings

import monkey_patch.models
from ... import models


class Command(BaseCommand):
    """
    rebuild timeline for all items.
    """
    help = 'rebuild all timeines(use after updating oxtimelines)'
    args = ''

    def handle(self, **options):
        offset = 0
        chunk = 50
        count = pos = models.Item.objects.count()
        while offset <= count:
            for i in models.Item.objects.all().order_by('id')[offset:offset+chunk]:
                print pos, i.itemId
                for s in i.streams():
                    s.make_timeline()
                i.update_timeline()
                pos -= 1
            offset += chunk
            time.sleep(30) #keep load down
@@ -16,7 +16,8 @@ class Command(BaseCommand):
    """
    rebuild sort/search cache for all items.
    """
    help = 'listen to rabbitmq and execute encoding tasks.'
    help = 'rebuild sort/search cache for all items.'
    """
    args = ''

    def handle(self, **options):
@@ -29,7 +29,7 @@ import ox.image
import managers
import utils
import tasks
from .timelines import join_timelines
from .timelines import join_tiles
from data_api import external_data

from archive import extract

@@ -900,8 +900,8 @@ class Item(models.Model):
    def timeline_prefix(self):
        videos = self.streams()
        if len(videos) == 1:
            return os.path.join(settings.MEDIA_ROOT, videos[0].path('timeline'))
        return os.path.join(settings.MEDIA_ROOT, self.path(), 'timeline')
            return os.path.join(settings.MEDIA_ROOT, videos[0].path(''))
        return os.path.join(settings.MEDIA_ROOT, self.path())

    def get_files(self, user):
        files = self.files.all().select_related()

@@ -1025,19 +1025,20 @@ class Item(models.Model):
        if streams.count() == 1:
            self.data['color'] = streams[0].color
            self.data['cuts'] = streams[0].cuts
            self.data['volume'] = streams[0].volume
        else:
            #self.data['color'] = extract.average_color(self.timeline_prefix)
            #self.data['cuts'] = extract.cuts(self.timeline_prefix)
            self.data['cuts'] = []
            self.data['cuts'] = extract.cuts(self.timeline_prefix)
            self.data['volume'] = 0
            offset = 0
            color = [0, 0, 0]
            n = streams.count()
            for s in streams:
                for c in s.cuts:
                    self.data['cuts'].append(c+offset)
                self.data['volume'] = s.volume * s.duration
                color = map(lambda a,b: (a+b)/n, color,ox.image.getRGB(s.color))
                offset += s.duration
            self.data['color'] = ox.image.getHSL(color)
            self.data['volume'] /= offset
        #extract.timeline_strip(self, self.data['cuts'], stream.info, self.timeline_prefix[:-8])
        self.select_frame()
        self.make_poster(True)

@@ -1082,7 +1083,7 @@ class Item(models.Model):
        streams = self.streams()
        if streams.count() > 1:
            timelines = [s.timeline_prefix for s in self.streams()]
            join_timelines(timelines, self.timeline_prefix)
            join_tiles(timelines, self.timeline_prefix)
        else:
            #remove joined timeline if it was created at some point
            for f in glob(os.path.join(settings.MEDIA_ROOT, self.path(), 'timeline*.png')):
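For multi-stream items the cut list now comes from the joined cuts.json written by join_tiles() (see the timelines code further down), where each stream's cuts are shifted by the summed duration of the streams before it and the seam between streams is itself recorded as a cut; volume is still aggregated in update_timeline() as a duration-weighted mean. A standalone sketch of that aggregation with made-up sample data (the real code works on Stream objects and converts color via ox.image):

    def join_cuts(streams):
        # mirrors the '# join cuts' block in join_tiles()
        cuts, offset = [], 0
        for i, s in enumerate(streams):
            if i > 0:
                cuts.append(offset)            # the seam itself is a cut
            cuts.extend(offset + c for c in s['cuts'])
            offset += s['duration']
        return [round(c, 2) for c in cuts]     # cuts.json keeps two decimals

    def weighted_volume(streams):
        # the evident intent of the s.volume * s.duration / offset lines
        total = sum(s['duration'] for s in streams)
        return sum(s['volume'] * s['duration'] for s in streams) / total

    streams = [
        {'duration': 60.0, 'cuts': [10.0, 42.5], 'volume': 0.25},
        {'duration': 60.0, 'cuts': [5.0], 'volume': 0.75},
    ]
    assert join_cuts(streams) == [10.0, 42.5, 60.0, 65.0]
    assert weighted_volume(streams) == 0.5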
@@ -1,114 +1,257 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4

from __future__ import division, with_statement

import math
import os
from glob import glob
import re

import Image

import ox
from utils import sorted_strings
from ox.utils import json


def getTiles(timeline_prefix, height=64):
    files = glob('%s%sp*.png' % (timeline_prefix, height))
    return sorted_strings(filter(lambda f: f!='%s%sp.png' % (timeline_prefix, height), files))

def loadTimeline(timeline_prefix, height=64):
    files = getTiles(timeline_prefix, height)
    f = Image.open(files[0])
    width = f.size[0]
    f = Image.open(files[-1])
    duration = f.size[0] + (len(files)-1)*width
    timeline = Image.new("RGB", (duration, height))
    pos = 0
    for f in files:
        part = Image.open(f)
        timeline.paste(part, (pos, 0, pos + part.size[0], height))
        pos += part.size[0]
    return timeline
def join_tiles(source_paths, target_path):
    '''
    This is an implementation of a join_tiles function for new-style timelines.
    Timelines of files will be read from source_paths, the timeline of the item will
    be written to target_path.
    '''

def makeTiles(timeline_prefix, height=16, width=3600):
    files = getTiles(timeline_prefix, 64)
    def divide(num, by):
        # divide(100, 3) -> [33, 33, 34]
        arr = []
        div = int(num / by)
        mod = num % by
        for i in range(int(by)):
            arr.append(div + (i > by - 1 - mod))
        return arr

    def get_file_info(file_name):
        for mode in modes:
            if re.match('^timeline' + mode + '64p\d+\.jpg', file_name):
                return {
                    'file': file_name,
                    'mode': mode,
                    'index': int(file_name[11 + len(mode):-4])
                }
        return None

    def save_and_open(data):
        # whenever a large tile is done or needed,
        # this function saves the previous large tile
        # (if any) and opens the next one (if any).
        # in between, whenever required, small tiles
        # are opened, rendered and saved, and the
        # large full tile is being generated.
        # 'keyframes' are only rendered in large size,
        # 'keyframeswide' only resized to small size.
        image_mode = 'L' if mode == 'audio' else 'RGB'
        small_mode = 'keyframes' if mode == 'keyframeswide' else mode
        large_tile_i = int(target_w / large_tile_w)
        # save previous large tile
        if large_tile_i > 0:
            large_tile_i -= 1
            if mode != 'keyframeswide':
                image_file = '%stimeline%s%dp%d.jpg' % (
                    target_path, mode, large_tile_h, large_tile_i
                )
                data['target_images']['large'].save(image_file)
                #print image_file
            if mode != 'keyframes':
                # open small tile
                small_tile_i = int(large_tile_i / 60)
                small_tile_x = (large_tile_i % 60) * 60
                if small_tile_x == 0:
                    if small_tile_i < small_tile_n - 1:
                        w = small_tile_w
                    else:
                        w = small_tile_last_w
                    data['target_images']['small'] = Image.new(image_mode, (w, small_tile_h))
                # paste large tile into small tile
                w = 60 if large_tile_i < large_tile_n - 1 else small_tile_last_w % 60
                data['target_images']['large'] = data['target_images']['large'].resize(
                    (w, small_tile_h), Image.ANTIALIAS
                )
                data['target_images']['small'].paste(
                    data['target_images']['large'], (small_tile_x, 0)
                )
                # save small tile
                if small_tile_x == small_tile_w - 60 or large_tile_i == large_tile_n - 1:
                    image_file = '%stimeline%s%dp%d.jpg' % (
                        target_path, small_mode, small_tile_h, small_tile_i
                    )
                    data['target_images']['small'].save(image_file)
                    print image_file
            if mode == 'antialias':
                # render full tile
                resized = data['target_images']['large'].resize((
                    data['full_tile_widths'][0], large_tile_h
                ), Image.ANTIALIAS)
                data['target_images']['full'].paste(resized, (data['full_tile_offset'], 0))
                data['full_tile_offset'] += data['full_tile_widths'][0]
                data['full_tile_widths'] = data['full_tile_widths'][1:]
            large_tile_i += 1
        # open next large tile
        if large_tile_i < large_tile_n:
            w = large_tile_w if large_tile_i < large_tile_n - 1 else large_tile_last_w
            data['target_images']['large'] = Image.new(image_mode, (w, large_tile_h))

    data = {}
    fps = 25
    part_step = 60
    output_width = width
    width = len(files) * part_step
    timeline = Image.new("RGB", (width, height))
    large_tile_w, large_tile_h = 1500, 64
    small_tile_w, small_tile_h = 3600, 16
    full_tile_w = 1920
    modes = ['antialias', 'slitscan', 'keyframes', 'keyframeswide', 'audio']
    source_files = {}
    for mode in modes:
        source_files[mode] = []

    pos = 0
    for f in sorted_strings(files):
        part = Image.open(f)
        part_width = int(part.size[0] / fps)
        part = part.resize((part_width, height), Image.ANTIALIAS)
        timeline.paste(part, (pos, 0, pos+part_width, height))
        pos += part_width
    # read files
    durations = [0] * len(source_paths)
    frame_n = 0
    for i, path in enumerate(source_paths):
        file_info = map(get_file_info, os.listdir(path))
        file_info = filter(lambda x: x != None, file_info)
        for info in sorted(file_info, key=lambda x: x['index']):
            mode = info['mode']
            source_files[mode].append(path + info['file'])
            if mode == modes[0]:
                width = Image.open(source_files[mode][-1]).size[0]
                durations[i] += width / fps
                frame_n += width
    large_tile_n = int(math.ceil(frame_n / large_tile_w))
    large_tile_last_w = frame_n % large_tile_w
    small_tile_n = int(math.ceil(frame_n / fps / small_tile_w))
    small_tile_last_w = int(math.ceil(frame_n / fps)) % small_tile_w

    timeline = timeline.crop((0, 0, pos, height))

    pos = 0
    i = 0
    while pos < timeline.size[0]:
        end = min(pos+output_width, timeline.size[0])
        timeline.crop((pos, 0, end, timeline.size[1])).save('%s%sp%04d.png' % (timeline_prefix, timeline.size[1], i))
        pos += output_width
        i += 1

def makeTimelineOverview(timeline_prefix, width, inpoint=0, outpoint=0, duration=-1, height=16):
    input_scale = 25

    timeline_file = '%s%sp.png' % (timeline_prefix, height)
    if outpoint > 0:
        timeline_file = '%s%sp.%d-%d.png' % (timeline_prefix, height, inpoint, outpoint)

    timeline = loadTimeline(timeline_prefix)
    duration = timeline.size[0]

    if inpoint<=0:
        inpoint = 0
    # open full timeline
    if large_tile_n == 1:
        data['full_tile_widths'] = [large_tile_last_w]
    else:
        inpoint = inpoint * input_scale
    if outpoint<=0:
        outpoint = duration
    else:
        outpoint = outpoint * input_scale
        w = full_tile_w
        n = large_tile_n
        if large_tile_last_w < large_tile_w:
            factor = full_tile_w / frame_n
            last_w = int(round(large_tile_last_w * factor))
            w -= last_w
            n -= 1
        data['full_tile_widths'] = divide(w, n)
        if large_tile_last_w < large_tile_w:
            data['full_tile_widths'].append(last_w)
    data['full_tile_offset'] = 0
    full_tile_image = Image.new('RGB', (full_tile_w, large_tile_h))

    timeline = timeline.crop((inpoint, 0, outpoint, timeline.size[1])).resize((width, height), Image.ANTIALIAS)
    timeline.save(timeline_file)
    # main loop
    data['target_images'] = {'large': None, 'small': None, 'full': full_tile_image}
    for mode in modes:
        target_w = 0
        for source_file in source_files[mode]:
            source_image = Image.open(source_file)
            source_w = source_image.size[0]
            target_x = target_w % large_tile_w
            if target_x == 0:
                save_and_open(data)
            data['target_images']['large'].paste(source_image, (target_x, 0))
            target_w += source_w
            if target_x + source_w > large_tile_w:
                # target tile overflows into next source tile
                save_and_open(data)
                target_x -= large_tile_w
                data['target_images']['large'].paste(source_image, (target_x, 0))
                target_w += source_w
        save_and_open(data)

def join_timelines(timelines, prefix):
    height = 64
    width = 1500
    # save full timelines
    image_file = '%stimelineantialias%dp.jpg' % (target_path, large_tile_h)
    data['target_images']['full'].save(image_file)
    #print image_file
    image_file = '%stimelineantialias%dp.jpg' % (target_path, small_tile_h)
    data['target_images']['full'].resize(
        (full_tile_w, small_tile_h), Image.ANTIALIAS
    ).save(image_file)
    #print image_file

    ox.makedirs(os.path.dirname(prefix))
    for f in glob('%s*'%prefix):
        os.unlink(f)
    # join cuts
    cuts = []
    offset = 0
    for i, path in enumerate(source_paths):
        with open(os.path.join(path, 'cuts.json'), 'r') as f:
            path_cuts = json.load(f)
        if i > 0:
            cuts.append(offset)
        for cut in path_cuts:
            cuts.append(offset + cut)
        offset += durations[i]
    with open(os.path.join(target_path, 'cuts.json'), 'w') as f:
        # avoid float rounding artefacts
        f.write('[' + ', '.join(map(lambda x: '%.2f' % x, cuts)) + ']')

    tiles = []
    for timeline in timelines:
        tiles += getTiles(timeline, height)
def split_tiles(path, paths, durations):

    timeline = Image.new("RGB", (2 * width, height))
    def is_timeline_file(file_name):
        return file_name.startswith('timeline') and file_name.endswith('.png')

    pos = 0
    i = 0
    for tile in tiles:
        tile = Image.open(tile)
        timeline.paste(tile, (pos, 0, pos+tile.size[0], height))
        pos += tile.size[0]
        if pos >= width:
            timeline_name = '%s%sp%04d.png' % (prefix, height, i)
            timeline.crop((0, 0, width, height)).save(timeline_name)
            i += 1
            if pos > width:
                t = timeline.crop((width, 0, pos, height))
                timeline.paste(t, (0, 0, t.size[0], height))
                pos -= width
    if pos:
        timeline_name = '%s%sp%04d.png' % (prefix, height, i)
        timeline.crop((0, 0, pos, height)).save(timeline_name)
    file_names = filter(is_timeline_file, os.listdir(path))
    tiles = {}
    for file_name in file_names:
        mode = re.split('\d+', file_name[8:])[0]
        print file_name, mode
        split = re.split('[a-z]+', file_name[8 + len(mode):-4])
        height, index = map(lambda x: int(x) if len(x) else -1, split)
        if not mode in tiles:
            tiles[mode] = {}
        if not height in tiles[mode]:
            tiles[mode][height] = 0
        if index + 1 > tiles[mode][height]:
            tiles[mode][height] = index + 1
    print tiles

    makeTiles(prefix, 16, 3600)
    makeTimelineOverview(prefix, 1920, height=16)
    makeTimelineOverview(prefix, 1920, height=64)
    # for each mode
    for mode in tiles:
        image_mode = 'L' if mode == 'audio' else 'RGB'
        # and for each size of that mode
        for i, height in enumerate(tiles[mode]):
            tile_width = 1500 if i == 0 else 3600
            px_per_sec = 25 if i == 0 else 1
            target_images = []
            target_data = []
            # and for each split item
            for item_index, duration in enumerate(durations):
                tile_index = 0
                px = int(math.ceil(duration * px_per_sec))
                # create a flat list of all target images
                # (and store the split item and tile index)
                while px:
                    width = tile_width if px > tile_width else px
                    target_images.append(
                        Image.new(image_mode, (width, height))
                    )
                    target_data.append(
                        {'item': item_index, 'tile': tile_index}
                    )
                    tile_index += 1
                    px -= width
            target_index = 0
            offset = 0
            # for each source tile
            for source_index in range(tiles[mode][height]):
                source_image = Image.open('%stimeline%s%dp%d.png' % (
                    path, mode, height, source_index
                ))
                source_width = source_image.size[0]
                target_width = target_images[target_index].size[0]
                target_images[target_index].paste(source_image, (offset, 0))
                # paste it into as many target tiles as needed
                while source_width + offset > target_width:
                    offset -= target_width
                    target_index += 1
                    target_width = target_images[target_index].size[0]
                    target_images[target_index].paste(source_image, (offset, 0))
            for i, target_image in enumerate(target_images):
                file_name = '%stimeline%s%dp%d' % (
                    paths[target_data[i]['item']], mode, height, target_data[i]['tile']
                )
                # target_image.save(file_name)
                print file_name, target_image.size
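Two constants drive the new tile layout: the large tiles are 1500 x 64 px at 25 px per second (60 seconds per tile) and the small tiles are 3600 x 16 px at 1 px per second (one small tile per 60 large tiles), while the full antialias overview is squeezed into a single 1920 px wide image whose per-tile widths come from the nested divide() helper. A quick check of divide(), copied out of join_tiles() above so it can run on its own:

    def divide(num, by):
        # divide(100, 3) -> [33, 33, 34]: split num into `by` integer parts
        # that differ by at most one, putting the larger parts at the end
        arr = []
        div = int(num / by)
        mod = num % by
        for i in range(int(by)):
            arr.append(div + (i > by - 1 - mod))
        return arr

    assert divide(100, 3) == [33, 33, 34]
    assert sum(divide(1920, 7)) == 1920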
@@ -9,8 +9,8 @@ urlpatterns = patterns("item.views",
    (r'^(?P<id>[A-Z0-9].*)/(?P<size>\d+)p(?P<position>[\d\.]*)\.jpg$', 'frame'),

    #timelines
    (r'^(?P<id>[A-Z0-9].*)/timeline(?P<size>\d+)p(?P<position>\d+)\.png$', 'timeline'),
    (r'^(?P<id>[A-Z0-9].*)/timeline(?P<size>\d+)p\.png$', 'timeline_overview'),
    (r'^(?P<id>[A-Z0-9].*)/timeline(?P<mode>[a-z]*)(?P<size>\d+)p(?P<position>\d+)\.(?P<format>png|jpg)$', 'timeline'),
    (r'^(?P<id>[A-Z0-9].*)/timeline(?P<mode>[a-z]*)(?P<size>\d+)p\.(?P<format>png|jpg)$', 'timeline'),

    #video
    (r'^(?P<id>[A-Z0-9].*)/(?P<resolution>\d+)p(?P<index>\d*)\.(?P<format>webm|ogv|mp4)$', 'video'),
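The two old timeline routes (indexed tile and full overview, PNG only) are folded into one pattern family that also captures an optional mode and the image format, so old .png links still reach the timeline view. A sketch of what the indexed-tile pattern captures (the URLs are made up):

    import re

    pattern = re.compile(
        r'^(?P<id>[A-Z0-9].*)/timeline(?P<mode>[a-z]*)(?P<size>\d+)p(?P<position>\d+)\.(?P<format>png|jpg)$'
    )

    m = pattern.match('ABC/timelineantialias64p3.jpg')
    assert m.groupdict() == {'id': 'ABC', 'mode': 'antialias',
                             'size': '64', 'position': '3', 'format': 'jpg'}
    # an empty mode is allowed; the view maps it to 'antialias'
    assert pattern.match('ABC/timeline16p0.png').group('mode') == ''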
@@ -713,20 +713,30 @@ def icon(request, id, size=None):
    response['Cache-Control'] = 'no-cache'
    return response

def timeline(request, id, size, position):
def timeline(request, id, size, position=-1, format='jpg', mode=None):
    item = get_object_or_404(models.Item, itemId=id)
    if not item.access(request.user):
        return HttpResponseForbidden()
    timeline = '%s%sp%04d.png' %(item.timeline_prefix, size, int(position))
    return HttpFileResponse(timeline, content_type='image/png')

    if not mode:
        mode = 'antialias'
    modes = [t['id'] for t in settings.CONFIG['timelines']]
    if mode not in modes:
        raise Http404
    modes.pop(modes.index(mode))

def timeline_overview(request, id, size):
    item = get_object_or_404(models.Item, itemId=id)
    if not item.access(request.user):
        return HttpResponseForbidden()
    timeline = '%s%sp.png' %(item.timeline_prefix, size)
    return HttpFileResponse(timeline, content_type='image/png')
    prefix = os.path.join(item.timeline_prefix, 'timeline')
    def timeline():
        timeline = '%s%s%sp' % (prefix, mode, size)
        if position > -1:
            timeline += '%d' % int(position)
        return timeline + '.jpg'

    path = timeline()
    while modes and not os.path.exists(path):
        mode = modes.pop()
        path = timeline()
    return HttpFileResponse(path, content_type='image/jpeg')

def torrent(request, id, filename=None):
    item = get_object_or_404(models.Item, itemId=id)
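The single timeline view now serves every tile variant: it assembles the file name from prefix, mode, size and optional position, and if the requested mode has not been rendered for this item it falls back to any other configured mode it finds on disk. A standalone sketch of that lookup; the configured_modes default stands in for settings.CONFIG['timelines'] and the example path is made up:

    import os

    def resolve_tile(prefix, mode, size, position=-1,
                     configured_modes=('antialias', 'slitscan', 'keyframes', 'audio')):
        modes = [m for m in configured_modes if m != mode]

        def name(m):
            # same naming as the nested timeline() helper in the view
            path = '%s%s%sp' % (prefix, m, size)
            if position > -1:
                path += '%d' % int(position)
            return path + '.jpg'

        path = name(mode)
        while modes and not os.path.exists(path):
            # fall back to another mode that was actually rendered
            path = name(modes.pop())
        return path

    # e.g. resolve_tile('/srv/pandora/media/ABC/timeline', 'slitscan', 64, 0)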
@@ -1,7 +1,7 @@
-e svn+http://code.djangoproject.com/svn/django/branches/releases/1.3.X#egg=django
#South
-e bzr+http://code.0x2620.org/python-ox/#egg=python-ox
-e bzr+http://code.0x2620.org/oxtimeline/#egg=oxtimeline
-e bzr+http://code.0x2620.org/oxtimelines/#egg=oxtimelines
simplejson
chardet
celery>=2.4.2
@@ -28,12 +28,10 @@ pandora.ui.editor = function(data) {
            return '/' + pandora.user.ui.item + '/' + pandora.user.ui.videoResolution + 'p' + position + '.jpg';
        },
        getLargeTimelineURL: function(type, i) {
            type = '';
            return '/' + pandora.user.ui.item + '/timeline' + type + '64p' + i + '.png';
            return '/' + pandora.user.ui.item + '/timeline' + type + '64p' + i + '.jpg';
        },
        getSmallTimelineURL: function(type, i) {
            type = '';
            return '/' + pandora.user.ui.item + '/timeline' + type + '16p' + i + '.png';
            return '/' + pandora.user.ui.item + '/timeline' + type + '16p' + i + '.jpg';
        },
        height: pandora.$ui.contentPanel.size(1),
        id: 'editor',
@@ -309,8 +309,7 @@ pandora.ui.list = function() {
        duration: data.duration,
        find: isClipsQuery ? clipsQuery.conditions[0].value : '',
        getImageURL: function(type, i) {
            type = '';
            return '/' + data.id + '/timeline' + type + '16p' + i + '.png';
            return '/' + data.id + '/timeline' + type + '16p' + i + '.jpg';
        },
        position: pandora.user.ui.videoPoints[data.id]
            ? pandora.user.ui.videoPoints[data.id].position : 0,
@@ -23,8 +23,7 @@ pandora.ui.player = function(data) {
        enableSubtitles: pandora.user.ui.videoSubtitles,
        find: pandora.user.ui.itemFind,
        getLargeTimelineURL: function(type, i) {
            type = '';
            return '/' + pandora.user.ui.item + '/timeline' + type + '64p' + i + '.png';
            return '/' + pandora.user.ui.item + '/timeline' + type + '64p' + i + '.jpg';
        },
        height: pandora.$ui.contentPanel.size(1),
        'in': pandora.user.ui.videoPoints[pandora.user.ui.item]['in'],

@@ -43,7 +42,7 @@ pandora.ui.player = function(data) {
        showLayers: Ox.clone(pandora.user.ui.showLayers),
        showUsers: pandora.site.annotations.showUsers,
        showTimeline: pandora.user.ui.showTimeline,
        smallTimelineURL: '/' + pandora.user.ui.item + '/timeline16p.png',
        smallTimelineURL: '/' + pandora.user.ui.item + '/timeline16p.jpg',
        subtitles: data.subtitles,
        timeline: pandora.user.ui.videoTimeline,
        tooltips: true,

@@ -136,4 +135,4 @@ pandora.ui.player = function(data) {
        }
    });

};
};
@@ -24,8 +24,7 @@ pandora.ui.timeline = function(data) {
            return '/' + ui.item + '/' + ui.videoResolution + 'p' + position + '.jpg';
        },
        getLargeTimelineURL: function(type, i) {
            type = '';
            return '/' + ui.item + '/timeline' + type + '64p' + i + '.png';
            return '/' + ui.item + '/timeline' + type + '64p' + i + '.jpg';
        },
        height: pandora.$ui.contentPanel.size(1),
        layers: data.annotations,

@@ -40,7 +39,7 @@ pandora.ui.timeline = function(data) {
        showAnnotationsMap: ui.showAnnotationsMap,
        showLayers: Ox.clone(ui.showLayers),
        showUsers: pandora.site.annotations.showUsers,
        smallTimelineURL: '/' + ui.item + '/timeline16p.png',
        smallTimelineURL: '/' + ui.item + '/timeline16p.jpg',
        timeline: ui.videoTimeline,
        timelines: pandora.site.timelines,
        video: data.video,

@@ -110,4 +109,4 @@ pandora.ui.timeline = function(data) {
        }
    });

};
};
@@ -76,7 +76,7 @@ pandora.ui.tv = function() {
        scaleToFill: pandora.user.ui.videoScale == 'fill',
        subtitles: videoOptions.subtitles,
        tooltips: true,
        timeline: '/' + result.data.item + '/timeline16p.png',
        timeline: '/' + result.data.item + '/timeline16p.jpg',
        title: pandora.site.site.name + ' — ' + (
            list || 'All ' + pandora.site.itemName.plural
        ) + ' — '
@@ -21,7 +21,7 @@ pandora.ui.videoPreview = function(data) {
        height: data.height,
        position: data.position,
        scaleToFill: true,
        timeline: '/' + data.id + '/timeline16p.png',
        timeline: '/' + data.id + '/timeline16p.jpg',
        width: data.width
    });
    return that;
@@ -2,6 +2,7 @@
pandora_repos=http://code.0x2620.org/pandora/
oxjs_repos=http://code.0x2620.org/oxjs/
python_ox_repos=http://code.0x2620.org/python-ox/
oxtimelines_repos=http://code.0x2620.org/oxtimelines/

cd `dirname $0`
base=`pwd`

@@ -29,6 +30,11 @@ if [ -e src/python-ox ]; then
    new=$new`bzr revno`
fi
cd $base
if [ -e src/oxtimelines ]; then
    cd src/oxtimelines
    bzr pull $oxtimelines_repos
fi
cd $base
if [ $current -ne $new ]; then
    cd pandora
    ./manage.py update_static
@@ -2,6 +2,6 @@
CH="chroot $1"
$CH bzr branch http://code.0x2620.org/pandora /srv/pandora
$CH bzr branch http://code.0x2620.org/oxjs /srv/pandora/static/oxjs
$CH virtualenv /srv/pandora
$CH virtualenv --system-site-packages /srv/pandora
$CH pip -E /srv/pandora install -r /srv/pandora/requirements.txt