add audio timeline, cleanup video timeline

j 2010-11-03 22:20:24 +01:00
parent 399e7d328b
commit 35882f2234
7 changed files with 406 additions and 808 deletions

View file

@@ -13,8 +13,9 @@ root = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
 if os.path.exists(os.path.join(root, 'oxtimeline')):
     sys.path.insert(0, root)
+import ox
 import oxtimeline
+from oxtimeline import audio, video
 
 if __name__ == '__main__':
     parser = OptionParser()
@@ -22,6 +23,7 @@ if __name__ == '__main__':
     parser.add_option('-y', '--height', dest='height', help='timeline height, defaults to 64px', default=64, type="int")
     parser.add_option('-o', '--prefix', dest='prefix', help='prefix for timeline tiles')
     parser.add_option('-i', '--input', dest='input', help='video input')
+    parser.add_option('-a', '--audio', action="store_true", dest="audio", default=False)
     (opts, args) = parser.parse_args()
 
     if None in (opts.prefix, opts.input):
@@ -29,11 +31,13 @@ if __name__ == '__main__':
         sys.exit()
 
     opts.input = os.path.abspath(opts.input)
-    timeline = oxgst.Timeline(opts.input)
-    timeline.extract(opts.prefix, opts.width, opts.height)
+    info = ox.avinfo(opts.input)
+    if not info['video'] or opts.audio:
+        audio.Timeline(opts.input, opts.prefix, opts.width, opts.height)
+    else:
+        video.Timeline(opts.input, opts.prefix, opts.width, opts.height)
 
     oxtimeline.createTimelineMultiline(opts.prefix)
     oxtimeline.makeTiles(opts.prefix, 16, 3600)
    oxtimeline.makeTimelineOverview(opts.prefix, 1920, height=16)
    oxtimeline.makeTimelineOverview(opts.prefix, 1920, height=64)
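
The new dispatch picks the waveform renderer whenever ox.avinfo() reports no video track, or when the new -a/--audio switch forces it; otherwise the frame-based renderer runs, and the overview images are derived afterwards either way. A minimal sketch of driving the same path from Python rather than the command line; the function name, the paths and the width/height defaults are illustrative, not part of the module:

import ox
import oxtimeline
from oxtimeline import audio, video

def render_timeline(path, prefix, width=1500, height=64, force_audio=False):
    # choose waveform vs. frame timeline, mirroring the updated script above
    info = ox.avinfo(path)
    if not info['video'] or force_audio:
        audio.Timeline(path, prefix, width, height)
    else:
        video.Timeline(path, prefix, width, height)
    # derived outputs, same calls as in the script
    oxtimeline.createTimelineMultiline(prefix)
    oxtimeline.makeTiles(prefix, 16, 3600)
    oxtimeline.makeTimelineOverview(prefix, 1920, height=16)
    oxtimeline.makeTimelineOverview(prefix, 1920, height=64)

render_timeline('/tmp/clip.avi', '/tmp/timelines/clip')  # hypothetical paths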

oxtimeline/__init__.py

@@ -1,6 +1,154 @@
-from timeline import *
-import video
-import info
-from video import Video
-from info import Info
+# -*- coding: utf-8 -*-
+# vi:si:et:sw=4:sts=4:ts=4
+# GPL 2008-2010
+import gobject
+gobject.threads_init()
+from glob import glob
+import math
+import os
+import time
+import pygst
+pygst.require("0.10")
+import gst
+import Image
+
+import video
+import audio

def loadTimeline(timeline_prefix, height=64):
files = sorted(glob('%s.%s.*.png' % (timeline_prefix, height)))
f = Image.open(files[0])
width = f.size[0]
f = Image.open(files[-1])
duration = f.size[0] + (len(files)-1)*width
timeline = Image.new("RGBA", (duration, height))
pos = 0
for f in files:
part = Image.open(f)
timeline.paste(part, (pos, 0, pos + part.size[0], height))
pos += part.size[0]
return timeline
def createTimelineMultiline(timeline_prefix, width=600, height=16):
lineWidth = width
timlelineHeight = height
timeline = loadTimeline(timeline_prefix)
duration = timeline.size[0]
width = duration/25 #one pixel per second
timeline = timeline.resize((width, timlelineHeight), Image.ANTIALIAS).convert('RGBA')
lineHeight = timlelineHeight + 2 * 4
lines = int(math.ceil(width / lineWidth) + 1)
size = (lineWidth, lineHeight * lines)
timelineColor = (64, 64, 64)
i = Image.new("RGBA", size)
#padd end with nothing to fit to grid
t = Image.new("RGBA", (lineWidth * lines, timlelineHeight))
t.paste(timeline, (0, 0))
for currentLine in range(0, lines):
offset = currentLine * lineHeight + 4
toffset = currentLine * lineWidth
try:
tbox = t.crop((toffset, 0, toffset + lineWidth, timlelineHeight))
box = ((0, offset , tbox.size[0], offset + tbox.size[1]))
i.paste(tbox, box)
except:
broken = True
width = lineWidth
if currentLine == lines -1:
width = duration - (lines - 1) * lineWidth
box = ((0, offset , width, offset + timlelineHeight))
i.paste(timelineColor, box)
timeline_file = '%s.overview.png' % (timeline_prefix)
i.save(timeline_file, 'PNG')
timeline8_file = '%s.overview.8.png' % (timeline_prefix)
if lines < 8:
i.save(timeline8_file, 'PNG')
else:
i.crop((0,0,lineWidth, 8 * lineHeight)).save(timeline8_file, 'PNG')
def makeTimelineByFramesPerPixel(timeline_prefix, frames_per_pixel, inpoint=0, outpoint=0, height=16):
pos = 0
input_scale = 25
timeline_file = '%s.pixels.%s.png' % (timeline_prefix, width)
if outpoint > 0:
timeline_file = '%s.pixels.%s.%d-%d.png' % (timeline_prefix, width, inpoint, outpoint)
timeline = loadTimeline(timeline_prefix)
duration = timeline.size[0]
width = duration / frames_per_pixel
if inpoint<=0:
inpoint = 0
else:
inpoint = inpoint * input_scale
if outpoint<=0:
outpoint = pos
else:
outpoint = outpoint * input_scale
timeline = timeline.crop((inpoint, 0, outpoint, timeline.size[1])).resize((width, height), Image.ANTIALIAS)
timeline.save(timeline_file)
def makeTimelineOverview(timeline_prefix, width, inpoint=0, outpoint=0, duration=-1, height=16):
input_scale = 25
timeline_file = '%s.%s.png' % (timeline_prefix, height)
if outpoint > 0:
timeline_file = '%s.overview.%s.%d-%d.png' % (timeline_prefix, height, inpoint, outpoint)
timeline = loadTimeline(timeline_prefix)
duration = timeline.size[0]
if inpoint<=0:
inpoint = 0
else:
inpoint = inpoint * input_scale
if outpoint<=0:
outpoint = duration
else:
outpoint = outpoint * input_scale
timeline = timeline.crop((inpoint, 0, outpoint, timeline.size[1])).resize((width, height), Image.ANTIALIAS)
timeline.save(timeline_file)
def makeTiles(timeline_prefix, height=16, width=3600):
files = glob('%s.64.*.png' % timeline_prefix)
fps = 25
part_step = 60
output_width = width
width = len(files) * part_step
timeline = Image.new("RGBA", (width, height))
pos = 0
for f in sorted(files):
part = Image.open(f)
part_width = int(part.size[0] / fps)
part = part.resize((part_width, height), Image.ANTIALIAS)
timeline.paste(part, (pos, 0, pos+part_width, height))
pos += part_width
timeline = timeline.crop((0, 0, pos, height))
pos = 0
i = 0
while pos < timeline.size[0]:
end = min(pos+output_width, timeline.size[0])
timeline.crop((pos, 0, end, timeline.size[1])).save('%s.%s.%04d.png' % (timeline_prefix, timeline.size[1], i))
pos += output_width
i += 1
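
The helpers above assume the per-minute tiles written by audio.Timeline and video.Timeline: 64 px tall and one pixel per frame at 25 fps. makeTiles resamples them to one pixel per second and recuts them into 3600 px (one hour) strips, createTimelineMultiline wraps a one-pixel-per-second timeline into 600 px rows, and makeTimelineOverview squeezes the whole stitched timeline into a single strip (1920 px wide as called from the script). A rough sanity check of that geometry; the 1500 px source tile width is an assumption implied by makeTiles' 60-second part_step:

fps = 25
duration = 90 * 60                            # a 90 minute input, in seconds
frames = duration * fps                       # 135000 frames -> 135000 px at one pixel per frame
tile_px = fps * 60                            # 1500 px per source tile, i.e. one minute each
ntiles = frames // tile_px                    # 90 source tiles on disk
small_px = tile_px // fps                     # 60 px per tile after makeTiles' resize
hour_strips = -(-ntiles * small_px // 3600)   # ceiling division: 2 strips, the second partial
print(ntiles, small_px, hour_strips)          # 90 60 2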

oxtimeline/audio.py (new file, 148 lines)

@@ -0,0 +1,148 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2008-2010
from __future__ import division
import Image
import time
import math
from struct import unpack
import numpy as np
import gobject
import gst
class Audio(gst.Pipeline):
_data = []
position = 0
def __init__(self, uri, samplerate=22050, channels=1):
gst.Pipeline.__init__(self)
self.duration = -1
self.framerate = 25
self.samplerate = samplerate
self.channels = channels
self.uri = uri
self.src = gst.element_factory_make("filesrc")
self.src.props.location = self.uri
self.sbin = gst.element_factory_make("decodebin2")
self.queue = gst.element_factory_make("queue")
self.rate = gst.element_factory_make("audioresample")
self.conv = gst.element_factory_make("audioconvert")
self.sink = gst.element_factory_make("fakesink")
self.sink.props.signal_handoffs = True
self.sink.connect('handoff', self._data_callback)
self.add(self.src, self.sbin, self.conv, self.queue, self.rate, self.sink)
self.src.link(self.sbin)
self.sbin.connect('pad-added', self._sbinPadAddedCb)
self.set_state(gst.STATE_PAUSED)
self.get_state()
self.getDuration()
self.frames = int((float(self.duration) / gst.SECOND) * float(self.framerate))
def _sbinPadAddedCb(self, unused_sbin, pad):
caps = pad.get_caps()
if 'audio' in str(caps):
pad.link(self.queue.get_pad("sink"))
self.queue.link(self.conv)
self.conv.link(self.rate)
self.rate.link(self.sink,
gst.Caps("audio/x-raw-int,rate=%s,channels=%s,width=16,depth=16" %
(self.samplerate, self.channels)))
def _data_callback(self, sink, buff, pad):
timestamp = buff.timestamp
samples = buff.size // 2
fmt = "<" + str(samples) + "h"
data = unpack(fmt, buff.data)
data = self._data + list(data)
samples_per_pixel = self.samplerate / self.framerate
while len(data) > samples_per_pixel:
pixel = data[:samples_per_pixel]
pixel = np.asarray(pixel)
data = data[samples_per_pixel:]
p = np.sum(np.abs(pixel)) / samples_per_pixel
p = p / 256
height = int((p * self.tile_height) / 256) * 2
if p: p += 20
p = (p, p, p, 255)
tile = int(math.floor(float(self.position) / self.input_tile_width))
tilePos = self.position - (tile * self.input_tile_width)
crop = (self.tile_height-height) / 2
for i in range(crop, self.tile_height-crop):
self.tiles[tile].putpixel((tilePos, i), p)
self.position += 1
self._data = data
if self.mainloop and timestamp >= self.duration:
self.done()
def getDuration(self):
if self.duration < 0:
pads = self.sink.sink_pads()
q = gst.query_new_duration(gst.FORMAT_TIME)
for pad in pads:
if pad.get_peer() and pad.get_peer().query(q):
format, self.duration = q.parse_duration()
return self.duration
class Timeline(Audio):
def __init__(self, uri, prefix, width, height):
Audio.__init__(self, uri)
bus = self.get_bus()
bus.add_signal_watch()
self.watch_id = bus.connect("message", self.onBusMessage)
self.mainloop = gobject.MainLoop()
self.tile_width = width
self.tile_height = height
self.prefix = prefix
self.timeline_fps = 25
self.input_tile_width = int(math.ceil((float(self.framerate)/self.timeline_fps) * width))
ntiles = int(math.ceil(float(self.frames)/self.input_tile_width))
self.tiles = []
for i in range(ntiles):
tile = Image.new("RGBA", (self.input_tile_width, height), (0, 0, 0, 0))
self.tiles.append(tile)
self.set_state(gst.STATE_PLAYING)
self.mainloop.run()
for i in range(ntiles):
tile = self.tiles[i]
if tile.size[0] != self.tile_width:
tile = tile.resize((self.tile_width, self.tile_height), Image.ANTIALIAS)
if i < (ntiles-1):
frames = self.input_tile_width
else:
frames = self.frames-((ntiles-1)*self.input_tile_width)
tile_width = int(math.ceil(self.timeline_fps*frames)/float(self.framerate))
if -2 < self.tile_width - tile_width < 2:
tile_width = self.tile_width
tile = tile.crop((0, 0, tile_width, self.tile_height))
filename = "%s.%s.%04d.png" % (self.prefix, self.tile_height, i)
tile.save(filename)
def done(self):
self.mainloop.quit()
def onBusMessage(self, bus, message):
if message.src == self and message.type == gst.MESSAGE_EOS:
self.done()
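
Audio.Timeline renders one column per output frame: the pipeline decodes to mono 16-bit PCM at 22050 Hz, _data_callback averages the absolute sample values of each 1/25 s slice, and that average becomes both a grey level and a bar height centred in the tile. A distilled sketch of the per-column mapping outside the GStreamer callback; the function and variable names are mine, the arithmetic follows _data_callback above:

import numpy as np

def waveform_column(samples, tile_height=64):
    # samples: one frame's worth of signed 16-bit values (samplerate/framerate of them)
    p = int(np.sum(np.abs(np.asarray(samples))) // len(samples))  # mean |amplitude|, 0..32767
    p = p // 256                                  # scale towards a 0..127 grey value
    height = int((p * tile_height) / 256) * 2     # bar height grows with loudness
    if p:
        p += 20                                   # lift quiet but non-silent columns
    colour = (p, p, p, 255)
    crop = (tile_height - height) // 2            # rows left transparent above and below the bar
    return colour, list(range(crop, tile_height - crop))

colour, rows = waveform_column([1200, -800, 950] * 294)   # 882 samples = 22050 / 25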

oxtimeline/imagesink.py

@@ -1,14 +1,15 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
 # GPL 2008
 
 import gobject
 import pygst
 pygst.require("0.10")
 import gst
 import Image
 
 class ImageSink(gst.BaseSink):
     #def log(self, msg):
     #    print msg

oxtimeline/singledecodebin.py (deleted)

@@ -1,462 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Mode: Python; -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# pitivi/elements/singledecodebin.py
#
# Copyright (c) 2005, Edward Hervey <bilboed@bilboed.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Single-stream queue-less decodebin
"""
import gobject
import gst
def pad_compatible_stream(pad, stream):
"""
Checks whether the given pad is compatible with the given stream.
@param pad: The pad
@type pad: C{gst.Pad}
@param stream: The stream to match against.
@type stream: L{MultimediaStream}
@return: Whether the pad is compatible with the given stream
@rtype: C{bool}
"""
if stream == None:
# yes, None is the magical stream that takes everything
return True
# compatible caps
if stream.caps:
return not stream.caps.intersect(pad.get_caps()).is_empty()
raise Exception("Can't figure out compatibility since the stream doesn't have any caps")
class CachedFactoryList(object):
def __init__(self, factoryFilter=None):
self._factoryFilter = factoryFilter
self._factories = None
self._registry = gst.registry_get_default()
self._registry.connect("feature-added", self._registryFeatureAddedCb)
def get(self):
if self._factories is None:
self._buildFactories()
return self._factories
def _buildFactories(self):
# build the cache
#log.debug("utils", "Getting factories list")
factories = self._registry.get_feature_list(gst.ElementFactory)
if self._factoryFilter is not None:
#log.debug("utils", "filtering")
factories = filter(self._factoryFilter, factories)
#log.debug("utils", "Sorting by rank")
factories.sort(key=lambda factory: factory.get_rank(), reverse=True)
self._factories = factories
#log.debug("utils", "Cached factories is now %r", self._factories)
def _registryFeatureAddedCb(self, registry, feature):
# invalidate the cache
#log.warning("utils", "New feature added, invalidating cached factories")
self._factories = None
def is_raw(caps):
""" returns True if the caps are RAW """
rep = caps.to_string()
valid = ["video/x-raw", "audio/x-raw", "text/plain", "text/x-pango-markup"]
for val in valid:
if rep.startswith(val):
return True
return False
def factoryFilter(factory):
if factory.get_rank() < 64 :
return False
klass = factory.get_klass()
for cat in ("Demuxer", "Decoder", "Parse"):
if cat in klass:
return True
return False
_factoryCache = CachedFactoryList(factoryFilter)
class SingleDecodeBin(gst.Bin):
"""
A variant of decodebin.
* Only outputs one stream
* Doesn't contain any internal queue
"""
QUEUE_SIZE = 1 * gst.SECOND
__gsttemplates__ = (
gst.PadTemplate ("sinkpadtemplate",
gst.PAD_SINK,
gst.PAD_ALWAYS,
gst.caps_new_any()),
gst.PadTemplate ("srcpadtemplate",
gst.PAD_SRC,
gst.PAD_SOMETIMES,
gst.caps_new_any())
)
def __init__(self, caps=None, uri=None, stream=None, *args, **kwargs):
gst.Bin.__init__(self, *args, **kwargs)
if not caps:
caps = gst.caps_new_any()
self.caps = caps
self.stream = stream
self.typefind = gst.element_factory_make("typefind", "internal-typefind")
self.add(self.typefind)
self.uri = uri
if self.uri and gst.uri_is_valid(self.uri):
self.urisrc = gst.element_make_from_uri(gst.URI_SRC, uri, "urisrc")
self.log("created urisrc %s / %r" % (self.urisrc.get_name(),
self.urisrc))
self.add(self.urisrc)
# Set the blocksize to 512kbytes, this will only matter for push-based sources
if hasattr(self.urisrc.props, "blocksize"):
self.urisrc.props.blocksize = 524288
self.urisrc.link_pads_full("src", self.typefind, "sink",
gst.PAD_LINK_CHECK_NOTHING)
else:
self._sinkpad = gst.GhostPad("sink", self.typefind.get_pad("sink"))
self._sinkpad.set_active(True)
self.add_pad(self._sinkpad)
self.typefind.connect("have_type", self._typefindHaveTypeCb)
self._srcpad = None
self._dynamics = []
self._validelements = [] #added elements
self.debug("stream:%r" % self.stream)
self.pending_newsegment = False
self.eventProbeId = None
## internal methods
def _controlDynamicElement(self, element):
self.log("element:%s" % element.get_name())
self._dynamics.append(element)
element.connect("pad-added", self._dynamicPadAddedCb)
element.connect("no-more-pads", self._dynamicNoMorePadsCb)
def _findCompatibleFactory(self, caps):
"""
Returns a list of factories (sorted by rank) which can take caps as
input. Returns empty list if none are compatible
"""
self.debug("caps:%s" % caps.to_string())
res = []
for factory in _factoryCache.get():
for template in factory.get_static_pad_templates():
if template.direction == gst.PAD_SINK:
intersect = caps.intersect(template.static_caps.get())
if not intersect.is_empty():
res.append(factory)
break
self.debug("returning %r" % res)
return res
def _closeLink(self, element):
"""
Inspects element and tries to connect something on the srcpads.
If there are dynamic pads, it sets up a signal handler to
continue autoplugging when they become available.
"""
to_connect = []
dynamic = False
templates = element.get_pad_template_list()
for template in templates:
if not template.direction == gst.PAD_SRC:
continue
if template.presence == gst.PAD_ALWAYS:
pad = element.get_pad(template.name_template)
to_connect.append(pad)
elif template.presence == gst.PAD_SOMETIMES:
pad = element.get_pad(template.name_template)
if pad:
to_connect.append(pad)
else:
dynamic = True
else:
self.log("Template %s is a request pad, ignoring" % pad.name_template)
if dynamic:
self.debug("%s is a dynamic element" % element.get_name())
self._controlDynamicElement(element)
for pad in to_connect:
self._closePadLink(element, pad, pad.get_caps())
def _isDemuxer(self, element):
if not 'Demux' in element.get_factory().get_klass():
return False
potential_src_pads = 0
for template in element.get_pad_template_list():
if template.direction != gst.PAD_SRC:
continue
if template.presence == gst.PAD_REQUEST or \
"%" in template.name_template:
potential_src_pads += 2
break
else:
potential_src_pads += 1
return potential_src_pads > 1
def _plugDecodingQueue(self, pad):
queue = gst.element_factory_make("queue")
queue.props.max_size_time = self.QUEUE_SIZE
queue.props.max_size_buffers = 3
self.add(queue)
queue.sync_state_with_parent()
pad.link_full(queue.get_pad("sink"), gst.PAD_LINK_CHECK_NOTHING)
pad = queue.get_pad("src")
return pad
def _tryToLink1(self, source, pad, factories):
"""
Tries to link one of the factories' element to the given pad.
Returns the element that was successfully linked to the pad.
"""
self.debug("source:%s, pad:%s , factories:%r" % (source.get_name(),
pad.get_name(),
factories))
if self._isDemuxer(source):
pad = self._plugDecodingQueue(pad)
result = None
for factory in factories:
element = factory.create()
if not element:
self.warning("weren't able to create element from %r" % factory)
continue
sinkpad = element.get_pad("sink")
if not sinkpad:
continue
self.add(element)
element.set_state(gst.STATE_READY)
try:
pad.link(sinkpad)
except:
element.set_state(gst.STATE_NULL)
self.remove(element)
continue
self._closeLink(element)
element.set_state(gst.STATE_PAUSED)
result = element
break
return result
def _closePadLink(self, element, pad, caps):
"""
Finds the list of elements that could connect to the pad.
If the pad has the desired caps, it will create a ghostpad.
If no compatible elements could be found, the search will stop.
"""
self.debug("element:%s, pad:%s, caps:%s" % (element.get_name(),
pad.get_name(),
caps.to_string()))
if caps.is_empty():
self.log("unknown type")
return
if caps.is_any():
self.log("type is not know yet, waiting")
return
self.debug("stream %r" % (self.stream))
if caps.intersect(self.caps) and (self.stream is None or
(self.stream.pad_name == get_pad_id(pad))):
# This is the desired caps
if not self._srcpad:
self._wrapUp(element, pad)
elif is_raw(caps) and pad_compatible_stream(pad, self.stream):
self.log ("not the target stream, but compatible")
if not self._srcpad:
self._wrapUp(element, pad)
elif is_raw(caps):
self.log("We hit a raw caps which isn't the wanted one")
# FIXME : recursively remove everything until demux/typefind
else:
# Find something
if len(caps) > 1:
self.log("many possible types, delaying")
return
facts = self._findCompatibleFactory(caps)
if not facts:
self.log("unknown type")
return
self._tryToLink1(element, pad, facts)
def _wrapUp(self, element, pad):
"""
Ghost the given pad of element.
Remove non-used elements.
"""
if self._srcpad:
return
self._markValidElements(element)
gobject.idle_add(self._removeUnusedElements, self.typefind)
if pad.props.caps is not None:
caps = pad.props.caps
else:
caps = pad.get_caps()
self._srcpad = gst.GhostPad("src", pad)
self._srcpad.set_active(True)
if caps.is_fixed():
self._exposePad(target=pad)
else:
self._blockPad(target=pad)
def _exposePad(self, target):
self.log("ghosting pad %s" % target.get_name())
self.add_pad(self._srcpad)
self.post_message(gst.message_new_state_dirty(self))
def _blockPad(self, target):
# don't pass target as an argument to set_blocked_async. Avoids
# triggering a bug in gst-python where pad_block_destroy_data calls
# CPython without acquiring the GIL
self._target = target
self._eventProbeId = target.add_event_probe(self._padEventCb)
self._srcpad.set_blocked_async(True, self._padBlockedCb)
def _unblockPad(self, target):
target.remove_event_probe(self._eventProbeId)
self._eventProbeId = None
self._srcpad.set_blocked_async(False, self._padBlockedCb)
def _padBlockedCb(self, ghost, blocked):
if not blocked:
if self.pending_newsegment is not None:
self._srcpad.push_event(self.pending_newsegment)
self.pending_newsegment = None
return
self._exposePad(target=self._target)
self._unblockPad(target=self._target)
def _padEventCb(self, pad, event):
if event.type == gst.EVENT_TAG:
self.debug("dropping TAG event")
return False
if event.type != gst.EVENT_NEWSEGMENT:
self.warning("first event: %s is not a NEWSEGMENT, bailing out" %
event)
self._exposePad(target=pad)
self._unblockPad(target=pad)
return True
self.debug("stored pending newsegment")
self.pending_newsegment = event
return False
def _markValidElements(self, element):
"""
Mark this element and upstreams as valid
"""
self.log("element:%s" % element.get_name())
if element == self.typefind:
return
self._validelements.append(element)
# find upstream element
pad = list(element.sink_pads())[0]
parent = pad.get_peer().get_parent()
self._markValidElements(parent)
def _removeUnusedElements(self, element):
"""
Remove unused elements connected to srcpad(s) of element
"""
self.log("element:%r" % element)
for pad in list(element.src_pads()):
if pad.is_linked():
peer = pad.get_peer().get_parent()
if isinstance(peer, gst.Element):
self._removeUnusedElements(peer)
if not peer in self._validelements:
self.log("removing %s" % peer.get_name())
pad.unlink(pad.get_peer())
peer.set_state(gst.STATE_NULL)
self.remove(peer)
def _cleanUp(self):
self.log("")
if self._srcpad:
self.remove_pad(self._srcpad)
self._srcpad = None
self._target = None
for element in self._validelements:
element.set_state(gst.STATE_NULL)
self.remove(element)
self._validelements = []
## Overrides
def do_change_state(self, transition):
self.debug("transition:%r" % transition)
res = gst.Bin.do_change_state(self, transition)
if transition == gst.STATE_CHANGE_PAUSED_TO_READY:
self._cleanUp()
return res
## Signal callbacks
def _typefindHaveTypeCb(self, typefind, probability, caps):
self.debug("probability:%d, caps:%s" % (probability, caps.to_string()))
self._closePadLink(typefind, typefind.get_pad("src"), caps)
## Dynamic element Callbacks
def _dynamicPadAddedCb(self, element, pad):
self.log("element:%s, pad:%s" % (element.get_name(), pad.get_name()))
if not self._srcpad:
self._closePadLink(element, pad, pad.get_caps())
def _dynamicNoMorePadsCb(self, element):
self.log("element:%s" % element.get_name())
gobject.type_register(SingleDecodeBin)
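
With the Video and Audio pipelines now built from stock filesrc and decodebin2 elements (see audio.py above and video.py below), this pitivi-derived single-stream decodebin is no longer needed. For comparison, the replacement wiring reduces to roughly the following; a sketch only, with a fakesink consumer, a hypothetical path and no error handling:

import pygst
pygst.require("0.10")
import gst

pipeline = gst.Pipeline()
src = gst.element_factory_make("filesrc")
src.props.location = "/tmp/clip.avi"            # hypothetical input
dbin = gst.element_factory_make("decodebin2")
sink = gst.element_factory_make("fakesink")
pipeline.add(src, dbin, sink)
src.link(dbin)

def on_pad_added(dbin, pad):
    # decodebin2 exposes one pad per stream once typefinding is done
    if 'video' in str(pad.get_caps()):
        pad.link(sink.get_pad("sink"))

dbin.connect('pad-added', on_pad_added)
pipeline.set_state(gst.STATE_PAUSED)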

oxtimeline/timeline.py (deleted)

@@ -1,223 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2008
import gobject
gobject.threads_init()
from glob import glob
import math
import os
import time
import pygst
pygst.require("0.10")
import gst
import Image
from singledecodebin import SingleDecodeBin
from imagesink import ImageSink
from video import Video
class Timeline(Video):
lastPos=0
def __init__(self, uri):
Video.__init__(self, uri, '25/1')
bus = self.get_bus()
bus.add_signal_watch()
self.watch_id = bus.connect("message", self.onBusMessage)
self.mainloop = gobject.MainLoop()
def extract(self, prefix, width, height):
self.tile_width = width
self.tile_height = height
self.prefix = prefix
self.timeline_fps = 25
self.input_tile_width = int(math.ceil((float(self.framerate)/self.timeline_fps) * width))
ntiles = int(math.ceil(float(self.frames)/self.input_tile_width))
self.tiles = []
for i in range(ntiles):
tile = Image.new("RGB", (self.input_tile_width, height))
self.tiles.append(tile)
self.set_state(gst.STATE_PLAYING)
self.mainloop.run()
for i in range(ntiles):
tile = self.tiles[i]
if tile.size[0] != self.tile_width:
tile = tile.resize((self.tile_width, self.tile_height), Image.ANTIALIAS)
if i < (ntiles-1):
frames = self.input_tile_width
else:
frames = self.frames-((ntiles-1)*self.input_tile_width)
tile_width = int(math.ceil(self.timeline_fps*frames)/float(self.framerate))
if -2 < self.tile_width - tile_width < 2:
tile_width = self.tile_width
tile = tile.crop((0, 0, tile_width, self.tile_height))
filename = "%s.%s.%04d.png" % (self.prefix, self.tile_height, i)
tile.save(filename)
def done(self):
self.mainloop.quit()
def _frameCb(self, unused_thsink, frame, timestamp):
self.log("image:%s, timestamp:%s" % (frame, gst.TIME_ARGS(timestamp)))
if not self._ready:
# we know we're prerolled when we get the initial thumbnail
self._ready = True
else:
_framePos = int(math.ceil((float(timestamp) / (gst.SECOND) * float(self.framerate))))
frame = frame.resize((1, self.tile_height), Image.ANTIALIAS)
for framePos in range(self.lastPos, _framePos):
tile = int(math.floor(float(framePos) / self.input_tile_width))
tilePos = framePos - (tile * self.input_tile_width)
for i in range(self.tile_height):
self.tiles[tile].putpixel((tilePos, i), frame.getpixel((0, i)))
self.lastPos = _framePos
if self.mainloop and timestamp >= self.duration:
self.done()
def onBusMessage(self, bus, message):
if message.src == self and message.type == gst.MESSAGE_EOS:
self.done()
def loadTimeline(timeline_prefix, height=64):
files = sorted(glob('%s.%s.*.png' % (timeline_prefix, height)))
f = Image.open(files[0])
width = f.size[0]
f = Image.open(files[-1])
duration = f.size[0] + (len(files)-1)*width
timeline = Image.new("RGB", (duration, height))
pos = 0
for f in files:
part = Image.open(f)
timeline.paste(part, (pos, 0, pos + part.size[0], height))
pos += part.size[0]
return timeline
def createTimelineMultiline(timeline_prefix, width=600, height=16):
lineWidth = width
timlelineHeight = height
timeline = loadTimeline(timeline_prefix)
duration = timeline.size[0]
width = duration/25 #one pixel per second
timeline = timeline.resize((width, timlelineHeight), Image.ANTIALIAS).convert('RGBA')
lineHeight = timlelineHeight + 2 * 4
lines = int(math.ceil(width / lineWidth) + 1)
size = (lineWidth, lineHeight * lines)
timelineColor = (64, 64, 64)
i = Image.new("RGBA", size)
#padd end with nothing to fit to grid
t = Image.new("RGBA", (lineWidth * lines, timlelineHeight))
t.paste(timeline, (0, 0))
for currentLine in range(0, lines):
offset = currentLine * lineHeight + 4
toffset = currentLine * lineWidth
try:
tbox = t.crop((toffset, 0, toffset + lineWidth, timlelineHeight))
box = ((0, offset , tbox.size[0], offset + tbox.size[1]))
i.paste(tbox, box)
except:
broken = True
width = lineWidth
if currentLine == lines -1:
width = duration - (lines - 1) * lineWidth
box = ((0, offset , width, offset + timlelineHeight))
i.paste(timelineColor, box)
timeline_file = '%s.overview.png' % (timeline_prefix)
i.save(timeline_file, 'PNG')
timeline8_file = '%s.overview.8.png' % (timeline_prefix)
if lines < 8:
i.save(timeline8_file, 'PNG')
else:
i.crop((0,0,lineWidth, 8 * lineHeight)).save(timeline8_file, 'PNG')
def makeTimelineByFramesPerPixel(timeline_prefix, frames_per_pixel, inpoint=0, outpoint=0, height=16):
pos = 0
input_scale = 25
timeline_file = '%s.pixels.%s.png' % (timeline_prefix, width)
if outpoint > 0:
timeline_file = '%s.pixels.%s.%d-%d.png' % (timeline_prefix, width, inpoint, outpoint)
timeline = loadTimeline(timeline_prefix)
duration = timeline.size[0]
width = duration / frames_per_pixel
if inpoint<=0:
inpoint = 0
else:
inpoint = inpoint * input_scale
if outpoint<=0:
outpoint = pos
else:
outpoint = outpoint * input_scale
timeline = timeline.crop((inpoint, 0, outpoint, timeline.size[1])).resize((width, height), Image.ANTIALIAS)
timeline.save(timeline_file)
def makeTimelineOverview(timeline_prefix, width, inpoint=0, outpoint=0, duration=-1, height=16):
input_scale = 25
timeline_file = '%s.%s.png' % (timeline_prefix, height)
if outpoint > 0:
timeline_file = '%s.overview.%s.%d-%d.png' % (timeline_prefix, height, inpoint, outpoint)
timeline = loadTimeline(timeline_prefix)
duration = timeline.size[0]
if inpoint<=0:
inpoint = 0
else:
inpoint = inpoint * input_scale
if outpoint<=0:
outpoint = duration
else:
outpoint = outpoint * input_scale
timeline = timeline.crop((inpoint, 0, outpoint, timeline.size[1])).resize((width, height), Image.ANTIALIAS)
timeline.save(timeline_file)
def makeTiles(timeline_prefix, height=16, width=3600):
files = glob('%s.64.*.png' % timeline_prefix)
fps = 25
part_step = 60
output_width = width
width = len(files) * part_step
timeline = Image.new("RGB", (width, height))
pos = 0
for f in sorted(files):
part = Image.open(f)
part_width = int(part.size[0] / fps)
part = part.resize((part_width, height), Image.ANTIALIAS)
timeline.paste(part, (pos, 0, pos+part_width, height))
pos += part_width
timeline = timeline.crop((0, 0, pos, height))
pos = 0
i = 0
while pos < timeline.size[0]:
end = min(pos+output_width, timeline.size[0])
timeline.crop((pos, 0, end, timeline.size[1])).save('%s.%s.%04d.png' % (timeline_prefix, timeline.size[1], i))
pos += output_width
i += 1

oxtimeline/video.py

@@ -1,45 +1,30 @@
-#!/usr/bin/python
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
-# GPL 2008
-import gobject
-gobject.threads_init()
-import pygst
-pygst.require("0.10")
-import gst
+# GPL 2008-2010
+from __future__ import division
+
 import Image
 import time
+import math
+
+import gobject
+import gst
 
-from singledecodebin import SingleDecodeBin
 from imagesink import ImageSink
 
 class Video(gst.Pipeline):
-    #def log(self, msg):
-    #    print msg
-    def __init__(self, uri, framerate='25/1'):
+    duration = -1
+
+    def __init__(self, uri, height=64, framerate=gst.Fraction(25, 1)):
         gst.Pipeline.__init__(self)
+        self.height = height
         self.framerate = framerate
-        self.duration = -1
-        # queue of timestamps
-        self._queue = []
-        # queue callbacks
-        self.callback = {}
-        # extracted frames
-        self._frames = {}
-        # true only if we are prerolled
-        self._ready = False
-
-        uri = 'file://' + uri
-        self.log("uri : %s" % uri)
-        self.uri = uri
-        self.sbin = SingleDecodeBin(caps=gst.Caps("video/x-raw-rgb;video/x-raw-yuv"),
-                                    uri=self.uri)
+
+        self.src = gst.element_factory_make("filesrc")
+        self.src.props.location = uri
+        self.sbin = gst.element_factory_make("decodebin2")
         self.csp = gst.element_factory_make("ffmpegcolorspace")
         self.scale = gst.element_factory_make("videoscale")
         self.rate = gst.element_factory_make("videorate")
@@ -48,52 +33,39 @@ class Video(gst.Pipeline):
         self.sink = ImageSink()
         self.sink.connect('frame', self._frameCb)
 
-        self.add(self.sbin, self.csp, self.queue, self.scale, self.rate, self.sink)
+        self.add(self.src, self.sbin, self.csp, self.queue, self.scale, self.rate, self.sink)
+        self.src.link(self.sbin)
         self.sbin.connect('pad-added', self._sbinPadAddedCb)
         self.set_state(gst.STATE_PAUSED)
         self.get_state()
-        self.width = self.sink.width
-        self.height = self.sink.height
-        self.framerate = self.sink.framerate
         self.getDuration()
         self.frames = int((float(self.duration) / gst.SECOND) * float(self.framerate))
 
     def _sbinPadAddedCb(self, unused_sbin, pad):
-        self.log("pad : %s" % pad)
-        pad.link(self.queue.get_pad("sink"))
         caps = pad.get_caps()
-        width = caps[0]["width"]
-        height = caps[0]["height"]
-        if width % 4:
-            width += 4 - width % 4
-        if height % 4:
-            height += 4 - height % 4
-        self.queue.link(self.scale)
-        self.scale.link(self.rate)
-        self.rate.link(self.csp, gst.Caps("video/x-raw-rgb;video/x-raw-yuv,framerate=%s,width=%s,height=%s" % (self.framerate, width, height)))
-        self.csp.link(self.sink)
+        if 'video' in str(caps):
+            pad.link(self.queue.get_pad("sink"))
+            caps = pad.get_caps()
+            self.queue.link(self.scale)
+            self.scale.link(self.rate)
+            height = self.height
+            if 'width' in caps[0].keys() and isinstance(caps[0]['width'], int):
+                width = int(caps[0]['width']/caps[0]['height'] * height)
+            else:
+                width = int(4/3 * height)
+            if width % 4:
+                width += 4 - width % 4
+            if height % 4:
+                height += 4 - height % 4
+            self.rate.link(self.csp, gst.Caps("video/x-raw-rgb;video/x-raw-yuv,framerate=%s/%s,width=%s,height=%s" % (self.framerate.num, self.framerate.denom, width, height)))
+            self.csp.link(self.sink)
 
     def _frameCb(self, unused_thsink, frame, timestamp):
-        self.log("image:%s, timestamp:%s" % (frame, gst.TIME_ARGS(timestamp)))
-        if not self._ready:
-            # we know we're prerolled when we get the initial thumbnail
-            self._ready = True
-        elif timestamp in self.callback and self.callback[timestamp]:
-            self.callback[timestamp](frame, timestamp)
-            del self.callback[timestamp]
-            if timestamp in self._queue:
-                self._queue.remove(timestamp)
-            if self._queue:
-                # still some more thumbnails to process
-                gobject.idle_add(self._getFrame, self._queue.pop(0))
+        pass
 
     def getDuration(self):
         if self.duration < 0:
@@ -104,59 +76,69 @@ class Video(gst.Pipeline):
                     format, self.duration = q.parse_duration()
         return self.duration
 
-    def getFrame(self, timestamp, callback):
-        """
-        Queue a frame request for the given timestamp,
-        callback is called once frame is extracted.
-        returns False if timestamp > duration
-        """
-        self.log("timestamp %s" % gst.TIME_ARGS(timestamp))
-        if self.duration < 0:
-            self.getDuration()
-        if timestamp > self.duration:
-            self.log("timestamp %s > duration %s" % (timestamp, self.duration))
-            return False
-
-        self.callback[timestamp] = callback
-
-        if self._queue or not self._ready:
-            self.log('ready')
-            self._queue.append(timestamp)
-        else:
-            self._queue.append(timestamp)
-            self._getFrame(timestamp)
-        return True
-
-    def _getFrame(self, timestamp):
-        if not self._ready:
-            return
-        self.log("timestamp : %s" % gst.TIME_ARGS(timestamp))
-        self.seek(1.0, gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
-                  gst.SEEK_TYPE_SET, timestamp,
-                  gst.SEEK_TYPE_NONE, -1)
-        return False
-
-    def frame(self, timestamp):
-        self.mainloop = gobject.MainLoop()
-        self._frames[timestamp] = None
-        def callback(frame, timestamp):
-            self._frames[timestamp] = frame
-            self.mainloop.quit()
-        self._quit = False
-        def quit():
-            if self._quit:
-                self.mainloop.quit()
-                return False
-            else:
-                self._quit = True
-                return True
-        gobject.timeout_add(1000, quit)
-        if self.getFrame(timestamp, callback):
-            self.mainloop.run()
-        frame = self._frames[timestamp]
-        del self._frames[timestamp]
-        return frame
+
+class Timeline(Video):
+    _ready = False
+    lastPos=0
+    timeline_fps = 25
+
+    def __init__(self, uri, prefix, width, height):
+        Video.__init__(self, uri, height, gst.Fraction(self.timeline_fps, 1))
+        bus = self.get_bus()
+        bus.add_signal_watch()
+        self.watch_id = bus.connect("message", self.onBusMessage)
+        self.mainloop = gobject.MainLoop()
+
+        self.tile_width = width
+        self.tile_height = height
+        self.prefix = prefix
+        self.input_tile_width = int(math.ceil((float(self.framerate)/self.timeline_fps) * width))
+        ntiles = int(math.ceil(float(self.frames)/self.input_tile_width))
+        self.tiles = []
+        for i in range(ntiles):
+            tile = Image.new("RGB", (self.input_tile_width, height))
+            self.tiles.append(tile)
+
+        self.set_state(gst.STATE_PLAYING)
+        self.mainloop.run()
+
+        for i in range(ntiles):
+            tile = self.tiles[i]
+            if tile.size[0] != self.tile_width:
+                tile = tile.resize((self.tile_width, self.tile_height), Image.ANTIALIAS)
+            if i < (ntiles-1):
+                frames = self.input_tile_width
+            else:
+                frames = self.frames-((ntiles-1)*self.input_tile_width)
+            tile_width = int(math.ceil(self.timeline_fps*frames)/float(self.framerate))
+            if -2 < self.tile_width - tile_width < 2:
+                tile_width = self.tile_width
+            tile = tile.crop((0, 0, tile_width, self.tile_height))
+            filename = "%s.%s.%04d.png" % (self.prefix, self.tile_height, i)
+            tile.save(filename)
+
+    def done(self):
+        self.mainloop.quit()
+
+    def _frameCb(self, unused_thsink, frame, timestamp):
+        if not self._ready:
+            # we know we're prerolled when we get the initial thumbnail
+            self._ready = True
+        else:
+            _framePos = int(math.ceil((float(timestamp) / (gst.SECOND) * float(self.framerate))))
+            frame = frame.resize((1, self.tile_height), Image.ANTIALIAS)
+            for framePos in range(self.lastPos, _framePos):
+                tile = int(math.floor(float(framePos) / self.input_tile_width))
+                tilePos = framePos - (tile * self.input_tile_width)
+                for i in range(self.tile_height):
+                    self.tiles[tile].putpixel((tilePos, i), frame.getpixel((0, i)))
+            self.lastPos = _framePos
+        if self.mainloop and timestamp >= self.duration:
+            self.done()
+
+    def onBusMessage(self, bus, message):
+        if message.src == self and message.type == gst.MESSAGE_EOS:
+            self.done()
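
The rewritten pad handler scales decoded frames to the timeline height, keeps the source aspect ratio, falls back to 4:3 when the caps carry no usable width, and rounds both dimensions up to multiples of 4 before fixing the caps on the videorate-to-ffmpegcolorspace link. The rounding arithmetic in isolation; scaled_size is not part of the module, just a sketch of the maths above:

from __future__ import division

def scaled_size(src_width, src_height, timeline_height=64):
    # same arithmetic as Video._sbinPadAddedCb: keep aspect ratio, round up to multiples of 4
    height = timeline_height
    if src_width and src_height:
        width = int(src_width / src_height * height)
    else:
        width = int(4 / 3 * height)               # caps without a size: assume 4:3
    if width % 4:
        width += 4 - width % 4
    if height % 4:
        height += 4 - height % 4
    return width, height

print(scaled_size(1280, 720))    # -> (116, 64)
print(scaled_size(0, 0))         # -> (88, 64), the 4:3 fallback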