use extract_frame from pad.ma

This commit is contained in:
j 2008-07-03 18:40:06 +02:00
parent ba183e2dfd
commit 5f9a4ab588
2 changed files with 154 additions and 164 deletions

View File

@@ -169,7 +169,7 @@ def extract_still(movie_file, png_file, inpoint):
inpoint = time2ms(inpoint)
extractClipScript = abspath(join(dirname(__file__), "tools/extract_frame.py"))
cmd = '''%s "%s" "%s" %s 0 -1''' % (extractClipScript, movie_file, png_file, inpoint)
run_command(cmd.encode('utf-8'))
run_command(cmd.encode('utf-8'), 100)
def extract_poster_still(movie_file, png_file, inpoint):
ext = movie_file.split('.')[-1]

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vi:si:et:sw=2:sts=2:ts=2
# GPL written 2008 by j@pad.ma
# vi:si:et:sw=4:sts=4:ts=4
# GPL 2008
import gobject
gobject.threads_init()
@@ -14,187 +14,177 @@ import Image
DEBUG=0
class ExtractFrame:
video = None
audio = None
info = {}
frame_img = None
frame_size = None
duration = 0
height = 0
width = 128
def __init__(self, videofile, frame, frame_pos, width=128, height=0):
self.width = width
self.height = height
self.frame_file = frame
self.frame_pos = frame_pos
self.mainloop = gobject.MainLoop()
self.pipeline = gst.parse_launch('filesrc name=input ! decodebin name=dbin')
self.input = self.pipeline.get_by_name('input')
self.input.props.location = videofile
self.dbin = self.pipeline.get_by_name('dbin')
video = None
audio = None
info = {}
frame_img = None
frame_size = None
duration = 0
height = 0
width = 128
def __init__(self, videofile, frame, frame_pos, width=128):
self.width = width
self.frame_file = frame
self.frame_pos = frame_pos
self.mainloop = gobject.MainLoop()
self.pipeline = gst.parse_launch('filesrc name=input ! decodebin name=dbin')
self.input = self.pipeline.get_by_name('input')
self.input.props.location = videofile
self.dbin = self.pipeline.get_by_name('dbin')
self.bus = self.pipeline.get_bus()
self.dbin.connect('new-decoded-pad', self.demux_pad_added)
self.bus = self.pipeline.get_bus()
self.dbin.connect('new-decoded-pad', self.demux_pad_added)
self.bus.add_signal_watch()
self.watch_id = self.bus.connect("message", self.onBusMessage)
self.bus.add_signal_watch()
self.watch_id = self.bus.connect("message", self.onBusMessage)
self.pipeline.set_state(gst.STATE_PAUSED)
self.pipeline.get_state()
self.pipeline.set_state(gst.STATE_PAUSED)
self.pipeline.get_state()
def run(self):
self.pipeline.set_state(gst.STATE_PLAYING)
self.pipeline.get_state()
def run(self):
self.pipeline.set_state(gst.STATE_PLAYING)
self.pipeline.get_state()
#duration
pads = None
if self.video:
pads = self.video.sink_pads()
elif self.audio:
pads = self.audio.sink_pads()
if pads:
q = gst.query_new_duration(gst.FORMAT_TIME)
for pad in pads:
if pad.get_peer() and pad.get_peer().query(q):
format, self.duration = q.parse_duration()
self.info["duration"] = self.duration/gst.MSECOND
#duration
pads = None
if self.video:
pads = self.video.sink_pads()
elif self.audio:
pads = self.audio.sink_pads()
if pads:
q = gst.query_new_duration(gst.FORMAT_TIME)
for pad in pads:
if pad.get_peer() and pad.get_peer().query(q):
format, self.duration = q.parse_duration()
self.info["duration"] = self.duration/gst.MSECOND
#seek
if self.frame_pos > self.duration:
self.debug('seek point greater than file duration %s' % (self.duration/gst.MSECOND))
return
#seek
if self.frame_pos > self.duration:
self.debug('seek point greater than file duration %s' % (self.duration/gst.MSECOND))
return
if (self.duration - self.frame_pos) < gst.SECOND:
seek_pos = self.duration- 10 * gst.SECOND
else:
seek_pos = self.frame_pos - 10 * gst.SECOND
seek_pos = max(0, seek_pos)
#extract
self.debug('seek tp %s'%seek_pos)
self.seek(seek_pos)
self.debug('get frame tp %s'%seek_pos)
self.frame_ho = self.video.connect("handoff", self.get_frame_cb)
self.pipeline.set_state(gst.STATE_PLAYING)
self.pipeline.get_state()
if (self.duration - self.frame_pos) < gst.SECOND:
seek_pos = self.duration- 10 * gst.SECOND
else:
seek_pos = self.frame_pos - 10 * gst.SECOND
seek_pos = max(0, seek_pos)
#extract
self.debug('seek tp %s'%seek_pos)
self.seek(seek_pos)
self.debug('get frame tp %s'%seek_pos)
self.frame_ho = self.video.connect("handoff", self.get_frame_cb)
self.pipeline.set_state(gst.STATE_PLAYING)
self.pipeline.get_state()
self.mainloop.run()
return self.info
self.mainloop.run()
return self.info
def seek(self, seek_pos):
event = gst.event_new_seek(1.0, gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
gst.SEEK_TYPE_SET, seek_pos,
gst.SEEK_TYPE_NONE, 0)
def seek(self, seek_pos):
event = gst.event_new_seek(1.0, gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
gst.SEEK_TYPE_SET, seek_pos,
gst.SEEK_TYPE_NONE, 0)
res = self.video.send_event(event)
if res:
self.pipeline.set_new_stream_time(0L)
else:
gst.error("seek to %r failed" % frame_pos)
res = self.video.send_event(event)
if res:
self.pipeline.set_new_stream_time(0L)
else:
gst.error("seek to %r failed" % frame_pos)
# fakesink "handoff" callback: keep capturing decoded RGB frames until we
# pass self.frame_pos, then post EOS so the mainloop can shut down.
def get_frame_cb(self, sink, frame_buffer, pad):
caps = sink.sink_pads().next().get_negotiated_caps()
for s in caps:
# Native size of the decoded video, from the negotiated caps.
input_size = (s['width'], s['height'])
if self.width > 0:
self.frame_size = self.scaleto(s['width'], s['height'])
else:
# width == 0 means: save the frame at its native size.
self.frame_size = None
#Why are the last frames broken, aka have green pixels
# Safety margin: keep the target at least ~4 frame-durations from EOF.
save_last_frame = (4*gst.SECOND/float(s['framerate']))
if (self.duration-self.frame_pos) < save_last_frame:
self.frame_pos = self.duration-save_last_frame
position, format = sink.query_position(gst.FORMAT_TIME)
# Overwrite the candidate frame while still at/before the target position.
if not self.frame_img or position <= self.frame_pos:
self.frame_img = Image.fromstring('RGB', input_size, frame_buffer)
else:
# Past the target: stop receiving handoffs and signal end-of-stream.
self.video.disconnect(self.frame_ho)
self.bus.post(gst.message_new_eos(self.pipeline))
# fakesink "handoff" callback: keep capturing decoded RGB frames until we
# pass self.frame_pos, then post EOS so the mainloop can shut down.
def get_frame_cb(self, sink, frame_buffer, pad):
caps = sink.sink_pads().next().get_negotiated_caps()
for s in caps:
# Native size of the decoded video, from the negotiated caps.
input_size = (s['width'], s['height'])
if self.width > 0:
self.frame_size = self.scaleto(s['width'], s['height'])
else:
# width == 0 means: save the frame at its native size.
self.frame_size = None
#Why are the last frames broken, aka have green pixels
# Safety margin: keep the target at least ~4 frame-durations from EOF.
save_last_frame = (4*gst.SECOND/float(s['framerate']))
if (self.duration-self.frame_pos) < save_last_frame:
self.frame_pos = self.duration-save_last_frame
position, format = sink.query_position(gst.FORMAT_TIME)
# Overwrite the candidate frame while still at/before the target position.
if not self.frame_img or position <= self.frame_pos:
self.frame_img = Image.fromstring('RGB', input_size, frame_buffer)
else:
# Past the target: stop receiving handoffs and signal end-of-stream.
self.video.disconnect(self.frame_ho)
self.bus.post(gst.message_new_eos(self.pipeline))
# Compute the target (width, height) from the source dimensions, preserving
# aspect ratio. One of self.width / self.height fixes the output dimension;
# the derived dimension is rounded down to an even value.
def scaleto(self, width, height):
if self.width:
# Width is fixed: derive height from the source aspect ratio.
height = int(self.width / (float(width) / height))
# Round down to even — presumably an encoder/frame-size constraint.
height = height - height % 2
return (self.width, height)
else:
# Height is fixed: derive width instead.
width = int(self.height * (float(width) / height))
width = width - width % 2
return (width, self.height)
# Compute the target (self.width, height) from the source dimensions,
# preserving aspect ratio; the derived height is also stored on self.height.
def scaleto(self, width, height):
height = int(self.width / (float(width) / height))
# Round down to even — presumably an encoder/frame-size constraint.
height = height - height % 2
self.height = height
return (self.width, height)
# One-shot audio "handoff" callback: record channel count and sample rate
# from the negotiated caps, then disconnect itself.
def get_audio_info_cb(self, sink, buffer, pad):
caps = sink.sink_pads().next().get_negotiated_caps()
for s in caps:
self.info["channels"] = s['channels']
self.info["samplerate"] = s['rate']
self.audio.disconnect(self.audio_cb)
# One-shot audio "handoff" callback: record channel count and sample rate
# from the negotiated caps, then disconnect itself.
def get_audio_info_cb(self, sink, buffer, pad):
caps = sink.sink_pads().next().get_negotiated_caps()
for s in caps:
self.info["channels"] = s['channels']
self.info["samplerate"] = s['rate']
self.audio.disconnect(self.audio_cb)
# One-shot video "handoff" callback: record frame size and framerate from
# the negotiated caps, then disconnect itself.
def get_frame_info_cb(self, sink, buffer, pad):
caps = sink.sink_pads().next().get_negotiated_caps()
for s in caps:
self.info["width"] = s['width']
self.info["height"] = s['height']
self.info["framerate"] = float(s['framerate'])
self.video.disconnect(self.video_cb)
# One-shot video "handoff" callback: record frame size, framerate and pixel
# aspect ratio from the negotiated caps, then disconnect itself.
def get_frame_info_cb(self, sink, buffer, pad):
caps = sink.sink_pads().next().get_negotiated_caps()
for s in caps:
self.info["width"] = s['width']
self.info["height"] = s['height']
self.info["framerate"] = float(s['framerate'])
# Stored as "num:denom", e.g. "1:1" for square pixels.
self.info["pixel-aspect-ratio"] = "%d:%d" % (s['pixel-aspect-ratio'].num, s['pixel-aspect-ratio'].denom)
self.video.disconnect(self.video_cb)
def demux_pad_added(self, element, pad, bool):
caps = pad.get_caps()
structure = caps[0]
stream_type = structure.get_name()
if stream_type.startswith('video'):
colorspace = gst.element_factory_make("ffmpegcolorspace");
self.pipeline.add (colorspace);
colorspace.set_state (gst.STATE_PLAYING);
pad.link (colorspace.get_pad("sink"));
def demux_pad_added(self, element, pad, bool):
caps = pad.get_caps()
structure = caps[0]
stream_type = structure.get_name()
if stream_type.startswith('video'):
colorspace = gst.element_factory_make("ffmpegcolorspace");
self.pipeline.add (colorspace);
colorspace.set_state (gst.STATE_PLAYING);
pad.link (colorspace.get_pad("sink"));
self.video = gst.element_factory_make("fakesink")
self.video.props.signal_handoffs = True
self.pipeline.add(self.video)
self.video.set_state (gst.STATE_PLAYING);
colorspace.link (self.video, gst.caps_from_string('video/x-raw-rgb'));
self.video_cb = self.video.connect("handoff", self.get_frame_info_cb)
elif stream_type.startswith('audio'):
self.audio = gst.element_factory_make("fakesink")
self.audio.props.signal_handoffs = True
self.pipeline.add(self.audio)
self.audio.set_state (gst.STATE_PLAYING);
pad.link(self.audio.get_pad('sink'))
self.audio_cb = self.audio.connect("handoff", self.get_audio_info_cb)
self.video = gst.element_factory_make("fakesink")
self.video.props.signal_handoffs = True
self.pipeline.add(self.video)
self.video.set_state (gst.STATE_PLAYING);
colorspace.link (self.video, gst.caps_from_string('video/x-raw-rgb'));
self.video_cb = self.video.connect("handoff", self.get_frame_info_cb)
elif stream_type.startswith('audio'):
self.audio = gst.element_factory_make("fakesink")
self.audio.props.signal_handoffs = True
self.pipeline.add(self.audio)
self.audio.set_state (gst.STATE_PLAYING);
pad.link(self.audio.get_pad('sink'))
self.audio_cb = self.audio.connect("handoff", self.get_audio_info_cb)
# Bus watch: quit once the pipeline itself reports end-of-stream.
def onBusMessage(self, bus, message):
if message.src == self.pipeline and message.type == gst.MESSAGE_EOS:
self.quit()
# Bus watch: quit once the pipeline itself reports end-of-stream.
def onBusMessage(self, bus, message):
if message.src == self.pipeline and message.type == gst.MESSAGE_EOS:
self.quit()
def quit(self):
    """Save the captured frame (if any) to frame_file, then tear the
    pipeline down and leave the glib mainloop."""
    if self.frame_img:
        # FIX: the original read `if self.frame_size` with no trailing
        # colon — a syntax error (the companion copy of this method has it).
        if self.frame_size:
            # Downscale to the precomputed target size before saving.
            img = self.frame_img.resize(self.frame_size, Image.ANTIALIAS)
        else:
            img = self.frame_img
        img.save(self.frame_file)
        self.debug('frame saved at %s' % self.frame_file)
    # STATE_NULL releases the pipeline; get_state() blocks until the
    # state change completes.
    self.pipeline.set_state(gst.STATE_NULL)
    self.pipeline.get_state()
    self.mainloop.quit()
# Save the captured frame (if any) to frame_file, then tear the pipeline
# down and leave the glib mainloop.
def quit(self):
if self.frame_img:
if self.frame_size:
# Downscale to the precomputed target size before saving.
img = self.frame_img.resize(self.frame_size, Image.ANTIALIAS)
else:
img = self.frame_img
img.save(self.frame_file)
self.debug('frame saved at %s' % self.frame_file)
# STATE_NULL releases the pipeline; get_state() waits for the change.
self.pipeline.set_state(gst.STATE_NULL)
self.pipeline.get_state()
self.mainloop.quit()
# Print msg only when the module-level DEBUG flag is set (Python 2 print).
def debug(self, msg):
if DEBUG:
print msg
# Print msg only when the module-level DEBUG flag is set (Python 2 print).
def debug(self, msg):
if DEBUG:
print msg
# CLI entry point: extract_frame.py INPUT OUTPUT OFFSET_MS [WIDTH] [HEIGHT]
if __name__ == "__main__":
import sys
# Defaults: 128px wide; height 0 means "derive from aspect ratio".
width = 128
height = 0
inputFile = sys.argv[1]
outputFile = sys.argv[2]
# Offset is given in milliseconds and converted to GStreamer nanoseconds.
offset = int(float(sys.argv[3]) * gst.MSECOND)
if len(sys.argv) > 4:
width = int(sys.argv[4])
if len(sys.argv) > 5:
height = int(sys.argv[5])
f = ExtractFrame(inputFile, outputFile, offset, width, height)
f.run()
# CLI entry point body: extract_frame.py INPUT OUTPUT OFFSET_MS [WIDTH]
import sys
# Default output width in pixels; height is derived from the aspect ratio.
width = 128
inputFile = sys.argv[1]
outputFile = sys.argv[2]
# Offset is given in milliseconds and converted to GStreamer nanoseconds.
offset = int(float(sys.argv[3]) * gst.MSECOND)
if len(sys.argv) > 4:
width = int(sys.argv[4])
f = ExtractFrame(inputFile, outputFile, offset, width)
f.run()