use gst.extend.discoverer for oxinfo

Author: j
Date: 2009-01-27 15:22:25 +05:30
parent 0f5252e7a7
commit 82ba870e29
2 changed files with 42 additions and 115 deletions
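For context, the rewrite below leans on the asynchronous 'discovered' callback of gst.extend.discoverer. A minimal standalone sketch of that pattern, assuming gst-python 0.10 is installed; the file path and callback name are illustrative, not taken from the diff:

    import gobject
    import pygst
    pygst.require("0.10")
    import gst
    from gst.extend import discoverer

    def on_discovered(d, is_media):
        # after discovery the Discoverer object exposes is_video, is_audio,
        # videowidth, videoheight, videorate, videolength, audiorate,
        # audiochannels, audiolength, videocaps, audiocaps and tags
        print is_media
        loop.quit()

    loop = gobject.MainLoop()
    d = discoverer.Discoverer('/path/to/clip.avi')  # illustrative path
    d.connect('discovered', on_discovered)
    d.discover()
    loop.run()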


@@ -16,18 +16,18 @@ if os.path.exists(os.path.join(root, 'oxgst')):
     from oxgst import Info
 
 if __name__ == "__main__":
     parser = OptionParser()
     parser.add_option('-f', '--format', dest='format', help='output format: cfg, json, xml default: cfg')
     (opts, args) = parser.parse_args()
     if not args:
         parser.print_help()
-        sys.exit()
+        sys.exit(1)
     inputFile = args[0]
-    i = Info(inputFile)
-    info = i.metadata
+    if not os.path.exists(inputFile):
+        sys.exit(1)
+    info = Info(inputFile)
     if opts.format == 'xml':
         xml = ET.Element("gstinfo")
         el = ET.SubElement(xml, "path")
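For callers, the visible difference in the hunk above is that Info no longer exposes a separate .metadata dict; the instance itself is the metadata mapping. A minimal sketch of the two access styles (inputFile is illustrative):

    # old interface (removed above)
    i = Info(inputFile)
    info = i.metadata          # metadata lived in a plain dict attribute

    # new interface
    info = Info(inputFile)     # Info is now a dict subclass; use it directly
    print info.get('duration')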


@@ -9,125 +9,52 @@ import os
 import pygst
 pygst.require("0.10")
 import gst
+from gst.extend import discoverer
 
 codec_list = {
-    'MPEG-1 layer 3': 'MPEG-1 layer 3',
-    'MPEG-1 layer 3 audio': 'MPEG-1 layer 3',
+    'MPEG-1 layer 3': 'MPEG-1 Audio layer 3',
+    'MPEG-1 layer 3 audio': 'MPEG-1 Audio layer 3',
+    'MPEG 1 Audio, Layer 2': 'MPEG-1 Audio layer 2',
     'VP6 Flash video': 'VP6',
     'AC-3 audio': 'AC-3',
     'Uncompressed 16-bit PCM audio': 'Uncompressed 16-bit PCM',
     'Generic DV': 'DV Video',
 }
 
-class Info:
-    video_done = True
-    audio_done = True
-    video = None
-    audio = None
-    metadata = {}
-    tags = {}
-
-    def __init__(self, videofile):
-        self.mainloop = gobject.MainLoop()
-        self.pipeline = gst.parse_launch('filesrc name=input ! decodebin name=dbin')
-        self.input = self.pipeline.get_by_name('input')
-        self.input.props.location = videofile
-        self.dbin = self.pipeline.get_by_name('dbin')
-        if os.path.exists(videofile):
-            self.metadata['size'] = os.stat(videofile).st_size
-        else:
-            self.metadata['size'] = 0
-        if self.metadata['size'] != 0:
-            self.bus = self.pipeline.get_bus()
-            self.dbin.connect('new-decoded-pad', self.demux_pad_added)
-            self.bus.add_signal_watch()
-            self.watch_id = self.bus.connect("message", self.onBusMessage)
-            self.pipeline.set_state(gst.STATE_PAUSED)
-            self.pipeline.get_state()
-            #duration
-            pads = None
-            if self.video:
-                pads = self.video.sink_pads()
-            elif self.audio:
-                pads = self.audio.sink_pads()
-            if pads:
-                q = gst.query_new_duration(gst.FORMAT_TIME)
-                for pad in pads:
-                    if pad.get_peer() and pad.get_peer().query(q):
-                        format, self.duration = q.parse_duration()
-                        self.metadata["duration"] = self.duration/gst.MSECOND
-            self.mainloop.run()
-            if 'video-codec' in self.tags:
-                self.metadata['video-codec'] = codec_list.get(self.tags['video-codec'], self.tags['video-codec'])
-            if 'audio-codec' in self.tags:
-                self.metadata['audio-codec'] = codec_list.get(self.tags['audio-codec'], self.tags['audio-codec'])
-
-    def get_audio_info_cb(self, sink, buffer, pad):
-        caps = sink.sink_pads().next().get_negotiated_caps()
-        for s in caps:
-            self.metadata["channels"] = s['channels']
-            self.metadata["samplerate"] = s['rate']
-        self.audio.disconnect(self.audio_cb)
-        self.audio_done = True
-        if self.audio_done and self.video_done:
-            self.bus.post(gst.message_new_eos(self.pipeline))
-
-    def get_frame_info_cb(self, sink, buffer, pad):
-        caps = sink.sink_pads().next().get_negotiated_caps()
-        for s in caps:
-            self.metadata["width"] = s['width']
-            self.metadata["height"] = s['height']
-            self.metadata["framerate"] = float(s['framerate'])
-            if 'pixel-aspect-ratio' in s.keys():
-                self.metadata["pixel-aspect-ratio"] = "%d:%d" % (s['pixel-aspect-ratio'].num, s['pixel-aspect-ratio'].denom)
-        self.video.disconnect(self.video_cb)
-        self.video_done = True
-        if self.audio_done and self.video_done:
-            self.bus.post(gst.message_new_eos(self.pipeline))
-
-    def demux_pad_added(self, element, pad, bool):
-        caps = pad.get_caps()
-        structure = caps[0]
-        stream_type = structure.get_name()
-        if stream_type.startswith('video'):
-            colorspace = gst.element_factory_make("ffmpegcolorspace");
-            self.pipeline.add (colorspace);
-            colorspace.set_state (gst.STATE_PLAYING);
-            pad.link (colorspace.get_pad("sink"));
-            self.video_done = False
-            self.video = gst.element_factory_make("fakesink")
-            self.video.props.signal_handoffs = True
-            self.pipeline.add(self.video)
-            self.video.set_state (gst.STATE_PLAYING);
-            colorspace.link (self.video);
-            self.video_cb = self.video.connect("handoff", self.get_frame_info_cb)
-        elif stream_type.startswith('audio'):
-            self.audio_done = False
-            self.audio = gst.element_factory_make("fakesink")
-            self.audio.props.signal_handoffs = True
-            self.pipeline.add(self.audio)
-            self.audio.set_state (gst.STATE_PLAYING);
-            pad.link(self.audio.get_pad('sink'))
-            self.audio_cb = self.audio.connect("handoff", self.get_audio_info_cb)
-
-    def quit(self):
-        self.pipeline.set_state(gst.STATE_NULL)
-        self.pipeline.get_state()
-        self.mainloop.quit()
-
-    def onBusMessage(self, bus, message):
-        if message.type == gst.MESSAGE_TAG:
-            for key in message.parse_tag().keys():
-                self.tags[key] = message.structure[key]
-        if message.type == gst.MESSAGE_ERROR:
-            self.quit()
-        if message.src == self.pipeline and message.type == gst.MESSAGE_EOS:
-            self.quit()
+class Info(dict):
+    def __init__(self, path):
+        self['size'] = os.stat(path).st_size
+        mainloop = gobject.MainLoop()
+
+        def discovered(d, is_media):
+            if is_media:
+                if d.is_video:
+                    #self['video caps'] = d.videocaps
+                    #self['framerate (fps)'] = '%s:%s' % (d.videorate.num, d.videorate.denom)
+                    self['width'] = d.videowidth
+                    self['height'] = d.videoheight
+                    self['duration'] = d.videolength/gst.MSECOND
+                    self['framerate'] = float(d.videorate)
+                    s = d.videocaps[0]
+                    if 'pixel-aspect-ratio' in s.keys():
+                        self["pixel-aspect-ratio"] = "%d:%d" % (s['pixel-aspect-ratio'].num, s['pixel-aspect-ratio'].denom)
+                if d.is_audio:
+                    #self['audio caps'] = d.audiocaps
+                    #self['audio format'] = d.audiofloat and 'floating-point' or 'integer'
+                    #self['sample width (bits)'] = d.audiowidth
+                    #self['sample depth (bits)'] = d.audiodepth
+                    self['samplerate'] = d.audiorate
+                    self['duration'] = max(self.get('duration', 0), d.audiolength/gst.MSECOND)
+                    self['channels'] = d.audiochannels
+                if 'video-codec' in d.tags:
+                    self['video-codec'] = codec_list.get(d.tags['video-codec'], d.tags['video-codec'])
+                if 'audio-codec' in d.tags:
+                    self['audio-codec'] = codec_list.get(d.tags['audio-codec'], d.tags['audio-codec'])
+            mainloop.quit()
+
+        d = discoverer.Discoverer(path)
+        d.connect('discovered', discovered)
+        d.discover()
+        mainloop.run()
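A minimal usage sketch of the reworked class, assuming a decodable media file; the path is illustrative, and only the keys actually set in the callback above will be present:

    from oxgst import Info

    info = Info('/path/to/clip.avi')
    for key in ('size', 'duration', 'width', 'height', 'framerate',
                'pixel-aspect-ratio', 'samplerate', 'channels',
                'video-codec', 'audio-codec'):
        if key in info:
            print '%s: %s' % (key, info[key])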