# ox/web/youtube.py
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from urllib import quote, unquote_plus
import re
from xml.dom.minidom import parseString
import feedparser
from ox.cache import read_url, cache_timeout
def video_url(youtubeId, format='mp4', timeout=cache_timeout):
    """
    Return the direct stream URL for a YouTube video, or None.

    youtubeId - id of video
    format    - video format, options: webm, 1080p, 720p, mp4, high
    timeout   - cache timeout (accepted for API compatibility; unused here)

    Returns None when no stream matching the requested format exists.
    """
    if format == 'webm':
        # webm streams are keyed by itag number (as strings);
        # the highest key is the highest-quality stream
        streams = videos(youtubeId, 'webm')
        return streams[max(streams.keys())]['url']
    # Map the named formats to YouTube itag numbers.
    # (The original used a fragile if/if/elif chain where the '1080p'
    # branch was detached from the rest of the chain.)
    fmt = {
        '1080p': 37,
        '720p': 22,
        'mp4': 18,
        'high': 35,
    }.get(format)
    streams = videos(youtubeId)
    # streams is keyed by itag as a string; unknown formats yield
    # fmt=None -> 'None' which is never a key, so we fall through to None
    if str(fmt) in streams:
        return streams[str(fmt)]['url']
def find(query, max_results=10, offset=1, orderBy='relevance'):
    """
    Search YouTube via the GData API.

    query       - search terms (will be URL-quoted)
    max_results - cap on the number of results returned
    offset      - 1-based start index into the result set
    orderBy     - GData ordering, e.g. 'relevance'

    Returns a list of (title, id, description) tuples, at most
    max_results long.
    """
    url = "http://gdata.youtube.com/feeds/api/videos?vq=%s&orderby=%s&start-index=%s&max-results=%s" % (
        quote(query), orderBy, offset, max_results)
    feed = feedparser.parse(read_url(url))
    results = []
    for entry in feed.entries:
        # the entry id is a URL; the video id is its last path segment
        video_id = entry['id'].split('/')[-1]
        results.append((entry['title'], video_id, entry['description']))
        if len(results) >= max_results:
            break
    return results
def info(id):
    """
    Return a metadata dict for the YouTube video with the given id.

    Always present: 'url', 'title', 'description', 'date', 'author',
    'categories', 'keywords'.  Present when available: 'license' and
    'subtitles' ({language_code: [{'start','end','value'}, ...]}).

    Raises on network failure or if the GData feed layout changes
    (IndexError/AttributeError from the XML lookups).
    """
    info = {}
    # Basic metadata from the GData v2 feed.
    url = "http://gdata.youtube.com/feeds/api/videos/%s?v=2" % id
    xml = parseString(read_url(url))
    info['url'] = 'http://www.youtube.com/watch?v=%s' % id
    info['title'] = xml.getElementsByTagName('title')[0].firstChild.data
    info['description'] = xml.getElementsByTagName('media:description')[0].firstChild.data
    # 'published' is ISO 8601; keep only the date part
    info['date'] = xml.getElementsByTagName('published')[0].firstChild.data.split('T')[0]
    info['author'] = "http://www.youtube.com/user/%s" % xml.getElementsByTagName('name')[0].firstChild.data
    info['categories'] = [cat.firstChild.data
                          for cat in xml.getElementsByTagName('media:category')]
    info['keywords'] = xml.getElementsByTagName('media:keywords')[0].firstChild.data.split(', ')

    license = _video_license(id)
    if license:
        info['license'] = license

    subtitles = _video_subtitles(id)
    if subtitles:
        info['subtitles'] = subtitles
    return info

def _video_license(id):
    # Scrape the license text from the watch page; None if not found.
    data = read_url("http://www.youtube.com/watch?v=%s" % id)
    match = re.compile('<h4>License:</h4>(.*?)</p>', re.DOTALL).findall(data)
    if match:
        # strip surrounding whitespace and any inline markup
        return re.sub('<.+?>', '', match[0].strip()).strip()
    return None

def _video_subtitles(id):
    # Fetch closed-caption tracks via the timedtext API.
    # Returns {language_code: [{'start','end','value'}, ...]}, empty if none.
    url = "http://www.youtube.com/api/timedtext?hl=en&type=list&tlangs=1&v=%s&asrs=1" % id
    xml = parseString(read_url(url))
    languages = [t.getAttribute('lang_code') for t in xml.getElementsByTagName('track')]
    subtitles = {}
    for language in languages:
        url = "http://www.youtube.com/api/timedtext?hl=en&v=%s&type=track&lang=%s&name&kind" % (id, language)
        xml = parseString(read_url(url))
        subs = []
        for t in xml.getElementsByTagName('text'):
            start = float(t.getAttribute('start'))
            duration = t.getAttribute('dur')
            if not duration:
                # some cues omit 'dur'; fall back to a 2 second display time
                duration = '2'
            end = start + float(duration)
            # empty cues have no text child; use '' instead of crashing
            text = t.firstChild.data if t.firstChild else ''
            subs.append({
                'start': start,
                'end': end,
                'value': text,
            })
        subtitles[language] = subs
    return subtitles
def videos(id, format=''):
    """
    Return the available streams for a video, keyed by itag (string).

    id     - YouTube video id
    format - optional filter: 'flv', 'webm' or 'mp4'; '' returns all

    Each value is a dict of the decoded url_encoded_fmt_stream_map
    fields (notably 'url', 'type', 'itag').  Returns {} when the
    stream map cannot be found on the watch page.
    """
    stream_type = {
        'flv': 'video/x-flv',
        'webm': 'video/webm',
        'mp4': 'video/mp4'
    }.get(format)
    url = "http://www.youtube.com/watch?v=%s" % id
    data = read_url(url)
    match = re.compile('"url_encoded_fmt_stream_map": "(.*?)"').findall(data)
    streams = {}
    if not match:
        # page layout changed or video unavailable;
        # the original raised IndexError on match[0] here
        return streams
    for x in match[0].split(','):
        stream = {}
        for s in x.split('\\u0026'):
            # a value may itself contain '=', so split only on the first one
            key, value = s.split('=', 1)
            stream[key] = unquote_plus(value)
        if not stream_type or stream['type'].startswith(stream_type):
            streams[stream['itag']] = stream
    return streams