# -*- Mode: Python; -*-
# -*- coding: utf-8 -*-
# vi:si:et:sw=2:sts=2:ts=2
# Fetch video metadata, stream URLs and search results from YouTube's
# REST (api2_rest) and GData feed APIs.
from urllib import quote
import xml.etree.ElementTree as ET

import feedparser

from oxutils.cache import getUrl
from oxutils import findString

def getVideoUrl(youtubeId, format='mp4'):
  """Return a direct download URL for the given video id.

  Requests a video token from the api2_rest endpoint, then builds a
  get_video.php URL; 'mp4' maps to fmt=18, any other format falls back
  to the default FLV stream.
  """
  url = 'http://www.youtube.com/api2_rest?method=youtube.videos.get_video_token&video_id=' + youtubeId
  data = getUrl(url)
  xml = ET.fromstring(data)
  youtubeKey = xml.find('t').text
  if format == 'mp4':
    fmt = 18
    url = "http://youtube.com/get_video.php?video_id=%s&t=%s&fmt=%s" % (youtubeId, youtubeKey, fmt)
  else:
    url = "http://youtube.com/get_video.php?video_id=%s&t=%s" % (youtubeId, youtubeKey)
  return url

def getMovieInfo(youtubeId):
  """Return the info dict for a single video, looked up via the GData API."""
  url = "http://gdata.youtube.com/feeds/api/videos/%s" % youtubeId
  data = getUrl(url)
  fd = feedparser.parse(data)
  return getInfoFromAtom(fd.entries[0])

def getInfoFromAtom(entry):
  """Map a feedparser Atom entry to a plain info dict."""
  info = dict()
  info['title'] = entry['title']
  info['description'] = entry['description']
  info['author'] = entry['author']
  info['published'] = entry['published_parsed']
  info['keywords'] = entry['media_keywords'].split(', ')
  info['url'] = entry['links'][0]['href']
  # the video id is the part of the watch URL after "/watch?v="
  info['id'] = findString(info['url'], "/watch?v=")
  info['thumbnail'] = "http://img.youtube.com/vi/%s/0.jpg" % info['id']
  info['flv'] = getVideoUrl(info['id'], 'flv')
  info['mp4'] = getVideoUrl(info['id'], 'mp4')
  info['embed'] = '''<object width="425" height="355"><param name="movie" value="http://www.youtube.com/v/%s&hl=en"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/%s&hl=en" type="application/x-shockwave-flash" wmode="transparent" width="425" height="355"></embed></object>''' % (info['id'], info['id'])
  return info

def find(query, max_results=10, offset=1, orderBy='relevance'):
  """Search YouTube via the GData feed, returning up to max_results info dicts."""
  query = quote(query)
  url = "http://gdata.youtube.com/feeds/api/videos?vq=%s&orderby=%s&start-index=%s&max-results=%s" % (query, orderBy, offset, max_results)
  data = getUrl(url)
  fd = feedparser.parse(data)
  videos = []
  for entry in fd.entries:
    v = getInfoFromAtom(entry)
    videos.append(v)
    if len(videos) >= max_results:
      return videos
  return videos
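
if __name__ == '__main__':
  # Minimal usage sketch, not part of the original module: assumes oxutils
  # and feedparser are installed and that the long-deprecated api2_rest and
  # GData endpoints used above still respond. The 'title' and 'mp4' keys
  # come from getInfoFromAtom().
  for video in find('tokyo', max_results=2):
    print video['title']
    print video['mp4']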