python-oxweb/oxweb/youtube.py

87 lines
3.3 KiB
Python
Raw Normal View History

# -*- coding: utf-8 -*-
2008-06-19 09:47:02 +00:00
# vi:si:et:sw=4:sts=4:ts=4
from urllib import quote
import xml.etree.ElementTree as ET
2008-09-30 13:58:21 +00:00
import re
2008-04-30 14:22:01 +00:00
import feedparser
2008-09-30 13:58:21 +00:00
from oxlib.cache import getUrl, getUrlUnicode
from oxlib import findString, findRe
2008-06-19 09:47:02 +00:00
2008-04-30 14:20:29 +00:00
def getVideoUrl(youtubeId, format='mp4'):
    """
    Return a direct download url for the given YouTube video id.

    Fetches the per-video token from the api2_rest endpoint and builds
    the get_video.php url from it.  format 'mp4' appends fmt=18 (the
    mp4 stream); any other value yields the default (flv) stream url.
    """
    token_url = 'http://www.youtube.com/api2_rest?method=youtube.videos.get_video_token&video_id=' + youtubeId
    # the token lives in the <t> element of the returned xml
    token = ET.fromstring(getUrl(token_url)).find('t').text
    video_url = "http://youtube.com/get_video.php?video_id=%s&t=%s" % (youtubeId, token)
    if format == 'mp4':
        video_url += "&fmt=18"
    return video_url
def getMovieInfo(youtubeId):
    """
    Return metadata for a YouTube video id, read from its gdata Atom feed.

    Fix: the feed url previously contained a stray trailing space after
    the id ("/videos/%s "), producing a malformed request url.

    NOTE(review): getInfoFromAtom is currently disabled (it only exists
    inside the commented-out block below), so this call raises NameError
    until that helper is restored.
    """
    url = "http://gdata.youtube.com/feeds/api/videos/%s" % youtubeId
    data = getUrl(url)
    fd = feedparser.parse(data)
    return getInfoFromAtom(fd.entries[0])
2008-09-30 13:58:21 +00:00
# Dead code: the earlier gdata/Atom based implementation, disabled by
# wrapping it in a module-level string literal (a cheap block comment).
# NOTE(review): getMovieInfo above still calls getInfoFromAtom, which is
# only defined inside this string -- restore or remove them together.
'''
def getInfoFromAtom(entry):
    info = dict()
    info['title'] = entry['title']
    info['description'] = entry['description']
    info['author'] = entry['author']
    info['published'] = entry['published_parsed']
    info['keywords'] = entry['media_keywords'].split(', ')
    info['url'] = entry['links'][0]['href']
    info['id'] = findString(info['url'], "/watch?v=")
    info['thumbnail'] = "http://img.youtube.com/vi/%s/0.jpg" % info['id']
    info['flv'] = getVideoUrl(info['id'], 'flv')
    info['mp4'] = getVideoUrl(info['id'], 'mp4')
    info['embed'] = '<object width="425" height="355"><param name="movie" value="http://www.youtube.com/v/%s&hl=en"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/%s&hl=en" type="application/x-shockwave-flash" wmode="transparent" width="425" height="355"></embed></object>' % (info['id'], info['id'])
    return info

def find(query, max_results=10, offset=1, orderBy='relevance'):
    query = quote(query)
    url = "http://gdata.youtube.com/feeds/api/videos?vq=%s&orderby=%s&start-index=%s&max-results=%s"%(query, orderBy, offset, max_results)
    data = getUrl(url)
    fd = feedparser.parse(data)
    videos = []
    for entry in fd.entries:
        v = getInfoFromAtom(entry)
        videos.append(v)
        if len(videos) >= max_results:
            return videos
    return videos
'''
def find(query, max_results=10, offset=1, orderBy='relevance', video_url_base=None):
    """
    Search YouTube by scraping the html results page.

    Returns a list of dicts with keys: id, link, title, description,
    thumbnail, and either video_link (when video_url_base is given) or
    video_url (resolved via getVideoUrl).

    Fixes vs. previous revision:
      - 'link' was built from "http//youtube.com/watch.v=%s" (missing
        ':' after http, '.' instead of '?') -- now a valid watch url.
      - called undefined get_video_url(); the helper in this module is
        getVideoUrl().
      - removed a dead regex (immediately overwritten) and the unused
        data_flat variable.

    NOTE(review): offset and orderBy are accepted for interface
    compatibility but are not used by the html scrape.
    """
    url = "http://youtube.com/results?search_query=%s&search=Search" % quote(query)
    data = getUrlUnicode(url)
    # one match per result thumbnail: (id, thumbnail url, title)
    regx = re.compile(r'''<a href="/watch\?v=(\w*?)" ><img src="(.*?)" class="vimg120" title="(.*?)" alt="video">''')
    videos = {}
    for youtubeId, thumbnail, title in regx.findall(data):
        if youtubeId in videos:
            continue  # results page may list the same video twice
        v = dict()
        v['id'] = youtubeId
        v['link'] = "http://youtube.com/watch?v=%s" % youtubeId
        v['title'] = title.strip()
        if video_url_base:
            v['video_link'] = "%s/%s" % (video_url_base, youtubeId)
        else:
            v['video_url'] = getVideoUrl(youtubeId)
        # description sits in a span whose id embeds the video id
        v['description'] = findRe(data, 'BeginvidDesc%s">(.*?)</span>' % youtubeId).strip().replace('<b>', ' ').replace('</b>', '')
        v['thumbnail'] = thumbnail
        videos[youtubeId] = v
        if len(videos) >= max_results:
            break
    return videos.values()