From d523862c9d69fcf0fd02170bc8097c63871da363 Mon Sep 17 00:00:00 2001
From: j <0x006A@0x2620.org>
Date: Tue, 7 Oct 2008 23:07:49 +0200
Subject: [PATCH] use gdata atom feeds
---
README | 2 +-
oxweb/youtube.py | 41 +++++++++++++++++++----------------------
setup.py | 1 -
3 files changed, 20 insertions(+), 24 deletions(-)
diff --git a/README b/README
index 08edf66..166b37a 100644
--- a/README
+++ b/README
@@ -5,7 +5,7 @@ Depends:
python-oxutils
python-beautifulsoup (http://www.crummy.com/software/BeautifulSoup/)
python-feedparser (http://www.feedparser.org/)
-
+ (there seem to be some issues if not using the one from Ubuntu/Debian)
Test:
nosetests --with-doctest oxweb
diff --git a/oxweb/youtube.py b/oxweb/youtube.py
index 4595498..31e388e 100644
--- a/oxweb/youtube.py
+++ b/oxweb/youtube.py
@@ -24,13 +24,6 @@ def getVideoKey(youtubeId):
return re.match(".*[?&]t=([^&]+)", location).groups()[0]
else:
return False
-
-def getVideoKeyLegacyAPI(videoId):
- url = 'http://www.youtube.com/api2_rest?method=youtube.videos.get_video_token&video_id=' + youtubeId
- data = getUrl(url)
- xml = ET.fromstring(data)
- youtubeKey = xml.find('t').text
- return youtubeKey
def getVideoUrl(youtubeId, format='mp4'):
youtubeKey = getVideoKey(youtubeId)
@@ -41,47 +34,50 @@ def getVideoUrl(youtubeId, format='mp4'):
url = "http://youtube.com/get_video.php?video_id=%s&t=%s" % (youtubeId, youtubeKey)
return url
-'''
-def getMovieInfo(youtubeId):
- url = "http://gdata.youtube.com/feeds/api/videos/%s " % youtubeId
+def getMovieInfo(youtubeId, video_url_base=None):
+ url = "http://gdata.youtube.com/feeds/api/videos/%s" % youtubeId
data = getUrl(url)
fd = feedparser.parse(data)
- return getInfoFromAtom(fd.entries[0])
+ return getInfoFromAtom(fd.entries[0], video_url_base)
-def getInfoFromAtom(entry):
+def getInfoFromAtom(entry, video_url_base=None):
info = dict()
info['title'] = entry['title']
info['description'] = entry['description']
info['author'] = entry['author']
- info['published'] = entry['published_parsed']
+ #info['published'] = entry['published_parsed']
info['keywords'] = entry['media_keywords'].split(', ')
info['url'] = entry['links'][0]['href']
info['id'] = findString(info['url'], "/watch?v=")
info['thumbnail'] = "http://img.youtube.com/vi/%s/0.jpg" % info['id']
- info['flv'] = getVideoUrl(info['id'], 'flv')
- info['mp4'] = getVideoUrl(info['id'], 'mp4')
+ if video_url_base:
+ info['flv'] = "%s/%s.%s" % (video_url_base, info['id'], 'flv')
+ info['mp4'] = "%s/%s.%s" % (video_url_base, info['id'], 'mp4')
+ else:
+ info['flv'] = getVideoUrl(info['id'], 'flv')
+ info['mp4'] = getVideoUrl(info['id'], 'mp4')
info['embed'] = '' % (info['id'], info['id'])
return info
-def find(query, max_results=10, offset=1, orderBy='relevance'):
+def find(query, max_results=10, offset=1, orderBy='relevance', video_url_base=None):
query = quote(query)
- url = "http://gdata.youtube.com/feeds/api/videos?vq=%s&orderby=%s&start-index=%s&max-results=%s"%(query, orderBy, offset, max_results)
- data = getUrl(url)
+ url = "http://gdata.youtube.com/feeds/api/videos?vq=%s&orderby=%s&start-index=%s&max-results=%s" % (query, orderBy, offset, max_results)
+ data = getUrlUnicode(url)
fd = feedparser.parse(data)
videos = []
for entry in fd.entries:
- v = getInfoFromAtom(entry)
+ v = getInfoFromAtom(entry, video_url_base)
videos.append(v)
if len(videos) >= max_results:
return videos
return videos
-'''
+'''
def find(query, max_results=10, offset=1, orderBy='relevance', video_url_base=None):
url = "http://youtube.com/results?search_query=%s&search=Search" % quote(query)
data = getUrlUnicode(url)
- regx = re.compile(''' ''')
+ regx = re.compile(' ')
id_title = regx.findall(data)
data_flat = data.replace('\n', ' ')
videos = {}
@@ -102,4 +98,5 @@ def find(query, max_results=10, offset=1, orderBy='relevance', video_url_base=No
if len(videos) >= max_results:
return videos.values()
return videos.values()
+'''
diff --git a/setup.py b/setup.py
index e4a39fe..00f07e1 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,6 @@ setup(
zip_safe=False,
install_requires=[
'oxlib',
- 'feedparser',
'beautifulsoup',
],
keywords = [