From 68f7621ac7c1e11a0dadd8026c9bb54954d1baf1 Mon Sep 17 00:00:00 2001
From: j <0x006A@0x2620.org>
Date: Thu, 3 Jul 2008 11:24:49 +0200
Subject: [PATCH] rename oxutils -> oxlib

---
 oxweb/criterion.py     |  6 +++---
 oxweb/dailymotion.py   |  2 +-
 oxweb/google.py        |  8 ++++----
 oxweb/imdb.py          | 14 +++++++-------
 oxweb/impawards.py     |  8 ++++----
 oxweb/itunes.py        |  8 ++++----
 oxweb/lyricsfly.py     |  6 +++---
 oxweb/mininova.py      | 10 +++++-----
 oxweb/opensubtitles.py | 10 +++++-----
 oxweb/spiegel.py       | 22 +++++++++++-----------
 oxweb/thepiratebay.py  | 10 +++++-----
 oxweb/torrent.py       |  2 +-
 oxweb/wikipedia.py     |  4 ++--
 oxweb/youtube.py       |  4 ++--
 setup.py               |  4 ++--
 15 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/oxweb/criterion.py b/oxweb/criterion.py
index d91d637..4585d1c 100644
--- a/oxweb/criterion.py
+++ b/oxweb/criterion.py
@@ -2,9 +2,9 @@
 # vi:si:et:sw=4:sts=4:ts=4
 import re
 
-from oxutils.cache import getUrlUnicode
-from oxutils.html import stripTags
-from oxutils.text import findRe, removeSpecialCharacters
+from oxlib.cache import getUrlUnicode
+from oxlib.html import stripTags
+from oxlib.text import findRe, removeSpecialCharacters
 
 import imdb
diff --git a/oxweb/dailymotion.py b/oxweb/dailymotion.py
index f50b5eb..7f9f3cb 100644
--- a/oxweb/dailymotion.py
+++ b/oxweb/dailymotion.py
@@ -2,7 +2,7 @@
 # vi:si:et:sw=4:sts=4:ts=4
 import re
 from urllib import unquote
-from oxutils.cache import getUrl
+from oxlib.cache import getUrl
 
 def getVideoUrl(url):
diff --git a/oxweb/google.py b/oxweb/google.py
index 1fa910a..1f0e065 100644
--- a/oxweb/google.py
+++ b/oxweb/google.py
@@ -8,8 +8,8 @@
 import weakref
 import threading
 import Queue
 
-import oxutils
-from oxutils import stripTags
+import oxlib
+from oxlib import stripTags
 
 '''
@@ -27,9 +27,9 @@ FIXME: how search depper than first page?
 '''
 DEFAULT_MAX_RESULTS = 10
 
-def getUrl(url, data=None, headers=oxutils.net.DEFAULT_HEADERS):
+def getUrl(url, data=None, headers=oxlib.net.DEFAULT_HEADERS):
     google_timeout=24*60*60
-    return oxutils.cache.getUrl(url, data, headers, google_timeout)
+    return oxlib.cache.getUrl(url, data, headers, google_timeout)
 
 def quote_plus(s):
     return urllib.quote_plus(s.encode('utf-8'))
diff --git a/oxweb/imdb.py b/oxweb/imdb.py
index 40e7eec..26c07a4 100644
--- a/oxweb/imdb.py
+++ b/oxweb/imdb.py
@@ -8,11 +8,11 @@
 import time
 from BeautifulSoup import BeautifulSoup
 import chardet
 
-import oxutils
-from oxutils import stripTags, decodeHtml, findRe, findString
-from oxutils.cache import getUrl, getUrlUnicode
-from oxutils.normalize import normalizeTitle, normalizeImdbId
-from oxutils import *
+import oxlib
+from oxlib import stripTags, decodeHtml, findRe, findString
+from oxlib.cache import getUrl, getUrlUnicode
+from oxlib.normalize import normalizeTitle, normalizeImdbId
+from oxlib import *
 
 import google
@@ -614,7 +614,7 @@ def guess(title, director=''):
         return url[28:35]
     try:
-        req = urllib2.Request(imdb_url, None, oxutils.net.DEFAULT_HEADERS)
+        req = urllib2.Request(imdb_url, None, oxlib.net.DEFAULT_HEADERS)
         u = urllib2.urlopen(req)
         data = u.read()
         return_url = u.url
@@ -629,7 +629,7 @@ def guess(title, director=''):
         return imdb_id
     imdb_url = 'http://www.imdb.com/find?q=%s;s=tt;site=aka' % quote(title.encode('utf-8'))
-    req = urllib2.Request(imdb_url, None, oxutils.net.DEFAULT_HEADERS)
+    req = urllib2.Request(imdb_url, None, oxlib.net.DEFAULT_HEADERS)
     u = urllib2.urlopen(req)
     data = u.read()
     return_url = u.url
diff --git a/oxweb/impawards.py b/oxweb/impawards.py
index 32eb346..e9e3928 100644
--- a/oxweb/impawards.py
+++ b/oxweb/impawards.py
@@ -2,9 +2,9 @@
 # encoding: utf-8
 import re
 
-from oxutils.cache import getUrlUnicode
-from oxutils.html import stripTags
-from oxutils.text import findRe
+from oxlib.cache import getUrlUnicode
+from oxlib.html import stripTags
+from oxlib.text import findRe
 
 import imdb
@@ -61,7 +61,7 @@ def parsePosterPage(html, year):
 
 def archivePosters():
     import os
-    from oxutils.net import getUrl
+    from oxlib.net import getUrl
     pathname = '/Volumes/Rolux Home/Desktop/Data/impawards.com'
     html = getUrlUnicode('http://impawards.com/archives/latest.html', timeout = 0)
     pages = int(findRe(html, ''))
diff --git a/oxweb/itunes.py b/oxweb/itunes.py
index 9cd7d99..ffabeff 100644
--- a/oxweb/itunes.py
+++ b/oxweb/itunes.py
@@ -3,10 +3,10 @@
 import re
 import urllib
 
-from oxutils.cache import getUrl
-from oxutils.html import decodeHtml, stripTags
-from oxutils.text import findRe
-from oxutils.text import findString
+from oxlib.cache import getUrl
+from oxlib.html import decodeHtml, stripTags
+from oxlib.text import findRe
+from oxlib.text import findString
 
 # to sniff itunes traffic, use something like
diff --git a/oxweb/lyricsfly.py b/oxweb/lyricsfly.py
index 2b2fe8b..46c9545 100644
--- a/oxweb/lyricsfly.py
+++ b/oxweb/lyricsfly.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
-from oxutils.cache import getUrl
-from oxutils.html import decodeHtml
-from oxutils.text import findRe
+from oxlib.cache import getUrl
+from oxlib.html import decodeHtml
+from oxlib.text import findRe
 
 def getLyrics(title, artist):
diff --git a/oxweb/mininova.py b/oxweb/mininova.py
index 36357e0..816904d 100644
--- a/oxweb/mininova.py
+++ b/oxweb/mininova.py
@@ -5,10 +5,10 @@
 import re
 import socket
 from urllib import quote
 
-from oxutils.cache import getUrl, getUrlUnicode
-from oxutils import findRe, cache, stripTags, decodeHtml, getTorrentInfo, intValue, normalizeNewlines
-from oxutils.normalize import normalizeImdbId
-import oxutils
+from oxlib.cache import getUrl, getUrlUnicode
+from oxlib import findRe, cache, stripTags, decodeHtml, getTorrentInfo, intValue, normalizeNewlines
+from oxlib.normalize import normalizeImdbId
+import oxlib
 
 from torrent import Torrent
@@ -55,7 +55,7 @@ def getId(mininovaId):
 
 def exists(mininovaId):
     mininovaId = getId(mininovaId)
-    data = oxutils.net.getUrl("http://www.mininova.org/tor/%s" % mininovaId)
+    data = oxlib.net.getUrl("http://www.mininova.org/tor/%s" % mininovaId)
     if not data or 'Torrent not found...' in data:
         return False
     if 'tracker of this torrent requires registration.' in data:
diff --git a/oxweb/opensubtitles.py b/oxweb/opensubtitles.py
index abb3cee..12ec5a8 100644
--- a/oxweb/opensubtitles.py
+++ b/oxweb/opensubtitles.py
@@ -3,9 +3,9 @@
 import re
 
 import feedparser
-from oxutils.cache import getUrl, getUrlUnicode
-import oxutils
-from oxutils.lang import langCode2To3, langTo3Code
+from oxlib.cache import getUrl, getUrlUnicode
+import oxlib
+from oxlib.lang import langCode2To3, langTo3Code
 
 def findSubtitlesByImdb(imdb, parts = 1, language = "eng"):
     if len(language) == 2:
@@ -26,7 +26,7 @@ def findSubtitlesByImdb(imdb, parts = 1, language = "eng"):
     if opensubtitleId:
         opensubtitleId = opensubtitleId[0]
     else:
-        opensubtitleId = oxutils.findRe(data, '/en/subtitles/(.*?)/')
+        opensubtitleId = oxlib.findRe(data, '/en/subtitles/(.*?)/')
     return opensubtitleId
 
 def downloadSubtitleById(opensubtitle_id):
@@ -34,7 +34,7 @@ def downloadSubtitleById(opensubtitle_id):
     data = getUrl('http://www.opensubtitles.org/en/subtitles/%s' % opensubtitle_id)
     reg_exp = 'href="(/en/download/file/.*?)">(.*?)'
     for f in re.compile(reg_exp, re.DOTALL).findall(data):
-        name = oxutils.stripTags(f[1]).split('\n')[0]
+        name = oxlib.stripTags(f[1]).split('\n')[0]
         url = "http://www.opensubtitles.com%s" % f[0]
         srts[name] = getUrlUnicode(url)
     return srts
diff --git a/oxweb/spiegel.py b/oxweb/spiegel.py
index 296c7b0..b345c43 100644
--- a/oxweb/spiegel.py
+++ b/oxweb/spiegel.py
@@ -6,9 +6,9 @@
 import time
 from BeautifulSoup import BeautifulSoup
 
-import oxutils.cache
-from oxutils.html import decodeHtml, stripTags
-import oxutils.net
+import oxlib.cache
+from oxlib.html import decodeHtml, stripTags
+import oxlib.net
 
 def getNews(year, month, day):
@@ -23,9 +23,9 @@
     for section in sections:
         url = 'http://www.spiegel.de/%s/0,1518,archiv-%d-%03d,00.html' % (section, year, day)
         if date == time.strftime('%d.%m.%Y', time.localtime()):
-            html = oxutils.net.getUrl(url)
+            html = oxlib.net.getUrl(url)
         else:
-            html = oxutils.cache.getUrl(url)
+            html = oxlib.cache.getUrl(url)
         for item in re.compile('', re.DOTALL).findall(item)[0]).strip()
             try:
@@ -102,11 +102,11 @@ def formatSubsection(string):
 
 def getIssue(year, week):
     coverUrl = 'http://www.spiegel.de/static/epaper/SP/%d/%d/ROSPANZ%d%03d0001-312.jpg' % (year, week, year, week)
-    if not oxutils.net.exists(coverUrl):
+    if not oxlib.net.exists(coverUrl):
         return None
     url = 'http://service.spiegel.de/digas/servlet/epaper?Q=SP&JG=%d&AG=%d&SE=1&AN=INHALT' % (year, week)
     contents = []
-    soup = BeautifulSoup(oxutils.cache.getUrl(url))
+    soup = BeautifulSoup(oxlib.cache.getUrl(url))
     for item in soup('a', {'href': re.compile('http://service.spiegel.de/digas/servlet/epaper\?Q=SP&JG=')}):
         item = str(item)
         page = int(re.compile('&SE=(.*?)"').findall(item)[0])
@@ -116,7 +116,7 @@ def getIssue(year, week):
     pages = page + 2
     for page in range(1, pages + 10):
         url = 'http://www.spiegel.de/static/epaper/SP/%d/%d/ROSPANZ%d%03d%04d-205.jpg' % (year, week, year, week, page)
-        if oxutils.cache.exists(url):
+        if oxlib.cache.exists(url):
             pageUrl[page] = url
         else:
             pageUrl[page] = ''
@@ -164,7 +164,7 @@ def archiveIssues():
                     f.close()
                 filename = '%s/Der Spiegel %d %02d.jpg' % (dirname, y, w)
                 if not os.path.exists(filename):
-                    data = oxutils.cache.getUrl(issue['coverUrl'])
+                    data = oxlib.cache.getUrl(issue['coverUrl'])
                     f = open(filename, 'w')
                     f.write(data)
                     f.close()
@@ -173,7 +173,7 @@ def archiveIssues():
                 if url:
                     filename = '%s/Der Spiegel %d %02d %03d.jpg' % (dirname, y, w, page)
                     if not os.path.exists(filename):
-                        data = oxutils.cache.getUrl(url)
+                        data = oxlib.cache.getUrl(url)
                         f = open(filename, 'w')
                         f.write(data)
                         f.close()
@@ -244,7 +244,7 @@ def archiveNews():
             f.close()
         filename = dirname + '/' + new['imageUrl'].split('/')[-1]
         if not os.path.exists(filename):
-            data = oxutils.cache.getUrl(new['imageUrl'])
+            data = oxlib.cache.getUrl(new['imageUrl'])
             f = open(filename, 'w')
             f.write(data)
             f.close()
diff --git a/oxweb/thepiratebay.py b/oxweb/thepiratebay.py
index e6f52c3..6fea4e4 100644
--- a/oxweb/thepiratebay.py
+++ b/oxweb/thepiratebay.py
@@ -6,10 +6,10 @@
 import socket
 from urllib import quote, urlencode
 from urllib2 import URLError
 
-from oxutils.cache import getUrl, getUrlUnicode
-from oxutils import findRe, cache, stripTags, decodeHtml, getTorrentInfo, normalizeNewlines
-from oxutils.normalize import normalizeImdbId
-import oxutils
+from oxlib.cache import getUrl, getUrlUnicode
+from oxlib import findRe, cache, stripTags, decodeHtml, getTorrentInfo, normalizeNewlines
+from oxlib.normalize import normalizeImdbId
+import oxlib
 
 from torrent import Torrent
@@ -63,7 +63,7 @@ def getId(piratebayId):
 
 def exists(piratebayId):
     piratebayId = getId(piratebayId)
-    return oxutils.net.exists("http://thepiratebay.org/tor/%s" % piratebayId)
+    return oxlib.net.exists("http://thepiratebay.org/tor/%s" % piratebayId)
 
 def getData(piratebayId):
     _key_map = {
diff --git a/oxweb/torrent.py b/oxweb/torrent.py
index 51ce3c9..9188716 100644
--- a/oxweb/torrent.py
+++ b/oxweb/torrent.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # vi:si:et:sw=4:sts=4:ts=4
-from oxutils import intValue
+from oxlib import intValue
 
 class Torrent(dict):
diff --git a/oxweb/wikipedia.py b/oxweb/wikipedia.py
index 1d969bf..100c8db 100644
--- a/oxweb/wikipedia.py
+++ b/oxweb/wikipedia.py
@@ -3,8 +3,8 @@
 from urllib import urlencode
 
 import simplejson
-from oxutils.cache import getUrl, getUrlUnicode
-from oxutils import findRe, decodeHtml
+from oxlib.cache import getUrl, getUrlUnicode
+from oxlib import findRe, decodeHtml
 
 def getMovieId(title, director='', year=''):
diff --git a/oxweb/youtube.py b/oxweb/youtube.py
index c17ebc3..449a607 100644
--- a/oxweb/youtube.py
+++ b/oxweb/youtube.py
@@ -4,8 +4,8 @@
 from urllib import quote
 import xml.etree.ElementTree as ET
 
 import feedparser
-from oxutils.cache import getUrl
-from oxutils import findString
+from oxlib.cache import getUrl
+from oxlib import findString
 
 def getVideoUrl(youtubeId, format='mp4'):
diff --git a/setup.py b/setup.py
index 5275875..b9b6df2 100644
--- a/setup.py
+++ b/setup.py
@@ -11,8 +11,8 @@ setup(
     description="collection of scrapers for various websites",
     author="0x",
     author_email="code@0xdb.org",
-    url="http://code.0xdb.org/ox",
-    download_url="http://code.0xdb.org/ox/download",
+    url="http://code.0xdb.org/oxweb",
+    download_url="http://code.0xdb.org/oxweb/download",
     license="GPLv3",
     packages=find_packages(),
     zip_safe=False,
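
Usage note (illustrative, not part of the patch): after this rename, code that imports from oxutils has to switch to the matching oxlib module, exactly as the hunks above do for oxweb itself. Below is a minimal sketch of what a downstream caller changes, assuming oxlib keeps the same helper names the hunks show (getUrlUnicode in oxlib.cache, findRe in oxlib.text); the getTitle function is a hypothetical example, not part of oxweb.

    # before the rename this was:
    #   from oxutils.cache import getUrlUnicode
    #   from oxutils.text import findRe
    from oxlib.cache import getUrlUnicode  # cached HTTP fetch returning unicode
    from oxlib.text import findRe          # returns the first capture group of a regex match

    def getTitle(url):
        # hypothetical example: fetch a page and pull out its <title> text
        html = getUrlUnicode(url)
        return findRe(html, '<title>(.*?)</title>')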