rename, use namespaces
This commit is contained in:
parent
208250d863
commit
0d354d2574
15 changed files with 7 additions and 7 deletions
74
ox/torrent/__init__.py
Normal file
74
ox/torrent/__init__.py
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# vi:si:et:sw=4:sts=4:ts=4
|
||||
# GPL 2007-2009
|
||||
|
||||
from threading import Event
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
from bencode import bencode, bdecode
|
||||
|
||||
__all__ = ['createTorrent', 'getInfoHash', 'getTorrentInfoFromFile', 'getTorrentInfo', 'getFiles', 'getTorrentSize']
|
||||
|
||||
def createTorrent(file, url, params = {}, flag = Event(),
    progress = lambda x: None, progress_percent = 1):
    "Creates a torrent for a given file, using url as tracker url"
    # Thin wrapper around makemetafile.make_meta_file; imported lazily to
    # avoid importing the heavy module (and its dependencies) at package load.
    # NOTE(review): the mutable/stateful defaults (params={}, flag=Event())
    # are created once and shared across calls -- callers should pass fresh
    # objects rather than rely on the defaults.
    from makemetafile import make_meta_file
    return make_meta_file(file, url, params, flag, progress, progress_percent)
|
||||
|
||||
def getInfoHash(torrentFile):
    """Return the hex SHA-1 info hash of the given .torrent file.

    The info hash is the SHA-1 digest of the re-bencoded 'info'
    dictionary; it is the identifier trackers and peers use.
    """
    # BUG FIX: the original never closed the file object, leaking the
    # handle until garbage collection.  Close it explicitly.
    metainfo_file = open(torrentFile, 'rb')
    try:
        metainfo = bdecode(metainfo_file.read())
    finally:
        metainfo_file.close()
    info = metainfo['info']
    return hashlib.sha1(bencode(info)).hexdigest()
|
||||
|
||||
def getTorrentInfoFromFile(torrentFile):
    # Read a .torrent file from disk and return its parsed info dict
    # (see getTorrentInfo), stamped with the file's ctime as 'timestamp'.
    f = open(torrentFile, 'rb')
    data = f.read()
    f.close()
    tinfo = getTorrentInfo(data)
    # st_ctime: metadata-change time on Unix, creation time on Windows.
    tinfo['timestamp'] = os.stat(torrentFile).st_ctime
    return tinfo
|
||||
|
||||
def getTorrentInfo(data):
    """Parse bencoded .torrent *data* and return a flat info dict.

    *data* is the raw contents of a .torrent file (a byte string), not a
    path.  The result merges the top-level metainfo keys (minus 'info')
    with the info-dict keys (minus the bulky 'pieces' blob) and adds
    'size' (total payload bytes), 'hash' (hex SHA-1 info hash) and
    'announce'.
    """
    # FIX: removed the unused local `piece_length` (the 'piece length' key
    # is still copied into the result by the loop below) and a stray `;`.
    tinfo = {}
    metainfo = bdecode(data)
    info = metainfo['info']
    if info.has_key('length'):
        # single-file torrent
        file_length = info['length']
    else:
        # multi-file torrent: total size is the sum of per-file lengths
        file_length = 0
        for f in info['files']:
            file_length += f['length']
    # Copy everything except the large binary piece-hash string ...
    for key in info:
        if key != 'pieces':
            tinfo[key] = info[key]
    # ... plus the outer metainfo keys except the nested 'info' dict itself.
    for key in metainfo:
        if key != 'info':
            tinfo[key] = metainfo[key]
    tinfo['size'] = file_length
    # SHA-1 of the re-bencoded info dict identifies the torrent.
    tinfo['hash'] = hashlib.sha1(bencode(info)).hexdigest()
    tinfo['announce'] = metainfo['announce']
    return tinfo
|
||||
|
||||
def getFiles(data):
    """Return the list of file paths contained in bencoded torrent *data*."""
    info = getTorrentInfo(data)
    # Single-file torrent: the payload is just the torrent's name.
    if 'files' not in info:
        return [info['name']]
    # Multi-file torrent: each entry's path components live under the
    # torrent's name as the top-level directory.
    paths = []
    for entry in info['files']:
        parts = [info['name']]
        parts.extend(entry['path'])
        paths.append(os.path.join(*parts))
    return paths
|
||||
|
||||
def getTorrentSize(torrentFile):
    "Returns Size of files in torrent file in bytes"
    # NOTE(review): despite the parameter name, getTorrentInfo() expects the
    # bencoded *contents* of a torrent, not a file path -- callers appear to
    # pass raw data here.  Confirm against call sites before renaming or
    # "fixing" this to read from disk.
    return getTorrentInfo(torrentFile)['size']
|
||||
|
||||
320
ox/torrent/bencode.py
Normal file
320
ox/torrent/bencode.py
Normal file
|
|
@ -0,0 +1,320 @@
|
|||
# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman
|
||||
# see LICENSE.txt for license information
|
||||
|
||||
from types import IntType, LongType, StringType, ListType, TupleType, DictType
|
||||
# Feature-detect types that older Pythons lack; encoder registration below
# checks these for None.
try:
    from types import BooleanType
except ImportError:
    # Pre-2.3 Python: no bool type.
    BooleanType = None
try:
    from types import UnicodeType
except ImportError:
    # Python built without unicode support.
    UnicodeType = None
from cStringIO import StringIO
|
||||
|
||||
def decode_int(x, f):
    # Decode a bencoded integer "i<digits>e" with offset *f* pointing at the
    # leading 'i'.  Returns (value, offset just past the 'e').
    f += 1
    newf = x.index('e', f)
    try:
        n = int(x[f:newf])
    except:
        # Fallback for very large literals on old Pythons.
        n = long(x[f:newf])
    if x[f] == '-':
        # "i-0e" (negative zero) is illegal.
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and newf != f+1:
        # Leading zeros ("i03e") are illegal.
        raise ValueError
    return (n, newf+1)
|
||||
|
||||
def decode_string(x, f):
    # Decode a length-prefixed string "<len>:<bytes>" starting at offset *f*.
    # Returns (string, offset just past the string).
    colon = x.index(':', f)
    try:
        n = int(x[f:colon])
    except (OverflowError, ValueError):
        n = long(x[f:colon])
    # Leading zeros in the length ("01:a") are illegal.
    if x[f] == '0' and colon != f+1:
        raise ValueError
    colon += 1
    return (x[colon:colon+n], colon+n)
|
||||
|
||||
def decode_unicode(x, f):
    # Decode a "u<len>:<utf8 bytes>" extension value.  Currently unused:
    # its 'u' entry in decode_func below is commented out.
    s, f = decode_string(x, f+1)
    return (s.decode('UTF-8'),f)
|
||||
|
||||
def decode_list(x, f):
    # Decode "l...e": dispatch on each element's leading type byte until
    # the terminating 'e'.
    r, f = [], f+1
    while x[f] != 'e':
        v, f = decode_func[x[f]](x, f)
        r.append(v)
    return (r, f + 1)
|
||||
|
||||
def decode_dict(x, f):
    # Decode "d...e": alternating string keys and arbitrary values.
    r, f = {}, f+1
    lastkey = None
    while x[f] != 'e':
        k, f = decode_string(x, f)
        # NOTE(review): the disabled check below enforced the bencoding
        # spec's sorted-unique-keys rule.  With it off, unsorted and
        # duplicate keys are silently accepted (duplicates keep the last
        # value), and several test_bdecode "must raise" cases no longer do.
        #if lastkey >= k:
        #    raise ValueError
        lastkey = k
        r[k], f = decode_func[x[f]](x, f)
    return (r, f + 1)
|
||||
|
||||
# Dispatch table: the first byte of a bencoded value selects its decoder.
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
# Any decimal digit starts a length-prefixed string.
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
# Non-standard unicode extension, deliberately disabled:
#decode_func['u'] = decode_unicode
|
||||
|
||||
def bdecode(x, sloppy = 1):
    # Decode one complete bencoded value from string *x*.
    # With sloppy true (the default) trailing garbage after the value is
    # tolerated; pass sloppy=0 for strict whole-input decoding.
    try:
        r, l = decode_func[x[0]](x, 0)
    # except (IndexError, KeyError):
    except (IndexError, KeyError, ValueError):
        # Normalize all decoder failures to ValueError.
        raise ValueError, "bad bencoded data"
    if not sloppy and l != len(x):
        raise ValueError, "bad bencoded data"
    return r
|
||||
|
||||
def test_bdecode():
    # Decoder self-test: malformed inputs must raise ValueError, valid ones
    # must round-trip to the expected Python values.
    # NOTE(review): these expectations assume strict decoding.  With
    # bdecode()'s sloppy default and decode_dict's disabled key-order check,
    # the trailing-garbage and unsorted/duplicate-key cases below would not
    # raise -- confirm whether this test is still expected to pass.
    try:
        bdecode('0:0:')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('ie')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('i341foo382e')
        assert 0
    except ValueError:
        pass
    assert bdecode('i4e') == 4L
    assert bdecode('i0e') == 0L
    assert bdecode('i123456789e') == 123456789L
    assert bdecode('i-10e') == -10L
    try:
        bdecode('i-0e')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('i123')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('i6easd')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('35208734823ljdahflajhdf')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('2:abfdjslhfld')
        assert 0
    except ValueError:
        pass
    assert bdecode('0:') == ''
    assert bdecode('3:abc') == 'abc'
    assert bdecode('10:1234567890') == '1234567890'
    try:
        bdecode('02:xy')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('l')
        assert 0
    except ValueError:
        pass
    assert bdecode('le') == []
    try:
        bdecode('leanfdldjfh')
        assert 0
    except ValueError:
        pass
    assert bdecode('l0:0:0:e') == ['', '', '']
    try:
        bdecode('relwjhrlewjh')
        assert 0
    except ValueError:
        pass
    assert bdecode('li1ei2ei3ee') == [1, 2, 3]
    assert bdecode('l3:asd2:xye') == ['asd', 'xy']
    assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]
    try:
        bdecode('d')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('defoobar')
        assert 0
    except ValueError:
        pass
    assert bdecode('de') == {}
    assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
    assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}}
    try:
        bdecode('d3:fooe')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('di1e0:e')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('d1:b0:1:a0:e')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('d1:a0:1:a0:e')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('i03e')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('l01:ae')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('9999:x')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('l0:')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('d0:0:')
        assert 0
    except ValueError:
        pass
    try:
        bdecode('d0:')
        assert 0
    except ValueError:
        pass
|
||||
|
||||
# Sentinel list shared by every Bencached instance; encode_bencached
# identity-checks it to reject look-alike objects.
bencached_marker = []

class Bencached:
    # Wraps an already-bencoded string so bencode() can splice it into the
    # output verbatim instead of re-encoding it.
    def __init__(self, s):
        self.marker = bencached_marker
        self.bencoded = s

BencachedType = type(Bencached('')) # insufficient, but good as a filter
|
||||
|
||||
def encode_bencached(x,r):
    # Append a pre-encoded chunk directly to the output list.
    assert x.marker == bencached_marker
    r.append(x.bencoded)
|
||||
|
||||
def encode_int(x, r):
    """Append the bencoding of integer *x* ("i<digits>e") to list *r*."""
    r += ['i', str(x), 'e']
|
||||
|
||||
def encode_bool(x,r):
    # Booleans are bencoded as the integers 0/1 (bencoding has no bool type).
    encode_int(int(x),r)
|
||||
|
||||
def encode_string(x, r):
    """Append the bencoding of string *x* ("<len>:<x>") to list *r*."""
    r += [str(len(x)), ':', x]
|
||||
|
||||
def encode_unicode(x,r):
    # Unicode is encoded as a plain UTF-8 string; the non-standard 'u'
    # type prefix is disabled for compatibility.
    #r.append('u')
    encode_string(x.encode('UTF-8'),r)
|
||||
|
||||
def encode_list(x,r):
    # "l<items>e", recursing through the dispatch table per element.
    r.append('l')
    for e in x:
        encode_func[type(e)](e, r)
    r.append('e')
|
||||
|
||||
def encode_dict(x,r):
    # "d<key><value>...e" with keys emitted in sorted order, as the
    # bencoding spec requires.
    r.append('d')
    ilist = x.items()
    ilist.sort()
    for k,v in ilist:
        # Keys must be strings; encode inline as <len>:<key>.
        r.extend((str(len(k)),':',k))
        encode_func[type(v)](v, r)
    r.append('e')
|
||||
|
||||
# Dispatch table mapping Python types to their encoders.
encode_func = {}
encode_func[BencachedType] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
# Optional types registered only when this Python provides them (see the
# feature-detection at the top of the module).
if BooleanType:
    encode_func[BooleanType] = encode_bool
if UnicodeType:
    encode_func[UnicodeType] = encode_unicode
|
||||
|
||||
def bencode(x):
    # Encode *x* into a bencoded string.
    # Unsupported types print a diagnostic and abort via the assert --
    # callers see an AssertionError, not a TypeError.
    r = []
    try:
        encode_func[type(x)](x, r)
    except:
        print "*** error *** could not encode type %s (value: %s)" % (type(x), x)
        assert 0
    return ''.join(r)
|
||||
|
||||
def test_bencode():
    # Encoder self-test: exact expected encodings, plus the requirement
    # that a non-string dict key trips bencode()'s internal assert.
    assert bencode(4) == 'i4e'
    assert bencode(0) == 'i0e'
    assert bencode(-10) == 'i-10e'
    assert bencode(12345678901234567890L) == 'i12345678901234567890e'
    assert bencode('') == '0:'
    assert bencode('abc') == '3:abc'
    assert bencode('1234567890') == '10:1234567890'
    assert bencode([]) == 'le'
    assert bencode([1, 2, 3]) == 'li1ei2ei3ee'
    assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee'
    assert bencode({}) == 'de'
    assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee'
    assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'
    try:
        bencode({1: 'foo'})
        assert 0
    except AssertionError:
        pass
|
||||
|
||||
|
||||
# Optional speedup: JIT-compile the hot entry points when psyco is
# installed; silently skipped otherwise.
try:
    import psyco
    psyco.bind(bdecode)
    psyco.bind(bencode)
except ImportError:
    pass
|
||||
100
ox/torrent/btformats.py
Normal file
100
ox/torrent/btformats.py
Normal file
|
|
@ -0,0 +1,100 @@
|
|||
# Written by Bram Cohen
|
||||
# see LICENSE.txt for license information
|
||||
|
||||
from types import StringType, LongType, IntType, ListType, DictType
|
||||
from re import compile
|
||||
|
||||
# Path components may not start with '.' or '~' and may not contain path
# separators -- guards against directory traversal via torrent names.
reg = compile(r'^[^/\\.~][^/\\]*$')

# Integer-ish types accepted for lengths, ports and counters.
ints = (LongType, IntType)
|
||||
|
||||
def check_info(info):
    # Validate the 'info' dictionary of a torrent metafile.
    # Raises ValueError (with a diagnostic message) on the first problem.
    if type(info) != DictType:
        raise ValueError, 'bad metainfo - not a dictionary'
    # 'pieces' is a concatenation of 20-byte SHA-1 digests.
    pieces = info.get('pieces')
    if type(pieces) != StringType or len(pieces) % 20 != 0:
        raise ValueError, 'bad metainfo - bad pieces key'
    piecelength = info.get('piece length')
    if type(piecelength) not in ints or piecelength <= 0:
        raise ValueError, 'bad metainfo - illegal piece length'
    name = info.get('name')
    if type(name) != StringType:
        raise ValueError, 'bad metainfo - bad name'
    # Reject names that could escape the download directory.
    if not reg.match(name):
        raise ValueError, 'name %s disallowed for security reasons' % name
    # Exactly one of 'length' (single-file) / 'files' (multi-file) allowed.
    if info.has_key('files') == info.has_key('length'):
        raise ValueError, 'single/multiple file mix'
    if info.has_key('length'):
        length = info.get('length')
        if type(length) not in ints or length < 0:
            raise ValueError, 'bad metainfo - bad length'
    else:
        files = info.get('files')
        if type(files) != ListType:
            raise ValueError
        for f in files:
            if type(f) != DictType:
                raise ValueError, 'bad metainfo - bad file value'
            length = f.get('length')
            if type(length) not in ints or length < 0:
                raise ValueError, 'bad metainfo - bad length'
            path = f.get('path')
            if type(path) != ListType or path == []:
                raise ValueError, 'bad metainfo - bad path'
            for p in path:
                if type(p) != StringType:
                    raise ValueError, 'bad metainfo - bad path dir'
                # Same traversal guard as for 'name', per path component.
                if not reg.match(p):
                    raise ValueError, 'path %s disallowed for security reasons' % p
        # Pairwise duplicate-path check (O(n^2), fine for typical torrents).
        for i in xrange(len(files)):
            for j in xrange(i):
                if files[i]['path'] == files[j]['path']:
                    raise ValueError, 'bad metainfo - duplicate path'
|
||||
|
||||
def check_message(message):
    # Validate a complete metainfo dict: a well-formed 'info' dictionary
    # plus a string announce URL.  Raises ValueError on failure.
    if type(message) != DictType:
        raise ValueError
    check_info(message.get('info'))
    if type(message.get('announce')) != StringType:
        raise ValueError
|
||||
|
||||
def check_peers(message):
    # Validate a tracker announce response.  Raises ValueError on any
    # malformed field; returns None on success.
    if type(message) != DictType:
        raise ValueError
    # A failure response carries only a human-readable reason string.
    if message.has_key('failure reason'):
        if type(message['failure reason']) != StringType:
            raise ValueError
        return
    peers = message.get('peers')
    if type(peers) == ListType:
        # Dictionary model: one dict per peer.
        for p in peers:
            if type(p) != DictType:
                raise ValueError
            if type(p.get('ip')) != StringType:
                raise ValueError
            port = p.get('port')
            # BUG FIX: the original tested `p <= 0` (the whole peer dict)
            # instead of the port value, so non-positive ports were never
            # rejected.
            if type(port) not in ints or port <= 0:
                raise ValueError
            if p.has_key('peer id'):
                id = p['peer id']
                if type(id) != StringType or len(id) != 20:
                    raise ValueError
    # Compact model: 6 bytes per peer (4 IP + 2 port).
    elif type(peers) != StringType or len(peers) % 6 != 0:
        raise ValueError
    interval = message.get('interval', 1)
    if type(interval) not in ints or interval <= 0:
        raise ValueError
    minint = message.get('min interval', 1)
    if type(minint) not in ints or minint <= 0:
        raise ValueError
    if type(message.get('tracker id', '')) != StringType:
        raise ValueError
    npeers = message.get('num peers', 0)
    if type(npeers) not in ints or npeers < 0:
        raise ValueError
    dpeers = message.get('done peers', 0)
    if type(dpeers) not in ints or dpeers < 0:
        raise ValueError
    last = message.get('last', 0)
    if type(last) not in ints or last < 0:
        raise ValueError
|
||||
270
ox/torrent/makemetafile.py
Normal file
270
ox/torrent/makemetafile.py
Normal file
|
|
@ -0,0 +1,270 @@
|
|||
# Written by Bram Cohen
|
||||
# multitracker extensions by John Hoffman
|
||||
# see LICENSE.txt for license information
|
||||
|
||||
from os.path import getsize, split, join, abspath, isdir
|
||||
from os import listdir
|
||||
from hashlib import sha1 as sha
|
||||
from copy import copy
|
||||
from string import strip
|
||||
from bencode import bencode
|
||||
from btformats import check_info
|
||||
from threading import Event
|
||||
from time import time
|
||||
from traceback import print_exc
|
||||
# Detect the filesystem encoding; fall back for pre-2.3 Pythons that lack
# sys.getfilesystemencoding().
try:
    from sys import getfilesystemencoding
    ENCODING = getfilesystemencoding()
except:
    from sys import getdefaultencoding
    ENCODING = getdefaultencoding()

# (name, default, help text) triples for the supported parameters.
defaults = [
    ('announce_list', '',
        'a list of announce URLs - explained below'),
    ('httpseeds', '',
        'a list of http seed URLs - explained below'),
    ('piece_size_pow2', 0,
        "which power of 2 to set the piece size to (0 = automatic)"),
    ('comment', '',
        "optional human-readable comment to put in .torrent"),
    ('filesystem_encoding', '',
        "optional specification for filesystem encoding " +
        "(set automatically in recent Python versions)"),
    ('target', '',
        "optional target file for the torrent")
    ]

# Default piece size 2**18 = 256 KiB (used when auto-sizing is disabled).
default_piece_len_exp = 18

# Directory entries never included in a torrent.
ignore = ['core', 'CVS']
|
||||
|
||||
def print_announcelist_details():
    # Print command-line help describing the announce_list and httpseeds
    # parameter formats (output only; no return value).
    print (' announce_list = optional list of redundant/backup tracker URLs, in the format:')
    print ('        url[,url...][|url[,url...]...]')
    print ('                where URLs separated by commas are all tried first')
    print ('                before the next group of URLs separated by the pipe is checked.')
    print ("                If none is given, it is assumed you don't want one in the metafile.")
    print ('                If announce_list is given, clients which support it')
    print ('                will ignore the <announce> value.')
    print ('        Examples:')
    print ('                http://tracker1.com|http://tracker2.com|http://tracker3.com')
    print ('                     (tries trackers 1-3 in order)')
    print ('                http://tracker1.com,http://tracker2.com,http://tracker3.com')
    print ('                     (tries trackers 1-3 in a randomly selected order)')
    print ('                http://tracker1.com|http://backup1.com,http://backup2.com')
    print ('                     (tries tracker 1 first, then tries between the 2 backups randomly)')
    print ('')
    print (' httpseeds = optional list of http-seed URLs, in the format:')
    print ('        url[|url...]')
|
||||
|
||||
def make_meta_file(file, url, params = {}, flag = Event(),
    progress = lambda x: None, progress_percent = 1):
    # Build and write a .torrent metafile for *file* (a file or directory),
    # announcing to tracker *url*.  Setting *flag* aborts hashing early (the
    # function then returns without writing).  *progress* receives either a
    # completed fraction (progress_percent=1) or a byte count (0).
    # NOTE(review): the mutable/stateful defaults (params={}, flag=Event())
    # are shared between calls.
    if params.has_key('piece_size_pow2'):
        piece_len_exp = params['piece_size_pow2']
    else:
        piece_len_exp = default_piece_len_exp
    # Output path: explicit 'target' param, else "<input path>.torrent".
    if params.has_key('target') and params['target'] != '':
        f = params['target']
    else:
        a, b = split(file)
        if b == '':
            f = a + '.torrent'
        else:
            f = join(a, b + '.torrent')

    # Auto-size pieces so the piece list stays reasonable for big payloads.
    if piece_len_exp == 0:  # automatic
        size = calcsize(file)
        if size > 8L*1024*1024*1024:        # > 8 gig =
            piece_len_exp = 21          #   2 meg pieces
        elif size > 2*1024*1024*1024:   # > 2 gig =
            piece_len_exp = 20          #   1 meg pieces
        elif size > 512*1024*1024:      # > 512M =
            piece_len_exp = 19          #   512K pieces
        elif size > 64*1024*1024:       # > 64M =
            piece_len_exp = 18          #   256K pieces
        elif size > 16*1024*1024:       # > 16M =
            piece_len_exp = 17          #   128K pieces
        elif size > 4*1024*1024:        # > 4M =
            piece_len_exp = 16          #   64K pieces
        else:                           # < 4M =
            piece_len_exp = 15          #   32K pieces
    piece_length = 2 ** piece_len_exp

    # Filesystem encoding: explicit param, else detected, else ascii.
    encoding = None
    if params.has_key('filesystem_encoding'):
        encoding = params['filesystem_encoding']
    if not encoding:
        encoding = ENCODING
    if not encoding:
        encoding = 'ascii'

    info = makeinfo(file, piece_length, encoding, flag, progress, progress_percent)
    if flag.isSet():
        # Aborted mid-hash: write nothing.
        return
    check_info(info)
    h = open(f, 'wb')
    data = {'info': info, 'announce': strip(url), 'creation date': long(time())}

    if params.has_key('comment') and params['comment']:
        data['comment'] = params['comment']

    if params.has_key('real_announce_list'):    # shortcut for progs calling in from outside
        data['announce-list'] = params['real_announce_list']
    elif params.has_key('announce_list') and params['announce_list']:
        # "url,url|url,url" -> list of tiers, each tier a list of URLs.
        l = []
        for tier in params['announce_list'].split('|'):
            l.append(tier.split(','))
        data['announce-list'] = l

    if params.has_key('real_httpseeds'):    # shortcut for progs calling in from outside
        data['httpseeds'] = params['real_httpseeds']
    elif params.has_key('httpseeds') and params['httpseeds']:
        data['httpseeds'] = params['httpseeds'].split('|')

    if params.has_key('url-list') and params['url-list']:
        data['url-list'] = params['url-list'].split('|')

    if params.has_key('playtime') and params['playtime']:
        data['info']['playtime'] = params['playtime']

    h.write(bencode(data))
    h.close()
|
||||
|
||||
def calcsize(file):
    # Total payload size in bytes: the file's size, or the sum over every
    # file beneath it when *file* is a directory.
    if not isdir(file):
        return getsize(file)
    total = 0L
    for s in subfiles(abspath(file)):
        # s is a (path-components, absolute-path) pair from subfiles().
        total += getsize(s[1])
    return total
|
||||
|
||||
|
||||
def uniconvertl(l, e):
    # Convert each path component in list *l* from filesystem encoding *e*
    # to UTF-8 (see uniconvert); reports the whole joined path on failure.
    r = []
    try:
        for s in l:
            r.append(uniconvert(s, e))
    except UnicodeError:
        raise UnicodeError('bad filename: '+join(*l))
    return r
|
||||
|
||||
def uniconvert(s, e):
    # Decode byte string *s* with encoding *e* (skipped when it is already
    # unicode), then re-encode as UTF-8 for storage in the metafile.
    try:
        if s.__class__.__name__ != 'unicode':
            s = unicode(s,e)
    except UnicodeError:
        raise UnicodeError('bad filename: '+s)
    return s.encode('utf-8')
|
||||
|
||||
def makeinfo(file, piece_length, encoding, flag, progress, progress_percent=1):
    # Build the torrent 'info' dictionary: hash *file* (or every file under
    # it) in piece_length chunks.  Returns None if *flag* is set mid-hash.
    file = abspath(file)
    if isdir(file):
        # Multi-file mode: pieces span file boundaries, so files are hashed
        # back-to-back in sorted order.
        subs = subfiles(file)
        subs.sort()
        pieces = []
        sh = sha()
        done = 0L        # bytes accumulated into the current piece
        fs = []          # per-file {'length', 'path'} entries
        totalsize = 0.0
        totalhashed = 0L
        # First pass: total size, for fractional progress reporting.
        for p, f in subs:
            totalsize += getsize(f)

        for p, f in subs:
            pos = 0L
            size = getsize(f)
            fs.append({'length': size, 'path': uniconvertl(p, encoding)})
            h = open(f, 'rb')
            while pos < size:
                # Read up to the end of this file or of the current piece.
                a = min(size - pos, piece_length - done)
                sh.update(h.read(a))
                if flag.isSet():
                    # Abort requested; caller checks for None.
                    return
                done += a
                pos += a
                totalhashed += a

                if done == piece_length:
                    pieces.append(sh.digest())
                    done = 0
                    sh = sha()
                if progress_percent:
                    progress(totalhashed / totalsize)
                else:
                    progress(a)
            h.close()
        # Final (short) piece, if any bytes are pending.
        if done > 0:
            pieces.append(sh.digest())
        return {'pieces': ''.join(pieces),
            'piece length': piece_length, 'files': fs,
            'name': uniconvert(split(file)[1], encoding) }
    else:
        # Single-file mode: hash one piece at a time.
        size = getsize(file)
        pieces = []
        p = 0L
        h = open(file, 'rb')
        while p < size:
            x = h.read(min(piece_length, size - p))
            if flag.isSet():
                return
            pieces.append(sha(x).digest())
            p += piece_length
            if p > size:
                p = size
            if progress_percent:
                progress(float(p) / size)
            else:
                progress(min(piece_length, size - p))
        h.close()
        return {'pieces': ''.join(pieces),
            'piece length': piece_length, 'length': size,
            'name': uniconvert(split(file)[1], encoding) }
|
||||
|
||||
def subfiles(d):
    """Return (relative-path-components, full-path) pairs for every file
    at or below *d*, skipping names in *ignore* and dot-entries."""
    out = []
    pending = [([], d)]
    # Iterative depth-first walk (avoids recursion-depth issues).
    while pending:
        rel, node = pending.pop()
        if isdir(node):
            for entry in listdir(node):
                if entry not in ignore and not entry.startswith('.'):
                    # rel + [entry] builds a fresh component list per child.
                    pending.append((rel + [entry], join(node, entry)))
        else:
            out.append((rel, node))
    return out
|
||||
|
||||
|
||||
def completedir(dir, url, params = {}, flag = Event(),
    vc = lambda x: None, fc = lambda x: None):
    # Create a .torrent next to every entry of *dir* that does not already
    # have one.  *vc* receives overall fractional progress, *fc* each path
    # as it starts.  NOTE(review): mutable/stateful defaults are shared
    # between calls, and params['target'] is mutated in the loop below.
    files = listdir(dir)
    files.sort()
    ext = '.torrent'
    if params.has_key('target'):
        target = params['target']
    else:
        target = ''

    # Entries that are neither torrents themselves nor already covered by
    # an existing "<name>.torrent".
    togen = []
    for f in files:
        if f[-len(ext):] != ext and (f + ext) not in files:
            togen.append(join(dir, f))

    # Total byte count, so callback() can report an overall fraction.
    total = 0
    for i in togen:
        total += calcsize(i)

    subtotal = [0]
    # Accumulates per-chunk byte counts into an overall fraction for vc.
    def callback(x, subtotal = subtotal, total = total, vc = vc):
        subtotal[0] += x
        vc(float(subtotal[0]) / total)
    for i in togen:
        fc(i)
        try:
            t = split(i)[-1]
            if t not in ignore and t[0] != '.':
                if target != '':
                    params['target'] = join(target,t+ext)
                make_meta_file(i, url, params, flag, progress = callback, progress_percent = 0)
        except ValueError:
            # Bad metainfo for one entry shouldn't stop the batch.
            print_exc()
|
||||
Loading…
Add table
Add a link
Reference in a new issue