use tor hidden service instead of ed25519 as peer id
This commit is contained in:
parent cc258fb5ee
commit 7c1e5c691a
23 changed files with 1139 additions and 324 deletions
@@ -8,7 +8,6 @@ import OpenSSL

import settings


def get_fingerprint():
    with open(settings.ssl_cert_path) as fd:
        data = fd.read()
@@ -17,7 +16,7 @@ def get_fingerprint():

def generate_ssl():
    key = OpenSSL.crypto.PKey()
    key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
    key.generate_key(OpenSSL.crypto.TYPE_RSA, 1024)
    with open(settings.ssl_key_path, 'wb') as fd:
        os.chmod(settings.ssl_key_path, 0o600)
        fd.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
@@ -1,37 +1,165 @@
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4

import os

import tornado
from tornado.web import Application
from tornado.httpserver import HTTPServer
from tornado.ioloop import PeriodicCallback

from oxtornado import run_async
from utils import valid, get_public_ipv6
from websocket import trigger_event
from . import cert
from socketserver import ThreadingMixIn
from threading import Thread
import base64
import db
import directory
import gzip
import hashlib
import http.server
import io
import json
from . import nodeapi
import os
import socket
import socketserver

from Crypto.PublicKey import RSA
from Crypto.Util.asn1 import DerSequence
from OpenSSL.crypto import dump_privatekey, FILETYPE_ASN1
from OpenSSL.SSL import (
    Context, Connection, TLSv1_2_METHOD,
    VERIFY_PEER, VERIFY_FAIL_IF_NO_PEER_CERT, VERIFY_CLIENT_ONCE
)

import settings
import state
import user

from . import nodeapi
from .sslsocket import fileobject

import logging
logger = logging.getLogger('oml.node.server')
class NodeHandler(tornado.web.RequestHandler):
def get_service_id(key):
    '''
    service_id is the first half of the sha1 of the rsa public key encoded in base32
    '''
    # compute sha1 of public key and encode first half in base32
    pub_der = DerSequence()
    pub_der.decode(dump_privatekey(FILETYPE_ASN1, key))
    public_key = RSA.construct((pub_der._seq[1], pub_der._seq[2])).exportKey('DER')[22:]
    service_id = base64.b32encode(hashlib.sha1(public_key).digest()[:10]).lower().decode()
    return service_id
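The recipe above is the Tor v2 onion address construction: SHA-1 of the DER-encoded RSAPublicKey, keep the first 10 of the 20 digest bytes, base32-encode, lowercase. A minimal standalone sketch of the same steps (public_key_der stands in for the DER blob extracted above):

import base64
import hashlib

def service_id_from_der(public_key_der):
    # sha1 -> first half (10 of 20 bytes) -> base32 -> lowercase,
    # giving the 16-character label of a v2 .onion address
    digest = hashlib.sha1(public_key_der).digest()
    return base64.b32encode(digest[:10]).lower().decode()

# a peer is then reachable at service_id_from_der(der) + '.onion'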
    def initialize(self):
        pass

class TLSTCPServer(socketserver.TCPServer):

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
    def _accept(self, connection, x509, errnum, errdepth, ok):
        # client_id is validated in request
        return True

    def __init__(self, server_address, HandlerClass, bind_and_activate=True):
        socketserver.TCPServer.__init__(self, server_address, HandlerClass)
        ctx = Context(TLSv1_2_METHOD)
        ctx.use_privatekey_file(settings.ssl_key_path)
        ctx.use_certificate_file(settings.ssl_cert_path)
        # only allow clients with cert:
        ctx.set_verify(VERIFY_PEER | VERIFY_CLIENT_ONCE | VERIFY_FAIL_IF_NO_PEER_CERT, self._accept)
        #ctx.set_verify(VERIFY_PEER | VERIFY_CLIENT_ONCE, self._accept)
        self.socket = Connection(ctx, socket.socket(self.address_family, self.socket_type))
        if bind_and_activate:
            self.server_bind()
            self.server_activate()

    def shutdown_request(self, request):
        try:
            request.shutdown()
        except:
            pass


class NodeServer(ThreadingMixIn, TLSTCPServer):
    allow_reuse_address = True
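Because the context sets VERIFY_FAIL_IF_NO_PEER_CERT, every peer must present its own certificate, and the server identifies it by the key in that certificate rather than by an Ed25519 header. A rough sketch of the client side of the handshake with pyOpenSSL (key_path and cert_path are assumed to point at the peer's own credentials from cert.generate_ssl()):

import socket
from OpenSSL.SSL import Context, Connection, TLSv1_2_METHOD

def connect_as_peer(host, port, key_path, cert_path):
    # present our own key/cert; the server derives our user_id from the
    # certificate's public key via get_service_id()
    ctx = Context(TLSv1_2_METHOD)
    ctx.use_privatekey_file(key_path)
    ctx.use_certificate_file(cert_path)
    conn = Connection(ctx, socket.socket())
    conn.connect((host, port))
    conn.do_handshake()
    return conn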
def api_call(action, user_id, args):
    with db.session():
        u = user.models.User.get(user_id)
        if action in (
            'requestPeering', 'acceptPeering', 'rejectPeering', 'removePeering'
        ) or (u and u.peered):
            content = getattr(nodeapi, 'api_' + action)(user_id, *args)
        else:
            if u and u.pending:
                logger.debug('ignore request from pending peer[%s] %s (%s)',
                    user_id, action, args)
                content = {}
            else:
                content = None
    return content
class Handler(http.server.SimpleHTTPRequestHandler):

    def setup(self):
        self.connection = self.request
        self.rfile = fileobject(self.connection, 'rb', self.rbufsize)
        self.wfile = fileobject(self.connection, 'wb', self.wbufsize)

    def version_string(self):
        return settings.USER_AGENT

    def do_HEAD(self):
        return self.do_GET()

    def do_GET(self):
        import item.models
        id = self.path.split('/')[-1] if self.path.startswith('/get/') else None
        if id and len(id) == 32 and id.isalnum():
            with db.session():
                i = item.models.Item.get(id)
                if not i:
                    self.send_response(404, 'Not Found')
                    self.send_header('Content-type', 'text/plain')
                    self.end_headers()
                    self.wfile.write(b'404 - Not Found')
                    return
                path = i.get_path()
                mimetype = {
                    'epub': 'application/epub+zip',
                    'pdf': 'application/pdf',
                    'txt': 'text/plain',
                }.get(path.split('.')[-1], None)
                self.send_response(200, 'OK')
                self.send_header('Content-Type', mimetype)
                self.send_header('X-Node-Protocol', settings.NODE_PROTOCOL)
                self.send_header('Content-Length', str(os.path.getsize(path)))
                self.end_headers()
                logger.debug('GET file %s', id)
                with open(path, 'rb') as f:
                    while 1:
                        data = f.read(16384)
                        if not data:
                            break
                        self.wfile.write(data)
        else:
            self.send_response(200, 'OK')
            self.send_header('Content-type', 'text/plain')
            self.send_header('X-Node-Protocol', settings.NODE_PROTOCOL)
            self.end_headers()
            self.wfile.write('Open Media Library\n'.encode())

    def gzip_data(self, data):
        # default to '' so a missing Accept-Encoding header can't raise on .find()
        encoding = self.headers.get('Accept-Encoding', '')
        if encoding.find('gzip') != -1:
            self.send_header('Content-Encoding', 'gzip')
            bytes_io = io.BytesIO()
            gzip_file = gzip.GzipFile(fileobj=bytes_io, mode='wb')
            gzip_file.write(data)
            gzip_file.close()
            result = bytes_io.getvalue()
            bytes_io.close()
            return result
        else:
            return data

    def gunzip_data(self, data):
        bytes_io = io.BytesIO(data)
        gzip_file = gzip.GzipFile(fileobj=bytes_io, mode='rb')
        result = gzip_file.read()
        gzip_file.close()
        return result
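gzip_data and gunzip_data are simple inverses built on gzip.GzipFile over BytesIO; a quick standalone round-trip check using the same stdlib calls:

import gzip
import io

payload = b'{"status": "ok"}'

# compress the way gzip_data does
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as f:
    f.write(payload)
compressed = buf.getvalue()

# decompress the way gunzip_data does
with gzip.GzipFile(fileobj=io.BytesIO(compressed), mode='rb') as f:
    assert f.read() == payload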
    def do_POST(self):
        '''
        API
        pullChanges [userid] from [to]
@@ -43,141 +171,85 @@ class NodeHandler(tornado.web.RequestHandler):
        ping responds public ip
        '''
        key = str(self.request.headers['X-Ed25519-Key'])
        sig = str(self.request.headers['X-Ed25519-Signature'])
        data = self.request.body
        content = {}
        x509 = self.connection.get_peer_certificate()
        user_id = get_service_id(x509.get_pubkey()) if x509 else None

        self.set_header('X-Node-Protocol', settings.NODE_PROTOCOL)
        if self.request.headers.get('X-Node-Protocol', None) > settings.NODE_PROTOCOL:
        content = {}
        try:
            content_len = int(self.headers.get('content-length', 0))
            data = self.rfile.read(content_len)
            if self.headers.get('Content-Encoding') == 'gzip':
                data = self.gunzip_data(data)
        except:
            logger.debug('invalid request', exc_info=1)
            response_status = (500, 'invalid request')
            self.write_response(response_status, content)
            return

        response_status = (200, 'OK')
        if self.headers.get('X-Node-Protocol', '') > settings.NODE_PROTOCOL:
            state.update_required = True
        if self.request.headers.get('X-Node-Protocol', None) != settings.NODE_PROTOCOL:
        if self.headers.get('X-Node-Protocol', '') != settings.NODE_PROTOCOL:
            logger.debug('protocol mismatch %s vs %s',
                self.headers.get('X-Node-Protocol', ''), settings.NODE_PROTOCOL)
            logger.debug('headers %s', self.headers)
            content = settings.release
        else:
            if valid(key, data, sig):
                try:
                    action, args = json.loads(data.decode('utf-8'))
                    logger.debug('NODE action %s %s (%s)', action, args, key)
                    if action == 'ping':
                        content = {
                            'ip': self.request.remote_addr
                        }
                    else:
                        content = yield tornado.gen.Task(api_call, action, key, args)
                        if content is None:
                            content = {'status': 'not peered'}
                            logger.debug('PEER %s IS UNKNOWN SEND 403', key)
                            self.set_status(403)
                content = json.dumps(content).encode('utf-8')
                sig = settings.sk.sign(content, encoding='base64')
                self.set_header('X-Ed25519-Signature', sig)
                self.set_header('X-Node-Protocol', settings.NODE_PROTOCOL)
                self.write(content)

    def get(self):
        self.set_header('X-Node-Protocol', settings.NODE_PROTOCOL)
        if self.request.headers.get('X-Node-Protocol', None) > settings.NODE_PROTOCOL:
            state.update_required = True
        self.write('Open Media Library')

@run_async
def api_call(action, key, args, callback):
    with db.session():
        u = user.models.User.get(key)
        if action in (
            'requestPeering', 'acceptPeering', 'rejectPeering', 'removePeering'
        ) or (u and u.peered):
            content = getattr(nodeapi, 'api_' + action)(key, *args)
        else:
            if u and u.pending:
                logger.debug('ignore request from pending peer[%s] %s (%s)', key, action, args)
                content = {}
            else:
                content = None
        callback(content)

class ShareHandler(tornado.web.RequestHandler):

    def initialize(self):
        pass

    def get(self, id):
        import item.models
        with db.session():
            i = item.models.Item.get(id)
            if not i:
                self.set_status(404)
        except:
            logger.debug('invalid data: %s', data, exc_info=1)
            response_status = (500, 'invalid request')
            content = {
                'status': 'invalid request'
            }
            self.write_response(response_status, content)
            return
            path = i.get_path()
            mimetype = {
                'epub': 'application/epub+zip',
                'pdf': 'application/pdf',
                'txt': 'text/plain',
            }.get(path.split('.')[-1], None)
            self.set_header('Content-Type', mimetype)
            logger.debug('GET file %s', id)
            with open(path, 'rb') as f:
                while 1:
                    data = f.read(16384)
                    if not data:
                        break
                    self.write(data)
            logger.debug('NODE action %s %s (%s)', action, args, user_id)
            if action == 'ping':
                content = {
                    'status': 'ok'
                }
            else:
                content = api_call(action, user_id, args)
                if content is None:
                    content = {'status': 'not peered'}
                    logger.debug('PEER %s IS UNKNOWN SEND 403', user_id)
                    response_status = (403, 'UNKNOWN USER')
                    content = {}
                else:
                    logger.debug('RESPONSE %s: %s', action, content)
        self.write_response(response_status, content)
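In the new scheme a peer request is just an HTTP POST of a JSON [action, args] pair over the client-authenticated TLS connection. A hedged sketch with the stdlib (host, port, credential paths, and the protocol version string are placeholders; CA verification is skipped because identity comes from the key-derived service id, not a CA):

import http.client
import json
import ssl

host, port = 'example.onion', 9851   # placeholders
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE      # identity is checked via the cert's key, not a CA
ctx.load_cert_chain('cert.pem', 'key.pem')

conn = http.client.HTTPSConnection(host, port, context=ctx)
conn.request('POST', '/', json.dumps(['ping', []]).encode(), {
    'X-Node-Protocol': '0.1',        # assumed version string
})
print(json.loads(conn.getresponse().read()))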
def publish_node():
    update_online()
    if state.online:
        with db.session():
            for u in user.models.User.query.filter_by(queued=True):
                logger.debug('adding queued node... %s', u.id)
                state.nodes.queue('add', u.id)
    state.check_nodes = PeriodicCallback(check_nodes, 120000)
    state.check_nodes.start()
    state._online = PeriodicCallback(update_online, 60000)
    state._online.start()

    def write_response(self, response_status, content):
        self.send_response(*response_status)
        self.send_header('X-Node-Protocol', settings.NODE_PROTOCOL)
        self.send_header('Content-Type', 'application/json')
        content = json.dumps(content, ensure_ascii=False).encode('utf-8')
        content = self.gzip_data(content)
        self.send_header('Content-Length', str(len(content)))
        self.end_headers()
        self.wfile.write(content)
def update_online():
    host = get_public_ipv6()
    if not host:
        if state.online:
            state.online = False
            trigger_event('status', {
                'id': settings.USER_ID,
                'online': state.online
            })
    else:
        if host != state.host:
            state.host = host
            online = directory.put(settings.sk, {
                'host': host,
                'port': settings.server['node_port'],
                'cert': settings.server['cert']
            })
            if online != state.online:
                state.online = online
                trigger_event('status', {
                    'id': settings.USER_ID,
                    'online': state.online
                })
class Server(Thread):
    http_server = None

def check_nodes():
    if state.online:
        with db.session():
            for u in user.models.User.query.filter_by(queued=True):
                if not state.nodes.is_online(u.id):
                    logger.debug('queued peering message for %s trying to connect...', u.id)
                    state.nodes.queue('add', u.id)

    def __init__(self):
        Thread.__init__(self)
        address = (settings.server['node_address'], settings.server['node_port'])
        self.http_server = NodeServer(address, Handler)
        self.daemon = True
        self.start()

    def run(self):
        self.http_server.serve_forever()

    def stop(self):
        if self.http_server:
            self.http_server.shutdown()
            self.http_server.socket.close()
        return Thread.join(self)

def start():
    application = Application([
        (r"/get/(.*)", ShareHandler),
        (r".*", NodeHandler),
    ], gzip=True)
    if not os.path.exists(settings.ssl_cert_path):
        settings.server['cert'] = cert.generate_ssl()
    return Server()

    http_server = HTTPServer(application, ssl_options={
        "certfile": settings.ssl_cert_path,
        "keyfile": settings.ssl_key_path
    })
    http_server.listen(settings.server['node_port'], settings.server['node_address'])
    state.main.add_callback(publish_node)
    return http_server
305 oml/node/sslsocket.py Normal file

@@ -0,0 +1,305 @@
from io import BytesIO
from socket import error
from errno import EINTR

# Based on socket._fileobject from python2.7
class fileobject(object):
    """Faux file object attached to a socket object."""

    default_bufsize = 8192
    name = "<socket>"

    __slots__ = ["mode", "bufsize", "softspace",
                 # "closed" is a property, see below
                 "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len",
                 "_close"]

    def __init__(self, sock, mode='rb', bufsize=-1, close=False):
        self._sock = sock
        self.mode = mode  # Not actually used in this version
        if bufsize < 0:
            bufsize = self.default_bufsize
        self.bufsize = bufsize
        self.softspace = False
        # _rbufsize is the suggested recv buffer size. It is *strictly*
        # obeyed within readline() for recv calls. If it is larger than
        # default_bufsize it will be used for recv calls within read().
        if bufsize == 0:
            self._rbufsize = 1
        elif bufsize == 1:
            self._rbufsize = self.default_bufsize
        else:
            self._rbufsize = bufsize
        self._wbufsize = bufsize
        # We use BytesIO for the read buffer to avoid holding a list
        # of variously sized string objects which have been known to
        # fragment the heap due to how they are malloc()ed and often
        # realloc()ed down much smaller than their original allocation.
        self._rbuf = BytesIO()
        self._wbuf = []  # A list of strings
        self._wbuf_len = 0
        self._close = close

    def _getclosed(self):
        return self._sock is None
    closed = property(_getclosed, doc="True if the file is closed")

    def close(self):
        try:
            if self._sock:
                self.flush()
        finally:
            if self._close:
                self._sock.close()
            self._sock = None

    def __del__(self):
        try:
            self.close()
        except:
            # close() may fail if __init__ didn't complete
            pass

    def flush(self):
        if self._wbuf:
            data = b"".join(self._wbuf)
            self._wbuf = []
            self._wbuf_len = 0
            buffer_size = max(self._rbufsize, self.default_bufsize)
            data_size = len(data)
            write_offset = 0
            view = memoryview(data)
            try:
                while write_offset < data_size:
                    self._sock.sendall(view[write_offset:write_offset+buffer_size])
                    write_offset += buffer_size
            finally:
                if write_offset < data_size:
                    remainder = data[write_offset:]
                    del view, data  # explicit free
                    self._wbuf.append(remainder)
                    self._wbuf_len = len(remainder)

    def fileno(self):
        return self._sock.fileno()

    def write(self, data):
        data = bytes(data)  # XXX Should really reject non-string non-buffers
        if not data:
            return
        self._wbuf.append(data)
        self._wbuf_len += len(data)
        if (self._wbufsize == 0 or
            (self._wbufsize == 1 and b'\n' in data) or
            (self._wbufsize > 1 and self._wbuf_len >= self._wbufsize)):
            self.flush()

    def writelines(self, list):
        # XXX We could do better here for very long lists
        # XXX Should really reject non-string non-buffers
        # build a real list: Python 3's lazy filter() would be exhausted by sum()
        lines = [bytes(line) for line in list if line]
        self._wbuf_len += sum(map(len, lines))
        self._wbuf.extend(lines)
        if (self._wbufsize <= 1 or
            self._wbuf_len >= self._wbufsize):
            self.flush()

    def read(self, size=-1):
        # Use max, disallow tiny reads in a loop as they are very inefficient.
        # We never leave read() with any leftover data from a new recv() call
        # in our internal buffer.
        rbufsize = max(self._rbufsize, self.default_bufsize)
        # Our use of BytesIO rather than lists of string objects returned by
        # recv() minimizes memory usage and fragmentation that occurs when
        # rbufsize is large compared to the typical return value of recv().
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if size < 0:
            # Read until EOF
            self._rbuf = BytesIO()  # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(rbufsize)
                except error as e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or EOF seen, whichever comes first
            buf_len = buf.tell()
            if buf_len >= size:
                # Already have size bytes in our buffer? Extract and return.
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = BytesIO()
                self._rbuf.write(buf.read())
                return rv

            self._rbuf = BytesIO()  # reset _rbuf. we consume it via buf.
            while True:
                left = size - buf_len
                # recv() will malloc the amount of memory given as its
                # parameter even though it often returns much less data
                # than that. The returned data string is short lived
                # as we copy it into a BytesIO and free it. This avoids
                # fragmentation issues on many platforms.
                try:
                    data = self._sock.recv(left)
                except error as e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid buffer data copies when:
                    # - We have no data in our buffer.
                    # AND
                    # - Our call to recv returned exactly the
                    #   number of bytes we were asked to read.
                    return data
                if n == left:
                    buf.write(data)
                    del data  # explicit free
                    break
                assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                buf.write(data)
                buf_len += n
                del data  # explicit free
            #assert buf_len == buf.tell()
            return buf.getvalue()

    def readline(self, size=-1):
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if buf.tell() > 0:
            # check if we already have it in our buffer
            buf.seek(0)
            bline = buf.readline(size)
            if bline.endswith(b'\n') or len(bline) == size:
                self._rbuf = BytesIO()
                self._rbuf.write(buf.read())
                return bline
            del bline
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                buf.seek(0)
                buffers = [buf.read()]
                self._rbuf = BytesIO()  # reset _rbuf. we consume it via buf.
                data = None
                recv = self._sock.recv
                while True:
                    try:
                        while data != b"\n":
                            data = recv(1)
                            if not data:
                                break
                            buffers.append(data)
                    except error as e:
                        # The try..except to catch EINTR was moved outside the
                        # recv loop to avoid the per byte overhead.
                        if e.args[0] == EINTR:
                            continue
                        raise
                    break
                return b"".join(buffers)

            buf.seek(0, 2)  # seek end
            self._rbuf = BytesIO()  # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error as e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                nl = data.find(b'\n')
                if nl >= 0:
                    nl += 1
                    buf.write(data[:nl])
                    self._rbuf.write(data[nl:])
                    del data
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            buf.seek(0, 2)  # seek end
            buf_len = buf.tell()
            if buf_len >= size:
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = BytesIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = BytesIO()  # reset _rbuf. we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except error as e:
                    if e.args[0] == EINTR:
                        continue
                    raise
                if not data:
                    break
                left = size - buf_len
                # did we just receive a newline?
                nl = data.find(b'\n', 0, left)
                if nl >= 0:
                    nl += 1
                    # save the excess data to _rbuf
                    self._rbuf.write(data[nl:])
                    if buf_len:
                        buf.write(data[:nl])
                        break
                    else:
                        # Shortcut. Avoid data copy through buf when returning
                        # a substring of our first recv().
                        return data[:nl]
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut. Avoid data copy through buf when
                    # returning exactly all of our first recv().
                    return data
                if n >= left:
                    buf.write(data[:left])
                    self._rbuf.write(data[left:])
                    break
                buf.write(data)
                buf_len += n
                #assert buf_len == buf.tell()
            return buf.getvalue()

    def readlines(self, sizehint=0):
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list

    # Iterator protocols

    def __iter__(self):
        return self

    def __next__(self):  # Python 3 iterator protocol (was next() in the 2.x original)
        line = self.readline()
        if not line:
            raise StopIteration
        return line
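Handler.setup() wraps the pyOpenSSL Connection in this fileobject so SimpleHTTPRequestHandler's usual rfile/wfile interface keeps working over TLS. A quick standalone check of the buffering behavior, using socket.socketpair() as a stand-in for the TLS connection:

import socket
from oml.node.sslsocket import fileobject  # module added by this commit

a, b = socket.socketpair()
w = fileobject(a, 'wb')
r = fileobject(b, 'rb')

w.write(b'GET / HTTP/1.1\r\n\r\n')
w.flush()                 # write() only buffers; flush() performs the sendall
assert r.readline() == b'GET / HTTP/1.1\r\n'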