"""Event loop using a selector and related classes.

A selector is a "notify-when-ready" multiplexer.  For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""

__all__ = ('BaseSelectorEventLoop',)

import collections
import errno
import functools
import selectors
import socket
import warnings
import weakref

try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None

from . import base_events
from . import constants
from . import events
from . import futures
from . import protocols
from . import sslproto
from . import transports
from .log import logger


def _test_selector_event(selector, fd, event):
    # Test if the selector is monitoring 'event' events
    # for the file descriptor 'fd'.
    try:
        key = selector.get_key(fd)
    except KeyError:
        return False
    else:
        return bool(key.events & event)


class BaseSelectorEventLoop(base_events.BaseEventLoop):
    """Selector event loop.

    See events.EventLoop for API specification.
    """

    def __init__(self, selector=None):
        super().__init__()

        if selector is None:
            selector = selectors.DefaultSelector()
        logger.debug('Using selector: %s', selector.__class__.__name__)
        self._selector = selector
        self._make_self_pipe()
        self._transports = weakref.WeakValueDictionary()

    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        return _SelectorSocketTransport(self, sock, protocol, waiter,
                                        extra, server)

    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        _SelectorSocketTransport(self, rawsock, ssl_protocol,
                                 extra=extra, server=server)
        return ssl_protocol._app_transport

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        return _SelectorDatagramTransport(self, sock, protocol,
                                          address, waiter, extra)

    def close(self):
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self.is_closed():
            return
        self._close_self_pipe()
        super().close()
        if self._selector is not None:
            self._selector.close()
            self._selector = None

    def _close_self_pipe(self):
        self._remove_reader(self._ssock.fileno())
        self._ssock.close()
        self._ssock = None
        self._csock.close()
        self._csock = None
        self._internal_fds -= 1

    def _make_self_pipe(self):
        # A self-socket, really. :-)
        self._ssock, self._csock = socket.socketpair()
        self._ssock.setblocking(False)
        self._csock.setblocking(False)
        self._internal_fds += 1
        self._add_reader(self._ssock.fileno(), self._read_from_self)

    def _process_self_data(self, data):
        pass

    def _read_from_self(self):
        while True:
            try:
                data = self._ssock.recv(4096)
                if not data:
                    break
                self._process_self_data(data)
            except InterruptedError:
                continue
            except BlockingIOError:
                break

    def _write_to_self(self):
        # This may be called from a different thread, possibly after
        # _close_self_pipe() has been called or even while it is
        # running.  Guard for self._csock being None or closed.  When
        # a socket is closed, send() raises OSError (with errno set to
        # EBADF, but let's not rely on the exact error code).
        csock = self._csock
        if csock is not None:
            try:
                csock.send(b'\0')
            except OSError:
                if self._debug:
                    logger.debug("Failed to write a null byte into the "
                                 "self-pipe socket",
                                 exc_info=True)
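
    # Note on the self-pipe above: the loop always watches self._ssock for
    # readability, so code running in another thread (or a signal handler in
    # the unix_events subclass) can wake a selector blocked in select() by
    # calling _write_to_self(), which sends a single null byte to
    # self._csock.  call_soon_threadsafe() relies on this mechanism.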

    def _start_serving(self, protocol_factory, sock,
                       sslcontext=None, server=None, backlog=100,
                       ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
        self._add_reader(sock.fileno(), self._accept_connection,
                         protocol_factory, sock, sslcontext, server, backlog,
                         ssl_handshake_timeout)

    def _accept_connection(
            self, protocol_factory, sock,
            sslcontext=None, server=None, backlog=100,
            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
        # This method is only called once for each event loop tick where the
        # listening socket has triggered an EVENT_READ. There may be multiple
        # connections waiting for an .accept() so it is called in a loop.
        # See https://bugs.python.org/issue27906 for more details.
        for _ in range(backlog):
            try:
                conn, addr = sock.accept()
                if self._debug:
                    logger.debug("%r got a new connection from %r: %r",
                                 server, addr, conn)
                conn.setblocking(False)
            except (BlockingIOError, InterruptedError, ConnectionAbortedError):
                # Early exit because the socket accept buffer is empty.
                return None
            except OSError as exc:
                # There's nowhere to send the error, so just log it.
                if exc.errno in (errno.EMFILE, errno.ENFILE,
                                 errno.ENOBUFS, errno.ENOMEM):
                    # Some platforms (e.g. Linux) keep reporting the FD as
                    # ready, so we remove the read handler temporarily.
                    # We'll try again in a while.
                    self.call_exception_handler({
                        'message': 'socket.accept() out of system resource',
                        'exception': exc,
                        'socket': sock,
                    })
                    self._remove_reader(sock.fileno())
                    self.call_later(constants.ACCEPT_RETRY_DELAY,
                                    self._start_serving,
                                    protocol_factory, sock, sslcontext, server,
                                    backlog, ssl_handshake_timeout)
                else:
                    raise  # The event loop will catch, log and ignore it.
            else:
                extra = {'peername': addr}
                accept = self._accept_connection2(
                    protocol_factory, conn, extra, sslcontext, server,
                    ssl_handshake_timeout)
                self.create_task(accept)
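
    # _accept_connection2() is scheduled as a separate Task (see
    # create_task() above) so that per-connection setup -- in particular a
    # slow SSL handshake -- does not stall the accept loop or the event loop.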

    async def _accept_connection2(
            self, protocol_factory, conn, extra,
            sslcontext=None, server=None,
            ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT):
        protocol = None
        transport = None
        try:
            protocol = protocol_factory()
            waiter = self.create_future()
            if sslcontext:
                transport = self._make_ssl_transport(
                    conn, protocol, sslcontext, waiter=waiter,
                    server_side=True, extra=extra, server=server,
                    ssl_handshake_timeout=ssl_handshake_timeout)
            else:
                transport = self._make_socket_transport(
                    conn, protocol, waiter=waiter, extra=extra,
                    server=server)

            try:
                await waiter
            except:
                transport.close()
                raise

            # It's now up to the protocol to handle the connection.
        except Exception as exc:
            if self._debug:
                context = {
                    'message':
                        'Error on transport creation for incoming connection',
                    'exception': exc,
                }
                if protocol is not None:
                    context['protocol'] = protocol
                if transport is not None:
                    context['transport'] = transport
                self.call_exception_handler(context)

    def _ensure_fd_no_transport(self, fd):
        fileno = fd
        if not isinstance(fileno, int):
            try:
                fileno = int(fileno.fileno())
            except (AttributeError, TypeError, ValueError):
                # This code matches selectors._fileobj_to_fd function.
                raise ValueError(f"Invalid file object: {fd!r}") from None
        try:
            transport = self._transports[fileno]
        except KeyError:
            pass
        else:
            if not transport.is_closing():
                raise RuntimeError(
                    f'File descriptor {fd!r} is used by transport '
                    f'{transport!r}')
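
    # The helpers below keep the selector registration for each fd in sync
    # with the (reader, writer) pair stored as the SelectorKey's data: each
    # slot is either None or the events.Handle to run when the fd becomes
    # ready for reading or writing.  _process_events() unpacks the same pair.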

    def _add_reader(self, fd, callback, *args):
        self._check_closed()
        handle = events.Handle(callback, args, self, None)
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            self._selector.register(fd, selectors.EVENT_READ,
                                    (handle, None))
        else:
            mask, (reader, writer) = key.events, key.data
            self._selector.modify(fd, mask | selectors.EVENT_READ,
                                  (handle, writer))
            if reader is not None:
                reader.cancel()

    def _remove_reader(self, fd):
        if self.is_closed():
            return False
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False
        else:
            mask, (reader, writer) = key.events, key.data
            mask &= ~selectors.EVENT_READ
            if not mask:
                self._selector.unregister(fd)
            else:
                self._selector.modify(fd, mask, (None, writer))

            if reader is not None:
                reader.cancel()
                return True
            else:
                return False

    def _add_writer(self, fd, callback, *args):
        self._check_closed()
        handle = events.Handle(callback, args, self, None)
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            self._selector.register(fd, selectors.EVENT_WRITE,
                                    (None, handle))
        else:
            mask, (reader, writer) = key.events, key.data
            self._selector.modify(fd, mask | selectors.EVENT_WRITE,
                                  (reader, handle))
            if writer is not None:
                writer.cancel()

    def _remove_writer(self, fd):
        """Remove a writer callback."""
        if self.is_closed():
            return False
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False
        else:
            mask, (reader, writer) = key.events, key.data
            # Remove both writer and connector.
            mask &= ~selectors.EVENT_WRITE
            if not mask:
                self._selector.unregister(fd)
            else:
                self._selector.modify(fd, mask, (reader, None))

            if writer is not None:
                writer.cancel()
                return True
            else:
                return False

    def add_reader(self, fd, callback, *args):
        """Add a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._add_reader(fd, callback, *args)

    def remove_reader(self, fd):
        """Remove a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_reader(fd)

    def add_writer(self, fd, callback, *args):
        """Add a writer callback."""
        self._ensure_fd_no_transport(fd)
        return self._add_writer(fd, callback, *args)

    def remove_writer(self, fd):
        """Remove a writer callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_writer(fd)
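
    # The sock_*() coroutines below share one pattern: create a future, try
    # the socket operation once, and if it would block (BlockingIOError)
    # register a reader/writer callback that retries the operation and
    # eventually sets the future's result or exception.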

    async def sock_recv(self, sock, n):
        """Receive data from the socket.

        The return value is a bytes object representing the data received.
        The maximum amount of data to be received at once is specified by
        *n*.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = self.create_future()
        self._sock_recv(fut, None, sock, n)
        return await fut

    def _sock_recv(self, fut, registered_fd, sock, n):
        # _sock_recv() can add itself as an I/O callback if the operation
        # can't be done immediately. Don't use it directly, call sock_recv().
        if registered_fd is not None:
            # Remove the callback early.  It should be rare that the
            # selector says the fd is ready but the call still returns
            # EAGAIN, and I am willing to take a hit in that case in
            # order to simplify the common case.
            self.remove_reader(registered_fd)
        if fut.cancelled():
            return
        try:
            data = sock.recv(n)
        except (BlockingIOError, InterruptedError):
            fd = sock.fileno()
            self.add_reader(fd, self._sock_recv, fut, fd, sock, n)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(data)

    async def sock_recv_into(self, sock, buf):
        """Receive data from the socket.

        The received data is written into *buf* (a writable buffer).
        The return value is the number of bytes written.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = self.create_future()
        self._sock_recv_into(fut, None, sock, buf)
        return await fut

    def _sock_recv_into(self, fut, registered_fd, sock, buf):
        # _sock_recv_into() can add itself as an I/O callback if the operation
        # can't be done immediately. Don't use it directly, call
        # sock_recv_into().
        if registered_fd is not None:
            # Remove the callback early.  It should be rare that the
            # selector says the FD is ready but the call still returns
            # EAGAIN, and I am willing to take a hit in that case in
            # order to simplify the common case.
            self.remove_reader(registered_fd)
        if fut.cancelled():
            return
        try:
            nbytes = sock.recv_into(buf)
        except (BlockingIOError, InterruptedError):
            fd = sock.fileno()
            self.add_reader(fd, self._sock_recv_into, fut, fd, sock, buf)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(nbytes)

    async def sock_sendall(self, sock, data):
        """Send data to the socket.

        The socket must be connected to a remote socket.  This method
        continues to send data from *data* until either all data has been
        sent or an error occurs.  None is returned on success.  On error, an
        exception is raised, and there is no way to determine how much data,
        if any, was successfully processed by the receiving end of the
        connection.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = self.create_future()
        if data:
            self._sock_sendall(fut, None, sock, data)
        else:
            fut.set_result(None)
        return await fut

    def _sock_sendall(self, fut, registered_fd, sock, data):
        if registered_fd is not None:
            self.remove_writer(registered_fd)
        if fut.cancelled():
            return

        try:
            n = sock.send(data)
        except (BlockingIOError, InterruptedError):
            n = 0
        except Exception as exc:
            fut.set_exception(exc)
            return

        if n == len(data):
            fut.set_result(None)
        else:
            if n:
                data = data[n:]
            fd = sock.fileno()
            self.add_writer(fd, self._sock_sendall, fut, fd, sock, data)

    async def sock_connect(self, sock, address):
        """Connect to a remote socket at address.

        This method is a coroutine.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")

        if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
            resolved = await self._ensure_resolved(
                address, family=sock.family, proto=sock.proto, loop=self)
            _, _, _, _, address = resolved[0]

        fut = self.create_future()
        self._sock_connect(fut, sock, address)
        return await fut

    def _sock_connect(self, fut, sock, address):
        fd = sock.fileno()
        try:
            sock.connect(address)
        except (BlockingIOError, InterruptedError):
            # Issue #23618: When the C function connect() fails with EINTR,
            # the connection runs in the background.  We have to wait until
            # the socket becomes writable to be notified when the connection
            # succeeds or fails.
            fut.add_done_callback(
                functools.partial(self._sock_connect_done, fd))
            self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(None)

    def _sock_connect_done(self, fd, fut):
        self.remove_writer(fd)

    def _sock_connect_cb(self, fut, sock, address):
        if fut.cancelled():
            return

        try:
            err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                # Jump to any except clause below.
                raise OSError(err, f'Connect call failed {address}')
        except (BlockingIOError, InterruptedError):
            # socket is still registered, the callback will be retried later
            pass
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(None)
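
    # Once the socket becomes writable, the result of the non-blocking
    # connect() is read back with getsockopt(SOL_SOCKET, SO_ERROR) above:
    # zero means the connection succeeded, any other value is raised as
    # OSError and stored on the future.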

    async def sock_accept(self, sock):
        """Accept a connection.

        The socket must be bound to an address and listening for connections.
        The return value is a pair (conn, address) where conn is a new socket
        object usable to send and receive data on the connection, and address
        is the address bound to the socket on the other end of the connection.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        fut = self.create_future()
        self._sock_accept(fut, False, sock)
        return await fut

    def _sock_accept(self, fut, registered, sock):
        fd = sock.fileno()
        if registered:
            self.remove_reader(fd)
        if fut.cancelled():
            return
        try:
            conn, address = sock.accept()
            conn.setblocking(False)
        except (BlockingIOError, InterruptedError):
            self.add_reader(fd, self._sock_accept, fut, True, sock)
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result((conn, address))

    async def _sendfile_native(self, transp, file, offset, count):
        del self._transports[transp._sock_fd]
        resume_reading = transp.is_reading()
        transp.pause_reading()
        await transp._make_empty_waiter()
        try:
            return await self.sock_sendfile(transp._sock, file, offset, count,
                                            fallback=False)
        finally:
            transp._reset_empty_waiter()
            if resume_reading:
                transp.resume_reading()
            self._transports[transp._sock_fd] = transp
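
    # _process_events() is called by the base event loop's _run_once() with
    # the (key, mask) list returned by selector.select(); it schedules the
    # registered reader/writer handles via _add_callback() so they run in
    # the current loop iteration.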

    def _process_events(self, event_list):
        for key, mask in event_list:
            fileobj, (reader, writer) = key.fileobj, key.data
            if mask & selectors.EVENT_READ and reader is not None:
                if reader._cancelled:
                    self._remove_reader(fileobj)
                else:
                    self._add_callback(reader)
            if mask & selectors.EVENT_WRITE and writer is not None:
                if writer._cancelled:
                    self._remove_writer(fileobj)
                else:
                    self._add_callback(writer)

    def _stop_serving(self, sock):
        self._remove_reader(sock.fileno())
        sock.close()


class _SelectorTransport(transports._FlowControlMixin,
                         transports.Transport):

    max_size = 256 * 1024  # Buffer size passed to recv().

    _buffer_factory = bytearray  # Constructs initial value for self._buffer.

    # Attribute used in the destructor: it must be set even if the constructor
    # is not called (see _SelectorSslTransport which may start by raising an
    # exception)
    _sock = None

    def __init__(self, loop, sock, protocol, extra=None, server=None):
        super().__init__(extra, loop)
        self._extra['socket'] = sock
        self._extra['sockname'] = sock.getsockname()
        if 'peername' not in self._extra:
            try:
                self._extra['peername'] = sock.getpeername()
            except socket.error:
                self._extra['peername'] = None
        self._sock = sock
        self._sock_fd = sock.fileno()

        self._protocol_connected = False
        self.set_protocol(protocol)

        self._server = server
        self._buffer = self._buffer_factory()
        self._conn_lost = 0  # Set when call to connection_lost scheduled.
        self._closing = False  # Set when close() called.
        if self._server is not None:
            self._server._attach()
        loop._transports[self._sock_fd] = self

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._sock is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        info.append(f'fd={self._sock_fd}')
        # test if the transport was closed
        if self._loop is not None and not self._loop.is_closed():
            polling = _test_selector_event(self._loop._selector,
                                           self._sock_fd, selectors.EVENT_READ)
            if polling:
                info.append('read=polling')
            else:
                info.append('read=idle')

            polling = _test_selector_event(self._loop._selector,
                                           self._sock_fd,
                                           selectors.EVENT_WRITE)
            if polling:
                state = 'polling'
            else:
                state = 'idle'

            bufsize = self.get_write_buffer_size()
            info.append(f'write=<{state}, bufsize={bufsize}>')
        return '<{}>'.format(' '.join(info))

    def abort(self):
        self._force_close(None)

    def set_protocol(self, protocol):
        self._protocol = protocol
        self._protocol_connected = True

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closing

    def close(self):
        if self._closing:
            return
        self._closing = True
        self._loop._remove_reader(self._sock_fd)
        if not self._buffer:
            self._conn_lost += 1
            self._loop._remove_writer(self._sock_fd)
            self._loop.call_soon(self._call_connection_lost, None)

    def __del__(self):
        if self._sock is not None:
            warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
                          source=self)
            self._sock.close()

    def _fatal_error(self, exc, message='Fatal error on transport'):
        # Should be called from exception handler only.
        if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self,
                'protocol': self._protocol,
            })
        self._force_close(exc)

    def _force_close(self, exc):
        if self._conn_lost:
            return
        if self._buffer:
            self._buffer.clear()
            self._loop._remove_writer(self._sock_fd)
        if not self._closing:
            self._closing = True
            self._loop._remove_reader(self._sock_fd)
        self._conn_lost += 1
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            if self._protocol_connected:
                self._protocol.connection_lost(exc)
        finally:
            self._sock.close()
            self._sock = None
            self._protocol = None
            self._loop = None
            server = self._server
            if server is not None:
                server._detach()
                self._server = None

    def get_write_buffer_size(self):
        return len(self._buffer)

    def _add_reader(self, fd, callback, *args):
        if self._closing:
            return

        self._loop._add_reader(fd, callback, *args)


class _SelectorSocketTransport(_SelectorTransport):

    _start_tls_compatible = True
    _sendfile_compatible = constants._SendfileMode.TRY_NATIVE

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):

        self._read_ready_cb = None
        super().__init__(loop, sock, protocol, extra, server)
        self._eof = False
        self._paused = False
        self._empty_waiter = None

        # Disable the Nagle algorithm -- small writes will be
        # sent without waiting for the TCP ACK.  This generally
        # decreases the latency (in some cases significantly.)
        base_events._set_nodelay(self._sock)

        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._add_reader,
                             self._sock_fd, self._read_ready)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def set_protocol(self, protocol):
        if isinstance(protocol, protocols.BufferedProtocol):
            self._read_ready_cb = self._read_ready__get_buffer
        else:
            self._read_ready_cb = self._read_ready__data_received

        super().set_protocol(protocol)
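
    # For a protocols.BufferedProtocol the transport calls recv_into()
    # directly on the buffer returned by protocol.get_buffer(), avoiding an
    # extra copy; other protocols receive a fresh bytes object through
    # data_received().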

    def is_reading(self):
        return not self._paused and not self._closing

    def pause_reading(self):
        if self._closing or self._paused:
            return
        self._paused = True
        self._loop._remove_reader(self._sock_fd)
        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        if self._closing or not self._paused:
            return
        self._paused = False
        self._add_reader(self._sock_fd, self._read_ready)
        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    def _read_ready(self):
        self._read_ready_cb()

    def _read_ready__get_buffer(self):
        if self._conn_lost:
            return

        try:
            buf = self._protocol.get_buffer(-1)
            if not len(buf):
                raise RuntimeError('get_buffer() returned an empty buffer')
        except Exception as exc:
            self._fatal_error(
                exc, 'Fatal error: protocol.get_buffer() call failed.')
            return

        try:
            nbytes = self._sock.recv_into(buf)
        except (BlockingIOError, InterruptedError):
            return
        except Exception as exc:
            self._fatal_error(exc, 'Fatal read error on socket transport')
            return

        if not nbytes:
            self._read_ready__on_eof()
            return

        try:
            self._protocol.buffer_updated(nbytes)
        except Exception as exc:
            self._fatal_error(
                exc, 'Fatal error: protocol.buffer_updated() call failed.')

    def _read_ready__data_received(self):
        if self._conn_lost:
            return
        try:
            data = self._sock.recv(self.max_size)
        except (BlockingIOError, InterruptedError):
            return
        except Exception as exc:
            self._fatal_error(exc, 'Fatal read error on socket transport')
            return

        if not data:
            self._read_ready__on_eof()
            return

        try:
            self._protocol.data_received(data)
        except Exception as exc:
            self._fatal_error(
                exc, 'Fatal error: protocol.data_received() call failed.')

    def _read_ready__on_eof(self):
        if self._loop.get_debug():
            logger.debug("%r received EOF", self)

        try:
            keep_open = self._protocol.eof_received()
        except Exception as exc:
            self._fatal_error(
                exc, 'Fatal error: protocol.eof_received() call failed.')
            return

        if keep_open:
            # We're keeping the connection open so the
            # protocol can write more, but we still can't
            # receive more, so remove the reader callback.
            self._loop._remove_reader(self._sock_fd)
        else:
            self.close()

    def write(self, data):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f'data argument must be a bytes-like object, '
                            f'not {type(data).__name__!r}')
        if self._eof:
            raise RuntimeError('Cannot call write() after write_eof()')
        if self._empty_waiter is not None:
            raise RuntimeError('unable to write; sendfile is in progress')
        if not data:
            return

        if self._conn_lost:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Optimization: try to send now.
            try:
                n = self._sock.send(data)
            except (BlockingIOError, InterruptedError):
                pass
            except Exception as exc:
                self._fatal_error(exc, 'Fatal write error on socket transport')
                return
            else:
                data = data[n:]
                if not data:
                    return
            # Not all was written; register write handler.
            self._loop._add_writer(self._sock_fd, self._write_ready)

        # Add it to the buffer.
        self._buffer.extend(data)
        self._maybe_pause_protocol()
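
    # Flow control: _maybe_pause_protocol() and _maybe_resume_protocol() come
    # from transports._FlowControlMixin.  When the outgoing buffer grows past
    # the high-water mark the protocol's pause_writing() is called; once
    # _write_ready() drains it below the low-water mark, resume_writing() is
    # called.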

    def _write_ready(self):
        assert self._buffer, 'Data should not be empty'

        if self._conn_lost:
            return
        try:
            n = self._sock.send(self._buffer)
        except (BlockingIOError, InterruptedError):
            pass
        except Exception as exc:
            self._loop._remove_writer(self._sock_fd)
            self._buffer.clear()
            self._fatal_error(exc, 'Fatal write error on socket transport')
            if self._empty_waiter is not None:
                self._empty_waiter.set_exception(exc)
        else:
            if n:
                del self._buffer[:n]
            self._maybe_resume_protocol()  # May append to buffer.
            if not self._buffer:
                self._loop._remove_writer(self._sock_fd)
                if self._empty_waiter is not None:
                    self._empty_waiter.set_result(None)
                if self._closing:
                    self._call_connection_lost(None)
                elif self._eof:
                    self._sock.shutdown(socket.SHUT_WR)

    def write_eof(self):
        if self._closing or self._eof:
            return
        self._eof = True
        if not self._buffer:
            self._sock.shutdown(socket.SHUT_WR)

    def can_write_eof(self):
        return True

    def _call_connection_lost(self, exc):
        super()._call_connection_lost(exc)
        if self._empty_waiter is not None:
            self._empty_waiter.set_exception(
                ConnectionError("Connection is closed by peer"))

    def _make_empty_waiter(self):
        if self._empty_waiter is not None:
            raise RuntimeError("Empty waiter is already set")
        self._empty_waiter = self._loop.create_future()
        if not self._buffer:
            self._empty_waiter.set_result(None)
        return self._empty_waiter

    def _reset_empty_waiter(self):
        self._empty_waiter = None


class _SelectorDatagramTransport(_SelectorTransport):

    _buffer_factory = collections.deque

    def __init__(self, loop, sock, protocol, address=None,
                 waiter=None, extra=None):
        super().__init__(loop, sock, protocol, extra)
        self._address = address
        self._loop.call_soon(self._protocol.connection_made, self)
        # only start reading when connection_made() has been called
        self._loop.call_soon(self._add_reader,
                             self._sock_fd, self._read_ready)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def get_write_buffer_size(self):
        return sum(len(data) for data, _ in self._buffer)

    def _read_ready(self):
        if self._conn_lost:
            return
        try:
            data, addr = self._sock.recvfrom(self.max_size)
        except (BlockingIOError, InterruptedError):
            pass
        except OSError as exc:
            self._protocol.error_received(exc)
        except Exception as exc:
            self._fatal_error(exc, 'Fatal read error on datagram transport')
        else:
            self._protocol.datagram_received(data, addr)

    def sendto(self, data, addr=None):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f'data argument must be a bytes-like object, '
                            f'not {type(data).__name__!r}')
        if not data:
            return

        if self._address and addr not in (None, self._address):
            raise ValueError(
                f'Invalid address: must be None or {self._address}')

        if self._conn_lost and self._address:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        if not self._buffer:
            # Attempt to send it right away first.
            try:
                if self._address:
                    self._sock.send(data)
                else:
                    self._sock.sendto(data, addr)
                return
            except (BlockingIOError, InterruptedError):
                self._loop._add_writer(self._sock_fd, self._sendto_ready)
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except Exception as exc:
                self._fatal_error(
                    exc, 'Fatal write error on datagram transport')
                return

        # Ensure that what we buffer is immutable.
        self._buffer.append((bytes(data), addr))
        self._maybe_pause_protocol()
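
    # Unlike the stream transport's bytearray buffer, the datagram buffer is
    # a deque of (bytes, addr) pairs: each datagram is kept whole and is sent
    # (or dropped on error) as a unit, never split across send() calls.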

    def _sendto_ready(self):
        while self._buffer:
            data, addr = self._buffer.popleft()
            try:
                if self._address:
                    self._sock.send(data)
                else:
                    self._sock.sendto(data, addr)
            except (BlockingIOError, InterruptedError):
                self._buffer.appendleft((data, addr))  # Try again later.
                break
            except OSError as exc:
                self._protocol.error_received(exc)
                return
            except Exception as exc:
                self._fatal_error(
                    exc, 'Fatal write error on datagram transport')
                return

        self._maybe_resume_protocol()  # May append to buffer.
        if not self._buffer:
            self._loop._remove_writer(self._sock_fd)
            if self._closing:
                self._call_connection_lost(None)