add Linux_i686
This commit is contained in:
parent
75f9a2fcbc
commit
95cd9b11f2
1644 changed files with 564260 additions and 0 deletions
|
|
@ -0,0 +1,12 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Twisted Internet: Asynchronous I/O and Events.
|
||||
|
||||
Twisted Internet is a collection of compatible event-loops for Python. It contains
|
||||
the code to dispatch events to interested observers and a portable API so that
|
||||
observers need not care about which event loop is running. Thus, it is possible
|
||||
to use the same code for different loops, from Twisted's basic, yet portable,
|
||||
select-based loop to the loops of various GUI toolkits like GTK+ or Tk.
|
||||
"""
|
||||
|
|
@ -0,0 +1,65 @@
|
|||
# -*- test-case-name: twisted.test.test_process -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Cross-platform process-related functionality used by different
|
||||
L{IReactorProcess} implementations.
|
||||
"""
|
||||
|
||||
from twisted.python.reflect import qual
|
||||
from twisted.python.deprecate import getWarningMethod
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.python.log import err
|
||||
from twisted.persisted.styles import Ephemeral
|
||||
|
||||
# Deprecation warning text emitted (via twisted.python.deprecate) when an
# IProcessProtocol implementation lacks a processExited method; %s is
# filled with the protocol's fully-qualified class name.
_missingProcessExited = ("Since Twisted 8.2, IProcessProtocol.processExited "
                         "is required. %s must implement it.")
|
||||
|
||||
class BaseProcess(Ephemeral):
    """
    Shared, platform-independent plumbing for process transports.

    Tracks the child's pid and exit status and takes care of delivering
    C{processExited} / C{processEnded} notifications to the protocol
    exactly once each, shielding the reactor from exceptions raised by
    protocol callbacks.
    """
    # Class-level defaults for per-instance state.
    pid = None
    status = None
    lostProcess = 0
    proto = None

    def __init__(self, protocol):
        self.proto = protocol


    def _callProcessExited(self, reason):
        """
        Deliver the exit notification to C{self.proto.processExited}.

        If the protocol predates the Twisted 8.2 requirement and has no
        C{processExited} method, emit a deprecation warning instead of
        crashing.  Exceptions from the callback are logged, not propagated.
        """
        _marker = object()
        handler = getattr(self.proto, 'processExited', _marker)
        if handler is not _marker:
            try:
                handler(Failure(reason))
            except:
                err(None, "unexpected error in processExited")
            return
        # Old-style protocol: warn instead of calling the missing method.
        warn = getWarningMethod()
        warn(_missingProcessExited % (qual(self.proto.__class__),),
             DeprecationWarning, stacklevel=0)


    def processEnded(self, status):
        """
        This is called when the child terminates.
        """
        self.pid = None
        self.status = status
        self.lostProcess += 1
        self._callProcessExited(self._getReason(status))
        self.maybeCallProcessEnded()


    def maybeCallProcessEnded(self):
        """
        Call processEnded on protocol after final cleanup.
        """
        if self.proto is None:
            # Already notified (or never connected); notify at most once.
            return
        # Clear self.proto before calling out, so a reentrant call from the
        # protocol cannot trigger a second notification.
        proto, self.proto = self.proto, None
        try:
            proto.processEnded(Failure(self._getReason(self.status)))
        except:
            err(None, "unexpected error in processEnded")
|
||||
|
|
@ -0,0 +1,388 @@
|
|||
# -*- test-case-name: twisted.test.test_process -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
http://isometric.sixsided.org/_/gates_in_the_head/
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
# Win32 imports
|
||||
import win32api
|
||||
import win32con
|
||||
import win32event
|
||||
import win32file
|
||||
import win32pipe
|
||||
import win32process
|
||||
import win32security
|
||||
|
||||
import pywintypes
|
||||
|
||||
# Security attributes for pipes: bInheritHandle = 1 marks handles created
# with these attributes as inheritable, so a spawned child process can use
# them as its standard handles.
PIPE_ATTRS_INHERITABLE = win32security.SECURITY_ATTRIBUTES()
PIPE_ATTRS_INHERITABLE.bInheritHandle = 1
|
||||
|
||||
from zope.interface import implements
|
||||
from twisted.internet.interfaces import IProcessTransport, IConsumer, IProducer
|
||||
|
||||
from twisted.python.win32 import quoteArguments
|
||||
|
||||
from twisted.internet import error
|
||||
|
||||
from twisted.internet import _pollingfile
|
||||
from twisted.internet._baseprocess import BaseProcess
|
||||
|
||||
def debug(msg):
    """
    Print C{msg} and immediately flush stdout.

    Development/debugging helper only; the flush makes output visible right
    away even when stdout is block-buffered (e.g. redirected to a file).
    """
    import sys
    print msg
    sys.stdout.flush()
|
||||
|
||||
class _Reaper(_pollingfile._PollableResource):
    """
    Pollable resource that watches a child process handle and fires the
    transport's C{processEnded} once the child terminates.

    Registered on the same polling timer as the stdio pipe watchers.
    """

    def __init__(self, proc):
        # proc: the Process transport whose hProcess handle we poll.
        self.proc = proc


    def checkWork(self):
        # WaitForSingleObject with a 0ms timeout is a non-blocking probe:
        # anything other than WAIT_OBJECT_0 means the process handle is not
        # yet signaled, i.e. the child is still running.
        if win32event.WaitForSingleObject(self.proc.hProcess, 0) != win32event.WAIT_OBJECT_0:
            return 0
        exitCode = win32process.GetExitCodeProcess(self.proc.hProcess)
        # Stop polling before notifying, so we never report exit twice.
        self.deactivate()
        self.proc.processEnded(exitCode)
        return 0
|
||||
|
||||
|
||||
def _findShebang(filename):
    """
    Look for a #! line, and return the value following the #! if one exists,
    or None if this file is not a script.

    I don't know if there are any conventions for quoting in Windows shebang
    lines, so this doesn't support any; therefore, you may not pass any
    arguments to scripts invoked as filters.  That's probably wrong, so if
    somebody knows more about the cultural expectations on Windows, please
    feel free to fix.

    This shebang line support was added in support of the CGI tests;
    appropriately enough, I determined that shebang lines are culturally
    accepted in the Windows world through this page::

        http://www.cgi101.com/learn/connect/winxp.html

    @param filename: str representing a filename

    @return: a str representing another filename, or C{None} if no shebang
        line was found.
    """
    # 'U' (universal newlines) so both DOS and Unix line endings work.
    f = file(filename, 'rU')
    try:
        if f.read(2) == '#!':
            # Cap readline so a pathological first line cannot consume
            # unbounded memory.
            exe = f.readline(1024).strip('\n')
            return exe
    finally:
        # The previous implementation leaked this file handle; always close
        # it, whether or not a shebang was found.
        f.close()
|
||||
|
||||
def _invalidWin32App(pywinerr):
|
||||
"""
|
||||
Determine if a pywintypes.error is telling us that the given process is
|
||||
'not a valid win32 application', i.e. not a PE format executable.
|
||||
|
||||
@param pywinerr: a pywintypes.error instance raised by CreateProcess
|
||||
|
||||
@return: a boolean
|
||||
"""
|
||||
|
||||
# Let's do this better in the future, but I have no idea what this error
|
||||
# is; MSDN doesn't mention it, and there is no symbolic constant in
|
||||
# win32process module that represents 193.
|
||||
|
||||
return pywinerr.args[0] == 193
|
||||
|
||||
class Process(_pollingfile._PollingTimer, BaseProcess):
    """A process that integrates with the Twisted event loop.

    If your subprocess is a python program, you need to:

     - Run python.exe with the '-u' command line option - this turns on
       unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
       http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903

     - If you don't want Windows messing with data passed over
       stdin/out/err, set the pipes to be in binary mode::

        import os, sys, mscvrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)

    """
    implements(IProcessTransport, IConsumer, IProducer)

    # Count of pipe-closed notifications received so far; cleanup in
    # maybeCallProcessEnded waits until all three stdio pipes (and the
    # process handle itself) have reported closed.
    closedNotifies = 0

    def __init__(self, reactor, protocol, command, args, environment, path):
        """
        Create a new child process.
        """
        _pollingfile._PollingTimer.__init__(self, reactor)
        BaseProcess.__init__(self, protocol)

        # security attributes for pipes: inheritable so the child can use
        # its ends of the pipes as standard handles.
        sAttrs = win32security.SECURITY_ATTRIBUTES()
        sAttrs.bInheritHandle = 1

        # create the pipes which will connect to the secondary process
        self.hStdoutR, hStdoutW = win32pipe.CreatePipe(sAttrs, 0)
        self.hStderrR, hStderrW = win32pipe.CreatePipe(sAttrs, 0)
        hStdinR, self.hStdinW = win32pipe.CreatePipe(sAttrs, 0)

        # Make writes to the child's stdin non-blocking, so the polling
        # write pipe never stalls the reactor.
        win32pipe.SetNamedPipeHandleState(self.hStdinW,
                                          win32pipe.PIPE_NOWAIT,
                                          None,
                                          None)

        # set the info structure for the new process: the child-side pipe
        # ends become its standard handles.
        StartupInfo = win32process.STARTUPINFO()
        StartupInfo.hStdOutput = hStdoutW
        StartupInfo.hStdError = hStderrW
        StartupInfo.hStdInput = hStdinR
        StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES

        # Create new handles whose inheritance property is false; the
        # parent-side ends must not leak into the child, otherwise the
        # pipes would never report closed while the child holds duplicates.
        currentPid = win32api.GetCurrentProcess()

        tmp = win32api.DuplicateHandle(currentPid, self.hStdoutR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdoutR)
        self.hStdoutR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStderrR, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStderrR)
        self.hStderrR = tmp

        tmp = win32api.DuplicateHandle(currentPid, self.hStdinW, currentPid, 0, 0,
                                       win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(self.hStdinW)
        self.hStdinW = tmp

        # Add the specified environment to the current environment - this is
        # necessary because certain operations are only supported on Windows
        # if certain environment variables are present.

        env = os.environ.copy()
        env.update(environment or {})

        cmdline = quoteArguments(args)
        # TODO: error detection here. See #2787 and #4184.
        def doCreate():
            # CreateProcess returns (hProcess, hThread, pid, threadId); the
            # thread id is not used.
            self.hProcess, self.hThread, self.pid, dwTid = win32process.CreateProcess(
                command, cmdline, None, None, 1, 0, env, path, StartupInfo)
        try:
            try:
                doCreate()
            except TypeError, e:
                # win32process.CreateProcess cannot deal with mixed
                # str/unicode environment, so we make it all Unicode
                if e.args != ('All dictionary items must be strings, or '
                              'all must be unicode',):
                    raise
                newenv = {}
                for key, value in env.items():
                    newenv[unicode(key)] = unicode(value)
                env = newenv
                doCreate()
        except pywintypes.error, pwte:
            if not _invalidWin32App(pwte):
                # This behavior isn't _really_ documented, but let's make it
                # consistent with the behavior that is documented.
                raise OSError(pwte)
            else:
                # look for a shebang line.  Insert the original 'command'
                # (actually a script) into the new arguments list.
                sheb = _findShebang(command)
                if sheb is None:
                    raise OSError(
                        "%r is neither a Windows executable, "
                        "nor a script with a shebang line" % command)
                else:
                    args = list(args)
                    args.insert(0, command)
                    cmdline = quoteArguments(args)
                    origcmd = command
                    command = sheb
                    try:
                        # Let's try again.
                        doCreate()
                    except pywintypes.error, pwte2:
                        # d'oh, failed again!
                        if _invalidWin32App(pwte2):
                            raise OSError(
                                "%r has an invalid shebang line: "
                                "%r is not a valid executable" % (
                                    origcmd, sheb))
                        raise OSError(pwte2)

        # close handles which only the child will use
        win32file.CloseHandle(hStderrW)
        win32file.CloseHandle(hStdoutW)
        win32file.CloseHandle(hStdinR)

        # set up everything: wrap our ends of the pipes in pollable objects
        # driven by the _PollingTimer base class.
        self.stdout = _pollingfile._PollableReadPipe(
            self.hStdoutR,
            lambda data: self.proto.childDataReceived(1, data),
            self.outConnectionLost)

        self.stderr = _pollingfile._PollableReadPipe(
            self.hStderrR,
            lambda data: self.proto.childDataReceived(2, data),
            self.errConnectionLost)

        self.stdin = _pollingfile._PollableWritePipe(
            self.hStdinW, self.inConnectionLost)

        for pipewatcher in self.stdout, self.stderr, self.stdin:
            self._addPollableResource(pipewatcher)


        # notify protocol
        self.proto.makeConnection(self)

        # Poll the process handle so we learn when the child exits.
        self._addPollableResource(_Reaper(self))


    def signalProcess(self, signalID):
        """
        Send a "signal" to the child.

        Windows has no POSIX signals; "INT", "TERM" and "KILL" are all
        mapped to TerminateProcess, other values are silently ignored.

        @raise error.ProcessExitedAlready: if the process has already died.
        """
        if self.pid is None:
            raise error.ProcessExitedAlready()
        if signalID in ("INT", "TERM", "KILL"):
            win32process.TerminateProcess(self.hProcess, 1)


    def _getReason(self, status):
        """
        Map the child's exit code to the exception wrapped in the Failure
        passed to the protocol: 0 is a clean exit, anything else is a
        termination.
        """
        if status == 0:
            return error.ProcessDone(status)
        return error.ProcessTerminated(status)


    def write(self, data):
        """
        Write data to the process' stdin.

        @type data: C{str}
        """
        self.stdin.write(data)


    def writeSequence(self, seq):
        """
        Write data to the process' stdin.

        @type seq: C{list} of C{str}
        """
        self.stdin.writeSequence(seq)


    def writeToChild(self, fd, data):
        """
        Similar to L{ITransport.write} but also allows the file descriptor in
        the child process which will receive the bytes to be specified.

        This implementation is limited to writing to the child's standard input.

        @param fd: The file descriptor to which to write.  Only stdin (C{0}) is
            supported.
        @type fd: C{int}

        @param data: The bytes to write.
        @type data: C{str}

        @return: C{None}

        @raise KeyError: If C{fd} is anything other than the stdin file
            descriptor (C{0}).
        """
        if fd == 0:
            self.stdin.write(data)
        else:
            raise KeyError(fd)


    def closeChildFD(self, fd):
        """
        Close one of the child's standard file descriptors (0, 1 or 2).

        @raise NotImplementedError: for any other descriptor, since only
            standard-IO pipes exist on win32.
        """
        if fd == 0:
            self.closeStdin()
        elif fd == 1:
            self.closeStdout()
        elif fd == 2:
            self.closeStderr()
        else:
            raise NotImplementedError("Only standard-IO file descriptors available on win32")

    def closeStdin(self):
        """Close the process' stdin.
        """
        self.stdin.close()

    def closeStderr(self):
        """Close our read end of the process' stderr pipe."""
        self.stderr.close()

    def closeStdout(self):
        """Close our read end of the process' stdout pipe."""
        self.stdout.close()

    def loseConnection(self):
        """Close the process' stdout, in and err."""
        self.closeStdin()
        self.closeStdout()
        self.closeStderr()


    def outConnectionLost(self):
        # stdout pipe closed: tell the protocol, then count the close.
        self.proto.childConnectionLost(1)
        self.connectionLostNotify()


    def errConnectionLost(self):
        # stderr pipe closed: tell the protocol, then count the close.
        self.proto.childConnectionLost(2)
        self.connectionLostNotify()


    def inConnectionLost(self):
        # stdin pipe closed: tell the protocol, then count the close.
        self.proto.childConnectionLost(0)
        self.connectionLostNotify()


    def connectionLostNotify(self):
        """
        Will be called 3 times, by stdout/err threads and process handle.
        """
        self.closedNotifies += 1
        self.maybeCallProcessEnded()


    def maybeCallProcessEnded(self):
        """
        Release the process/thread handles and notify the protocol, but only
        once all three stdio pipes have closed AND the child's exit has been
        observed (lostProcess is set by BaseProcess.processEnded).
        """
        if self.closedNotifies == 3 and self.lostProcess:
            win32file.CloseHandle(self.hProcess)
            win32file.CloseHandle(self.hThread)
            self.hProcess = None
            self.hThread = None
            BaseProcess.maybeCallProcessEnded(self)


    # IConsumer
    def registerProducer(self, producer, streaming):
        # Delegate flow control to the stdin write pipe.
        self.stdin.registerProducer(producer, streaming)

    def unregisterProducer(self):
        self.stdin.unregisterProducer()

    # IProducer
    def pauseProducing(self):
        # Pausing/resuming maps onto the polling timer of the base class.
        self._pause()

    def resumeProducing(self):
        self._unpause()

    def stopProducing(self):
        self.loseConnection()

    def __repr__(self):
        """
        Return a string representation of the process.
        """
        return "<%s pid=%s>" % (self.__class__.__name__, self.pid)
|
||||
|
|
@ -0,0 +1,390 @@
|
|||
# -*- test-case-name: twisted.internet.test -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module provides base support for Twisted to interact with the glib/gtk
|
||||
mainloops.
|
||||
|
||||
The classes in this module should not be used directly, but rather you should
|
||||
import gireactor or gtk3reactor for GObject Introspection based applications,
|
||||
or glib2reactor or gtk2reactor for applications using legacy static bindings.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import sys
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.internet import base, posixbase, selectreactor
|
||||
from twisted.internet.interfaces import IReactorFDSet
|
||||
from twisted.python import log
|
||||
|
||||
|
||||
|
||||
def ensureNotImported(moduleNames, errorMessage, preventImports=()):
    """
    Check whether the given modules were imported, and if requested, ensure
    they will not be importable in the future.

    @param moduleNames: A list of module names we make sure aren't imported.
    @type moduleNames: C{list} of C{str}

    @param preventImports: A list of module names whose future imports should
        be prevented.  Defaults to preventing nothing.  (The default was
        changed from a mutable C{[]} to an immutable tuple to avoid the
        shared-mutable-default pitfall; behavior is unchanged.)
    @type preventImports: C{list} of C{str}

    @param errorMessage: Message to use when raising an C{ImportError}.
    @type errorMessage: C{str}

    @raises: C{ImportError} with given error message if a given module name
        has already been imported.
    """
    for name in moduleNames:
        # A None entry in sys.modules is an import-blocking placeholder
        # (installed below), not a real import, so it doesn't count.
        if sys.modules.get(name) is not None:
            raise ImportError(errorMessage)

    # Disable module imports to avoid potential problems: a None entry in
    # sys.modules makes the import machinery raise ImportError for that name.
    for name in preventImports:
        sys.modules[name] = None
|
||||
|
||||
|
||||
|
||||
class GlibWaker(posixbase._UnixWaker):
    """
    Run scheduled events after waking up.
    """

    def doRead(self):
        # Drain the wakeup byte(s) first, then run whatever was queued from
        # another thread (the reactor's _simulate runs pending timed calls).
        posixbase._UnixWaker.doRead(self)
        self.reactor._simulate()
|
||||
|
||||
|
||||
|
||||
@implementer(IReactorFDSet)
class GlibReactorBase(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
    """
    Base class for GObject event loop reactors.

    Notification for I/O events (reads and writes on file descriptors) is done
    by the the gobject-based event loop.  File descriptors are registered with
    gobject with the appropriate flags for read/write/disconnect notification.

    Time-based events, the results of C{callLater} and C{callFromThread}, are
    handled differently.  Rather than registering each event with gobject, a
    single gobject timeout is registered for the earliest scheduled event, the
    output of C{reactor.timeout()}.  For example, if there are timeouts in 1, 2
    and 3.4 seconds, a single timeout is registered for 1 second in the
    future.  When this timeout is hit, C{_simulate} is called, which calls the
    appropriate Twisted-level handlers, and a new timeout is added to gobject
    by the C{_reschedule} method.

    To handle C{callFromThread} events, we use a custom waker that calls
    C{_simulate} whenever it wakes up.

    @ivar _sources: A dictionary mapping L{FileDescriptor} instances to
        GSource handles.

    @ivar _reads: A set of L{FileDescriptor} instances currently monitored for
        reading.

    @ivar _writes: A set of L{FileDescriptor} instances currently monitored for
        writing.

    @ivar _simtag: A GSource handle for the next L{simulate} call.
    """

    # Install a waker that knows it needs to call C{_simulate} in order to run
    # callbacks queued from a thread:
    _wakerFactory = GlibWaker

    def __init__(self, glib_module, gtk_module, useGtk=False):
        """
        Initialize state, then bind the glib (and optionally gtk) main-loop
        entry points onto uniform C{_pending}/C{_iteration}/C{_crash}/C{_run}
        attributes so the rest of the class is binding-agnostic.
        """
        self._simtag = None
        self._reads = set()
        self._writes = set()
        self._sources = {}
        self._glib = glib_module
        self._gtk = gtk_module
        # Base __init__ installs the waker, so state must be set up first.
        posixbase.PosixReactorBase.__init__(self)

        self._source_remove = self._glib.source_remove
        self._timeout_add = self._glib.timeout_add

        def _mainquit():
            # Only quit gtk's loop if one is actually running.
            if self._gtk.main_level():
                self._gtk.main_quit()

        if useGtk:
            self._pending = self._gtk.events_pending
            self._iteration = self._gtk.main_iteration_do
            self._crash = _mainquit
            self._run = self._gtk.main
        else:
            self.context = self._glib.main_context_default()
            self._pending = self.context.pending
            self._iteration = self.context.iteration
            self.loop = self._glib.MainLoop()
            # idle_add defers quit until the loop is actually running.
            self._crash = lambda: self._glib.idle_add(self.loop.quit)
            self._run = self.loop.run


    def _handleSignals(self):
        # First, install SIGINT and friends:
        base._SignalReactorMixin._handleSignals(self)
        # Next, since certain versions of gtk will clobber our signal handler,
        # set all signal handlers again after the event loop has started to
        # ensure they're *really* set. We don't call this twice so we don't
        # leak file descriptors created in the SIGCHLD initialization:
        self.callLater(0, posixbase.PosixReactorBase._handleSignals, self)


    # The input_add function in pygtk1 checks for objects with a
    # 'fileno' method and, if present, uses the result of that method
    # as the input source. The pygtk2 input_add does not do this. The
    # function below replicates the pygtk1 functionality.

    # In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
    # g_io_add_watch() takes different condition bitfields than
    # gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
    # bug.
    def input_add(self, source, condition, callback):
        if hasattr(source, 'fileno'):
            # handle python objects
            def wrapper(ignored, condition):
                return callback(source, condition)
            fileno = source.fileno()
        else:
            fileno = source
            wrapper = callback
        return self._glib.io_add_watch(
            fileno, condition, wrapper,
            priority=self._glib.PRIORITY_DEFAULT_IDLE)


    def _ioEventCallback(self, source, condition):
        """
        Called by event loop when an I/O event occurs.
        """
        log.callWithLogger(
            source, self._doReadOrWrite, source, source, condition)
        return True  # True = don't auto-remove the source


    def _add(self, source, primary, other, primaryFlag, otherFlag):
        """
        Add the given L{FileDescriptor} for monitoring either for reading or
        writing. If the file is already monitored for the other operation, we
        delete the previous registration and re-register it for both reading
        and writing.
        """
        if source in primary:
            return
        flags = primaryFlag
        if source in other:
            # Already watched for the other direction: replace the single
            # GSource with one covering both conditions.
            self._source_remove(self._sources[source])
            flags |= otherFlag
        self._sources[source] = self.input_add(
            source, flags, self._ioEventCallback)
        primary.add(source)


    def addReader(self, reader):
        """
        Add a L{FileDescriptor} for monitoring of data available to read.
        """
        self._add(reader, self._reads, self._writes,
                  self.INFLAGS, self.OUTFLAGS)


    def addWriter(self, writer):
        """
        Add a L{FileDescriptor} for monitoring ability to write data.
        """
        self._add(writer, self._writes, self._reads,
                  self.OUTFLAGS, self.INFLAGS)


    def getReaders(self):
        """
        Retrieve the list of current L{FileDescriptor} monitored for reading.
        """
        return list(self._reads)


    def getWriters(self):
        """
        Retrieve the list of current L{FileDescriptor} monitored for writing.
        """
        return list(self._writes)


    def removeAll(self):
        """
        Remove monitoring for all registered L{FileDescriptor}s.
        """
        return self._removeAll(self._reads, self._writes)


    def _remove(self, source, primary, other, flags):
        """
        Remove monitoring the given L{FileDescriptor} for either reading or
        writing. If it's still monitored for the other operation, we
        re-register the L{FileDescriptor} for only that operation.

        C{flags} are the flags for the *other* (remaining) direction.
        """
        if source not in primary:
            return
        self._source_remove(self._sources[source])
        primary.remove(source)
        if source in other:
            # Re-register for just the remaining direction.
            self._sources[source] = self.input_add(
                source, flags, self._ioEventCallback)
        else:
            self._sources.pop(source)


    def removeReader(self, reader):
        """
        Stop monitoring the given L{FileDescriptor} for reading.
        """
        # OUTFLAGS: if still a writer, keep watching for writability only.
        self._remove(reader, self._reads, self._writes, self.OUTFLAGS)


    def removeWriter(self, writer):
        """
        Stop monitoring the given L{FileDescriptor} for writing.
        """
        # INFLAGS: if still a reader, keep watching for readability only.
        self._remove(writer, self._writes, self._reads, self.INFLAGS)


    def iterate(self, delay=0):
        """
        One iteration of the event loop, for trial's use.

        This is not used for actual reactor runs.
        """
        self.runUntilCurrent()
        while self._pending():
            self._iteration(0)


    def crash(self):
        """
        Crash the reactor.
        """
        posixbase.PosixReactorBase.crash(self)
        self._crash()


    def stop(self):
        """
        Stop the reactor.
        """
        posixbase.PosixReactorBase.stop(self)
        # The base implementation only sets a flag, to ensure shutting down is
        # not reentrant. Unfortunately, this flag is not meaningful to the
        # gobject event loop. We therefore call wakeUp() to ensure the event
        # loop will call back into Twisted once this iteration is done. This
        # will result in self.runUntilCurrent() being called, where the stop
        # flag will trigger the actual shutdown process, eventually calling
        # crash() which will do the actual gobject event loop shutdown.
        self.wakeUp()


    def run(self, installSignalHandlers=True):
        """
        Run the reactor.
        """
        self.callWhenRunning(self._reschedule)
        self.startRunning(installSignalHandlers=installSignalHandlers)
        # startRunning may have synchronously crashed/stopped the reactor;
        # only enter the glib loop if we are actually started.
        if self._started:
            self._run()


    def callLater(self, *args, **kwargs):
        """
        Schedule a C{DelayedCall}.
        """
        result = posixbase.PosixReactorBase.callLater(self, *args, **kwargs)
        # Make sure we'll get woken up at correct time to handle this new
        # scheduled call:
        self._reschedule()
        return result


    def _reschedule(self):
        """
        Schedule a glib timeout for C{_simulate}.
        """
        if self._simtag is not None:
            # Drop the previously registered timeout before adding a new one.
            self._source_remove(self._simtag)
            self._simtag = None
        timeout = self.timeout()
        if timeout is not None:
            self._simtag = self._timeout_add(
                int(timeout * 1000), self._simulate,
                priority=self._glib.PRIORITY_DEFAULT_IDLE)


    def _simulate(self):
        """
        Run timers, and then reschedule glib timeout for next scheduled event.
        """
        self.runUntilCurrent()
        self._reschedule()
|
||||
|
||||
|
||||
|
||||
class PortableGlibReactorBase(selectreactor.SelectReactor):
    """
    Base class for GObject event loop reactors that works on Windows.

    Sockets aren't supported by GObject's input_add on Win32, so this
    variant drives a select()-based reactor from a repeating glib timeout
    instead of registering file descriptors with glib.

    @ivar _simtag: A GSource handle for the next L{simulate} call, or
        C{None} if none is scheduled.
    """
    def __init__(self, glib_module, gtk_module, useGtk=False):
        """
        Bind the glib (and optionally gtk) main-loop entry points onto
        uniform C{_crash}/C{_run} attributes.
        """
        self._simtag = None
        self._glib = glib_module
        self._gtk = gtk_module
        selectreactor.SelectReactor.__init__(self)

        self._source_remove = self._glib.source_remove
        self._timeout_add = self._glib.timeout_add

        def _mainquit():
            # Only quit gtk's loop if one is actually running.
            if self._gtk.main_level():
                self._gtk.main_quit()

        if useGtk:
            self._crash = _mainquit
            self._run = self._gtk.main
        else:
            self.loop = self._glib.MainLoop()
            # idle_add defers quit until the loop is actually running.
            self._crash = lambda: self._glib.idle_add(self.loop.quit)
            self._run = self.loop.run


    def crash(self):
        """
        Crash the reactor, then stop the underlying glib/gtk loop.
        """
        selectreactor.SelectReactor.crash(self)
        self._crash()


    def run(self, installSignalHandlers=True):
        """
        Run the reactor: kick off the simulate cycle, then enter the
        glib/gtk main loop.
        """
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self._timeout_add(0, self.simulate)
        if self._started:
            self._run()


    def simulate(self):
        """
        Run simulation loops and reschedule callbacks.
        """
        if self._simtag is not None:
            self._source_remove(self._simtag)
        self.iterate()
        # self.timeout() returns None when no delayed calls are scheduled.
        # The previous code computed min(self.timeout(), 0.01) first, which
        # relied on Python 2 ordering None below numbers and raises
        # TypeError on Python 3; check for None before clamping.  Either
        # way we poll at least every 10ms so select() keeps getting served.
        timeout = self.timeout()
        if timeout is None:
            timeout = 0.01
        else:
            timeout = min(timeout, 0.01)
        self._simtag = self._timeout_add(
            int(timeout * 1000), self.simulate,
            priority=self._glib.PRIORITY_DEFAULT_IDLE)
|
||||
|
|
@ -0,0 +1,271 @@
|
|||
# -*- test-case-name: twisted.test.test_ssl -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module implements memory BIO based TLS support. It is the preferred
|
||||
implementation and will be used whenever pyOpenSSL 0.10 or newer is installed
|
||||
(whenever L{twisted.protocols.tls} is importable).
|
||||
|
||||
@since: 11.1
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from zope.interface import implementer
|
||||
from zope.interface import directlyProvides
|
||||
|
||||
from twisted.internet.interfaces import ITLSTransport, ISSLTransport
|
||||
from twisted.internet.abstract import FileDescriptor
|
||||
|
||||
from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
|
||||
|
||||
|
||||
class _BypassTLS(object):
|
||||
"""
|
||||
L{_BypassTLS} is used as the transport object for the TLS protocol object
|
||||
used to implement C{startTLS}. Its methods skip any TLS logic which
|
||||
C{startTLS} enables.
|
||||
|
||||
@ivar _base: A transport class L{_BypassTLS} has been mixed in with to which
|
||||
methods will be forwarded. This class is only responsible for sending
|
||||
bytes over the connection, not doing TLS.
|
||||
|
||||
@ivar _connection: A L{Connection} which TLS has been started on which will
|
||||
be proxied to by this object. Any method which has its behavior
|
||||
altered after C{startTLS} will be skipped in favor of the base class's
|
||||
implementation. This allows the TLS protocol object to have direct
|
||||
access to the transport, necessary to actually implement TLS.
|
||||
"""
|
||||
def __init__(self, base, connection):
|
||||
self._base = base
|
||||
self._connection = connection
|
||||
|
||||
|
||||
def __getattr__(self, name):
|
||||
"""
|
||||
Forward any extra attribute access to the original transport object.
|
||||
For example, this exposes C{getHost}, the behavior of which does not
|
||||
change after TLS is enabled.
|
||||
"""
|
||||
return getattr(self._connection, name)
|
||||
|
||||
|
||||
def write(self, data):
|
||||
"""
|
||||
Write some bytes directly to the connection.
|
||||
"""
|
||||
return self._base.write(self._connection, data)
|
||||
|
||||
|
||||
def writeSequence(self, iovec):
|
||||
"""
|
||||
Write a some bytes directly to the connection.
|
||||
"""
|
||||
return self._base.writeSequence(self._connection, iovec)
|
||||
|
||||
|
||||
def loseConnection(self, *args, **kwargs):
|
||||
"""
|
||||
Close the underlying connection.
|
||||
"""
|
||||
return self._base.loseConnection(self._connection, *args, **kwargs)
|
||||
|
||||
|
||||
def registerProducer(self, producer, streaming):
|
||||
"""
|
||||
Register a producer with the underlying connection.
|
||||
"""
|
||||
return self._base.registerProducer(self._connection, producer, streaming)
|
||||
|
||||
|
||||
def unregisterProducer(self):
|
||||
"""
|
||||
Unregister a producer with the underlying connection.
|
||||
"""
|
||||
return self._base.unregisterProducer(self._connection)
|
||||
|
||||
|
||||
|
||||
def startTLS(transport, contextFactory, normal, bypass):
    """
    Add a layer of SSL to a transport.

    @param transport: The transport which will be modified. This can either by
        a L{FileDescriptor<twisted.internet.abstract.FileDescriptor>} or a
        L{FileHandle<twisted.internet.iocpreactor.abstract.FileHandle>}. The
        actual requirements of this instance are that it have:

          - a C{_tlsClientDefault} attribute indicating whether the transport is
            a client (C{True}) or a server (C{False})
          - a settable C{TLS} attribute which can be used to mark the fact
            that SSL has been started
          - settable C{getHandle} and C{getPeerCertificate} attributes so
            these L{ISSLTransport} methods can be added to it
          - a C{protocol} attribute referring to the L{IProtocol} currently
            connected to the transport, which can also be set to a new
            L{IProtocol} for the transport to deliver data to

    @param contextFactory: An SSL context factory defining SSL parameters for
        the new SSL layer.
    @type contextFactory: L{twisted.internet.ssl.ContextFactory}

    @param normal: A flag indicating whether SSL will go in the same direction
        as the underlying transport goes. That is, if the SSL client will be
        the underlying client and the SSL server will be the underlying server.
        C{True} means it is the same, C{False} means they are switched.
    @type normal: L{bool}

    @param bypass: A transport base class to call methods on to bypass the new
        SSL layer (so that the SSL layer itself can send its bytes).
    @type bypass: L{type}
    """
    # Figure out which direction the SSL goes in.  If normal is True,
    # we'll go in the direction indicated by the subclass.  Otherwise,
    # we'll go the other way (client = not normal ^ _tlsClientDefault,
    # in other words).
    if normal:
        client = transport._tlsClientDefault
    else:
        client = not transport._tlsClientDefault

    # If we have a producer, unregister it, and then re-register it below once
    # we've switched to TLS mode, so it gets hooked up correctly:
    producer, streaming = None, None
    if transport.producer is not None:
        producer, streaming = transport.producer, transport.streamingProducer
        transport.unregisterProducer()

    # Interpose the in-memory TLS protocol between the wire transport and the
    # application protocol.
    tlsFactory = TLSMemoryBIOFactory(contextFactory, client, None)
    tlsProtocol = TLSMemoryBIOProtocol(tlsFactory, transport.protocol, False)
    transport.protocol = tlsProtocol

    transport.getHandle = tlsProtocol.getHandle
    transport.getPeerCertificate = tlsProtocol.getPeerCertificate

    # Mark the transport as secure.
    directlyProvides(transport, ISSLTransport)

    # Remember we did this so that write and writeSequence can send the
    # data to the right place.
    transport.TLS = True

    # Hook it up; the TLS protocol talks to the wire through a bypass proxy so
    # its own (encrypted) bytes are not re-encrypted.
    transport.protocol.makeConnection(_BypassTLS(bypass, transport))

    # Restore producer if necessary:
    if producer:
        transport.registerProducer(producer, streaming)
|
||||
|
||||
|
||||
|
||||
@implementer(ITLSTransport)
class ConnectionMixin(object):
    """
    A mixin for L{twisted.internet.abstract.FileDescriptor} which adds an
    L{ITLSTransport} implementation.

    @ivar TLS: Whether TLS is currently in use on this transport.  This is
        not a good way for applications to check for TLS; use
        L{ISSLTransport.providedBy} instead.
    """

    TLS = False

    def startTLS(self, ctx, normal=True):
        """
        @see: L{ITLSTransport.startTLS}
        """
        startTLS(self, ctx, normal, FileDescriptor)

    def write(self, bytes):
        """
        Write some bytes to this connection, passing them through a TLS layer
        if necessary, or discarding them if the connection has already been
        lost.
        """
        if not self.TLS:
            FileDescriptor.write(self, bytes)
            return
        # With TLS active, bytes go through the TLS protocol; they are
        # discarded if the connection has already been lost.
        if self.connected:
            self.protocol.write(bytes)

    def writeSequence(self, iovec):
        """
        Write some bytes to this connection, scatter/gather-style, passing
        them through a TLS layer if necessary, or discarding them if the
        connection has already been lost.
        """
        if not self.TLS:
            FileDescriptor.writeSequence(self, iovec)
            return
        if self.connected:
            self.protocol.writeSequence(iovec)

    def loseConnection(self):
        """
        Close this connection after writing all pending data.

        If TLS has been negotiated, perform a TLS shutdown.
        """
        if not self.TLS:
            FileDescriptor.loseConnection(self)
            return
        if self.connected and not self.disconnecting:
            self.protocol.loseConnection()

    def registerProducer(self, producer, streaming):
        """
        Register a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if not self.TLS:
            FileDescriptor.registerProducer(self, producer, streaming)
            return
        # Registering a producer before we're connected shouldn't be a
        # problem.  If we end up with a write(), that's already handled in
        # the write() code above, and there are no other potential
        # side-effects.
        self.protocol.registerProducer(producer, streaming)

    def unregisterProducer(self):
        """
        Unregister a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if not self.TLS:
            FileDescriptor.unregisterProducer(self)
            return
        self.protocol.unregisterProducer()
|
||||
|
||||
|
||||
|
||||
class ClientMixin(object):
    """
    A mixin for L{twisted.internet.tcp.Client} which simply marks it as a
    client so the default direction of a negotiated TLS handshake is known.

    @ivar _tlsClientDefault: Always C{True}: this is a client connection, so
        by default it acts as the TLS client when TLS is negotiated.
    """
    _tlsClientDefault = True
|
||||
|
||||
|
||||
|
||||
class ServerMixin(object):
    """
    A mixin for L{twisted.internet.tcp.Server} which simply marks it as a
    server so the default direction of a negotiated TLS handshake is known.

    @ivar _tlsClientDefault: Always C{False}: this is a server connection, so
        by default it acts as the TLS server when TLS is negotiated.
    """
    _tlsClientDefault = False
|
||||
|
|
@ -0,0 +1,300 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_pollingfile -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Implements a simple polling interface for file descriptors that don't work with
|
||||
select() - this is pretty much only useful on Windows.
|
||||
"""
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from twisted.internet.interfaces import IConsumer, IPushProducer
|
||||
|
||||
|
||||
# Bounds (in seconds) for _PollingTimer's adaptive polling interval: the
# delay shrinks toward MIN_TIMEOUT while resources report work and doubles
# toward MAX_TIMEOUT while they are idle (see _PollingTimer._pollEvent).
MIN_TIMEOUT = 0.000000001
MAX_TIMEOUT = 0.1
|
||||
|
||||
|
||||
|
||||
class _PollableResource:
|
||||
active = True
|
||||
|
||||
def activate(self):
|
||||
self.active = True
|
||||
|
||||
|
||||
def deactivate(self):
|
||||
self.active = False
|
||||
|
||||
|
||||
|
||||
class _PollingTimer:
    """
    Drives a collection of L{_PollableResource}s by repeatedly calling their
    C{checkWork} methods from a reactor timer whose interval adapts to how
    busy the resources are.
    """
    # Everything is private here because it is really an implementation detail.

    def __init__(self, reactor):
        # Reactor used to schedule the polling calls.
        self.reactor = reactor
        self._resources = []
        self._pollTimer = None
        self._currentTimeout = MAX_TIMEOUT
        self._paused = False

    def _addPollableResource(self, res):
        # Track a new resource and (re)start polling if anything is active.
        self._resources.append(res)
        self._checkPollingState()

    def _checkPollingState(self):
        # Poll only while at least one resource is active.
        for resource in self._resources:
            if resource.active:
                self._startPolling()
                break
        else:
            self._stopPolling()

    def _startPolling(self):
        if self._pollTimer is None:
            self._pollTimer = self._reschedule()

    def _stopPolling(self):
        if self._pollTimer is not None:
            self._pollTimer.cancel()
            self._pollTimer = None

    def _pause(self):
        self._paused = True

    def _unpause(self):
        self._paused = False
        self._checkPollingState()

    def _reschedule(self):
        # Returns the new delayed call, or None while paused.
        if not self._paused:
            return self.reactor.callLater(self._currentTimeout, self._pollEvent)

    def _pollEvent(self):
        # Give every active resource a chance to do work, then adapt the
        # polling interval: work done -> shorter timeout (clamped to
        # MIN_TIMEOUT); no work -> double the timeout (clamped to
        # MAX_TIMEOUT).
        workUnits = 0.
        anyActive = []
        for resource in self._resources:
            if resource.active:
                workUnits += resource.checkWork()
                # Check AFTER work has been done
                if resource.active:
                    anyActive.append(resource)

        newTimeout = self._currentTimeout
        if workUnits:
            newTimeout = self._currentTimeout / (workUnits + 1.)
            if newTimeout < MIN_TIMEOUT:
                newTimeout = MIN_TIMEOUT
        else:
            newTimeout = self._currentTimeout * 2.
            if newTimeout > MAX_TIMEOUT:
                newTimeout = MAX_TIMEOUT
        self._currentTimeout = newTimeout
        # Keep polling only while something is still active.
        if anyActive:
            self._pollTimer = self._reschedule()
|
||||
|
||||
|
||||
# If we ever (let's hope not) need the above functionality on UNIX, this could
|
||||
# be factored into a different module.
|
||||
|
||||
import win32pipe
|
||||
import win32file
|
||||
import win32api
|
||||
import pywintypes
|
||||
|
||||
class _PollableReadPipe(_PollableResource):
    """
    Polls a (Windows) pipe handle for readable data, delivering the bytes to
    C{receivedCallback} and invoking C{lostCallback} once the pipe breaks.
    """

    implements(IPushProducer)

    def __init__(self, pipe, receivedCallback, lostCallback):
        # security attributes for pipes
        self.pipe = pipe
        self.receivedCallback = receivedCallback
        self.lostCallback = lostCallback

    def checkWork(self):
        # Drain everything currently available from the pipe without
        # blocking; a win32 error while peeking or reading means the pipe is
        # gone.  Returns the number of bytes read (the "work units" used by
        # _PollingTimer).
        finished = 0
        fullDataRead = []

        while 1:
            try:
                buffer, bytesToRead, result = win32pipe.PeekNamedPipe(self.pipe, 1)
                # finished = (result == -1)
                if not bytesToRead:
                    break
                hr, data = win32file.ReadFile(self.pipe, bytesToRead, None)
                fullDataRead.append(data)
            except win32api.error:
                finished = 1
                break

        dataBuf = ''.join(fullDataRead)
        if dataBuf:
            self.receivedCallback(dataBuf)
        if finished:
            self.cleanup()
        return len(dataBuf)

    def cleanup(self):
        # Stop polling and notify the owner that the read end is gone.
        self.deactivate()
        self.lostCallback()

    def close(self):
        try:
            win32api.CloseHandle(self.pipe)
        except pywintypes.error:
            # You can't close std handles...?
            pass

    # IPushProducer methods: pausing/resuming simply toggles whether this
    # resource participates in the polling loop.
    def stopProducing(self):
        self.close()

    def pauseProducing(self):
        self.deactivate()

    def resumeProducing(self):
        self.activate()
|
||||
|
||||
|
||||
# Threshold (in bytes) for _PollableWritePipe's outgoing queue; once exceeded,
# the registered producer is asked to pause (see _PollableWritePipe.write).
FULL_BUFFER_SIZE = 64 * 1024
|
||||
|
||||
class _PollableWritePipe(_PollableResource):
    """
    Polls a (Windows) pipe handle for writability, draining an outgoing byte
    queue into it, and implements L{IConsumer} so a producer can be throttled
    against the amount of buffered data.
    """

    implements(IConsumer)

    def __init__(self, writePipe, lostCallback):
        self.disconnecting = False
        self.producer = None
        # True while we have asked our producer to pause.
        self.producerPaused = False
        self.streamingProducer = 0
        self.outQueue = []
        self.writePipe = writePipe
        self.lostCallback = lostCallback
        try:
            # Put the pipe in non-blocking mode so WriteFile never hangs.
            win32pipe.SetNamedPipeHandleState(writePipe,
                                              win32pipe.PIPE_NOWAIT,
                                              None,
                                              None)
        except pywintypes.error:
            # Maybe it's an invalid handle.  Who knows.
            pass

    def close(self):
        # Request a close; the handle is actually closed by checkWork once
        # the outgoing queue has drained.
        self.disconnecting = True

    def bufferFull(self):
        # Too much buffered data: ask the producer to stop until the buffer
        # empties again.
        if self.producer is not None:
            self.producerPaused = True
            self.producer.pauseProducing()

    def bufferEmpty(self):
        # Resume a paused (or non-streaming) producer.  Returns True if a
        # producer was resumed.
        if self.producer is not None and ((not self.streamingProducer) or
                                          self.producerPaused):
            # Bug fix: clear this consumer's own paused flag; the previous
            # code set a 'producerPaused' attribute on the producer object
            # instead, leaving self.producerPaused stuck at True forever.
            self.producerPaused = False
            self.producer.resumeProducing()
            return True
        return False

    # almost-but-not-quite-exact copy-paste from abstract.FileDescriptor... ugh

    def registerProducer(self, producer, streaming):
        """Register to receive data from a producer.

        This sets this selectable to be a consumer for a producer. When this
        selectable runs out of data on a write() call, it will ask the producer
        to resumeProducing(). A producer should implement the IProducer
        interface.

        FileDescriptor provides some infrastructure for producer methods.
        """
        if self.producer is not None:
            raise RuntimeError(
                "Cannot register producer %s, because producer %s was never "
                "unregistered." % (producer, self.producer))
        if not self.active:
            # The pipe is already gone; tell the producer not to bother.
            producer.stopProducing()
        else:
            self.producer = producer
            self.streamingProducer = streaming
            if not streaming:
                producer.resumeProducing()

    def unregisterProducer(self):
        """Stop consuming data from a producer, without disconnecting.
        """
        self.producer = None

    def writeConnectionLost(self):
        # Stop polling, close the handle (best-effort) and notify the owner.
        self.deactivate()
        try:
            win32api.CloseHandle(self.writePipe)
        except pywintypes.error:
            # OMG what
            pass
        self.lostCallback()

    def writeSequence(self, seq):
        """
        Append a C{list} or C{tuple} of bytes to the output buffer.

        @param seq: C{list} or C{tuple} of C{str} instances to be appended to
            the output buffer.

        @raise TypeError: If C{seq} contains C{unicode}.
        """
        if unicode in map(type, seq):
            raise TypeError("Unicode not allowed in output buffer.")
        self.outQueue.extend(seq)

    def write(self, data):
        """
        Append some bytes to the output buffer.

        @param data: C{str} to be appended to the output buffer.
        @type data: C{str}.

        @raise TypeError: If C{data} is C{unicode} instead of C{str}.
        """
        if isinstance(data, unicode):
            raise TypeError("Unicode not allowed in output buffer.")
        if self.disconnecting:
            return
        self.outQueue.append(data)
        if sum(map(len, self.outQueue)) > FULL_BUFFER_SIZE:
            self.bufferFull()

    def checkWork(self):
        # Flush as much of the queue as the pipe will take; returns the
        # number of bytes written (the "work units" for _PollingTimer).
        numBytesWritten = 0
        if not self.outQueue:
            if self.disconnecting:
                self.writeConnectionLost()
                return 0
            try:
                # Probe the pipe with a zero-length write to detect closure.
                win32file.WriteFile(self.writePipe, '', None)
            except pywintypes.error:
                self.writeConnectionLost()
                return numBytesWritten
        while self.outQueue:
            data = self.outQueue.pop(0)
            errCode = 0
            try:
                errCode, nBytesWritten = win32file.WriteFile(self.writePipe,
                                                             data, None)
            except win32api.error:
                self.writeConnectionLost()
                break
            else:
                # assert not errCode, "wtf an error code???"
                numBytesWritten += nBytesWritten
                if len(data) > nBytesWritten:
                    # Partial write: requeue the remainder and stop for now.
                    self.outQueue.insert(0, data[nBytesWritten:])
                    break
        else:
            # Queue fully drained: let the producer resume, or finish the
            # disconnect if one was requested and nothing was resumed.
            resumed = self.bufferEmpty()
            if not resumed and self.disconnecting:
                self.writeConnectionLost()
        return numBytesWritten
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
Serial Port Protocol
|
||||
"""
|
||||
|
||||
# system imports
|
||||
import os, errno
|
||||
|
||||
# dependent on pyserial ( http://pyserial.sf.net/ )
|
||||
# only tested w/ 1.18 (5 Dec 2002)
|
||||
import serial
|
||||
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
|
||||
from serial import STOPBITS_ONE, STOPBITS_TWO
|
||||
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
|
||||
|
||||
from serialport import BaseSerialPort
|
||||
|
||||
# twisted imports
|
||||
from twisted.internet import abstract, fdesc, main
|
||||
|
||||
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """
    A select()able serial device, acting as a transport.
    """

    # The device is open as soon as the object is constructed.
    connected = 1

    def __init__(self, protocol, deviceNameOrPortNumber, reactor,
                 baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
                 stopbits = STOPBITS_ONE, timeout = 0, xonxoff = 0, rtscts = 0):
        """
        @param protocol: The protocol which will receive the serial data.
        @param deviceNameOrPortNumber: The device name (or port number)
            handed to the pyserial factory.
        @param reactor: The reactor this descriptor is registered with.

        The remaining arguments configure the serial line and are forwarded
        to pyserial unchanged.
        """
        abstract.FileDescriptor.__init__(self, reactor)
        self._serial = self._serialFactory(
            deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
            parity=parity, stopbits=stopbits, timeout=timeout,
            xonxoff=xonxoff, rtscts=rtscts)
        self.reactor = reactor
        # Start from a clean slate before hooking up the protocol and
        # beginning to read.
        self.flushInput()
        self.flushOutput()
        self.protocol = protocol
        self.protocol.makeConnection(self)
        self.startReading()

    def fileno(self):
        # File descriptor of the underlying pyserial object.
        return self._serial.fd

    def writeSomeData(self, data):
        """
        Write some data to the serial device.
        """
        return fdesc.writeToFD(self.fileno(), data)

    def doRead(self):
        """
        Some data's readable from serial device.
        """
        return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)

    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)
|
||||
|
|
@ -0,0 +1,167 @@
|
|||
# -*- test-case-name: twisted.test.test_stdio -*-
|
||||
|
||||
"""Standard input/out/err support.
|
||||
|
||||
Future Plans::
|
||||
|
||||
support for stderr, perhaps
|
||||
Rewrite to use the reactor instead of an ad-hoc mechanism for connecting
|
||||
protocols to transport.
|
||||
|
||||
Maintainer: James Y Knight
|
||||
"""
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from twisted.internet import process, error, interfaces
|
||||
from twisted.python import log, failure
|
||||
|
||||
|
||||
class PipeAddress(object):
    """
    An L{IAddress} used for stdio pipe endpoints; carries no location
    information.
    """
    implements(interfaces.IAddress)
|
||||
|
||||
|
||||
class StandardIO(object):
    """
    A transport connecting a protocol to this process's standard input and
    output, using L{process.ProcessReader} and L{process.ProcessWriter} for
    the actual I/O.
    """
    implements(interfaces.ITransport, interfaces.IProducer,
               interfaces.IConsumer, interfaces.IHalfCloseableDescriptor)

    # Reader/writer wrapping the stdin/stdout descriptors; each is set to
    # None once its half of the connection is gone.
    _reader = None
    _writer = None
    disconnected = False
    disconnecting = False

    def __init__(self, proto, stdin=0, stdout=1, reactor=None):
        """
        @param proto: The protocol to connect to this transport.
        @param stdin: File descriptor to read from (defaults to 0).
        @param stdout: File descriptor to write to (defaults to 1).
        @param reactor: The reactor to use; the global reactor if C{None}.
        """
        if reactor is None:
            from twisted.internet import reactor
        self.protocol = proto

        self._writer = process.ProcessWriter(reactor, self, 'write', stdout)
        self._reader = process.ProcessReader(reactor, self, 'read', stdin)
        self._reader.startReading()
        self.protocol.makeConnection(self)

    # ITransport

    # XXX Actually, see #3597.
    def loseWriteConnection(self):
        if self._writer is not None:
            self._writer.loseConnection()

    def write(self, data):
        # Silently drop writes after the write side is gone.
        if self._writer is not None:
            self._writer.write(data)

    def writeSequence(self, data):
        if self._writer is not None:
            self._writer.writeSequence(data)

    def loseConnection(self):
        self.disconnecting = True

        if self._writer is not None:
            self._writer.loseConnection()
        if self._reader is not None:
            # Don't loseConnection, because we don't want to SIGPIPE it.
            self._reader.stopReading()

    def getPeer(self):
        return PipeAddress()

    def getHost(self):
        return PipeAddress()

    # Callbacks from process.ProcessReader/ProcessWriter
    def childDataReceived(self, fd, data):
        self.protocol.dataReceived(data)

    def childConnectionLost(self, fd, reason):
        if self.disconnected:
            return

        if reason.value.__class__ == error.ConnectionDone:
            # Normal close
            if fd == 'read':
                self._readConnectionLost(reason)
            else:
                self._writeConnectionLost(reason)
        else:
            self.connectionLost(reason)

    def connectionLost(self, reason):
        # Tear down both halves and notify the protocol exactly once.
        self.disconnected = True

        # Make sure to cleanup the other half
        _reader = self._reader
        _writer = self._writer
        protocol = self.protocol
        self._reader = self._writer = None
        self.protocol = None

        if _writer is not None and not _writer.disconnected:
            _writer.connectionLost(reason)

        if _reader is not None and not _reader.disconnected:
            _reader.connectionLost(reason)

        try:
            protocol.connectionLost(reason)
        except:
            log.err()

    def _writeConnectionLost(self, reason):
        self._writer=None
        if self.disconnecting:
            self.connectionLost(reason)
            return

        # Half-close-aware protocols get a chance to keep reading.
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())

    def _readConnectionLost(self, reason):
        self._reader=None
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            # Protocols without half-close support lose the whole connection.
            self.connectionLost(reason)

    # IConsumer
    def registerProducer(self, producer, streaming):
        if self._writer is None:
            producer.stopProducing()
        else:
            self._writer.registerProducer(producer, streaming)

    def unregisterProducer(self):
        if self._writer is not None:
            self._writer.unregisterProducer()

    # IProducer
    def stopProducing(self):
        self.loseConnection()

    def pauseProducing(self):
        if self._reader is not None:
            self._reader.pauseProducing()

    def resumeProducing(self):
        if self._reader is not None:
            self._reader.resumeProducing()

    def stopReading(self):
        """Compatibility only, don't use. Call pauseProducing."""
        self.pauseProducing()

    def startReading(self):
        """Compatibility only, don't use. Call resumeProducing."""
        self.resumeProducing()
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_sigchld -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module is used to integrate child process termination into a
|
||||
reactor event loop. This is a challenging feature to provide because
|
||||
most platforms indicate process termination via SIGCHLD and do not
|
||||
provide a way to wait for that signal and arbitrary I/O events at the
|
||||
same time. The naive implementation involves installing a Python
|
||||
SIGCHLD handler; unfortunately this leads to other syscalls being
|
||||
interrupted (whenever SIGCHLD is received) and failing with EINTR
|
||||
(which almost no one is prepared to handle). This interruption can be
|
||||
disabled via siginterrupt(2) (or one of the equivalent mechanisms);
|
||||
however, if the SIGCHLD is delivered by the platform to a non-main
|
||||
thread (not a common occurrence, but difficult to prove impossible),
|
||||
the main thread (waiting on select() or another event notification
|
||||
API) may not wake up leading to an arbitrary delay before the child
|
||||
termination is noticed.
|
||||
|
||||
The basic solution to all these issues involves enabling SA_RESTART
|
||||
(ie, disabling system call interruption) and registering a C signal
|
||||
handler which writes a byte to a pipe. The other end of the pipe is
|
||||
registered with the event loop, allowing it to wake up shortly after
|
||||
SIGCHLD is received. See L{twisted.internet.posixbase._SIGCHLDWaker}
|
||||
for the implementation of the event loop side of this solution. The
|
||||
use of a pipe this way is known as the U{self-pipe
|
||||
trick<http://cr.yp.to/docs/selfpipe.html>}.
|
||||
|
||||
From Python version 2.6, C{signal.siginterrupt} and C{signal.set_wakeup_fd}
|
||||
provide the necessary C signal handler which writes to the pipe to be
|
||||
registered with C{SA_RESTART}.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import signal
|
||||
|
||||
|
||||
def installHandler(fd):
    """
    Install a signal handler which will write a byte to C{fd} when
    I{SIGCHLD} is received.

    This is implemented by installing a SIGCHLD handler that does nothing,
    setting the I{SIGCHLD} handler as not allowed to interrupt system calls,
    and using L{signal.set_wakeup_fd} to do the actual writing.

    @param fd: The file descriptor to which to write when I{SIGCHLD} is
        received, or -1 to restore the default handler and disable the
        wakeup write.
    @type fd: C{int}

    @return: The previous wakeup file descriptor, as returned by
        L{signal.set_wakeup_fd}.
    """
    if fd != -1:
        # A (no-op) Python-level handler must be installed for the wakeup fd
        # to be written to, and it must not interrupt system calls.
        signal.signal(signal.SIGCHLD, lambda *args: None)
        signal.siginterrupt(signal.SIGCHLD, False)
    else:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    return signal.set_wakeup_fd(fd)
|
||||
|
||||
|
||||
|
||||
def isDefaultHandler():
    """
    Determine whether the I{SIGCHLD} handler is the default or not.

    @return: C{True} if the currently installed I{SIGCHLD} handler is
        L{signal.SIG_DFL}, C{False} otherwise.
    """
    current = signal.getsignal(signal.SIGCHLD)
    return current == signal.SIG_DFL
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
# -*- test-case-name: twisted.test.test_ssl -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module implements helpers for switching to TLS on an existing transport.
|
||||
|
||||
@since: 11.1
|
||||
"""
|
||||
|
||||
class _TLSDelayed(object):
|
||||
"""
|
||||
State tracking record for TLS startup parameters. Used to remember how
|
||||
TLS should be started when starting it is delayed to wait for the output
|
||||
buffer to be flushed.
|
||||
|
||||
@ivar bufferedData: A C{list} which contains all the data which was
|
||||
written to the transport after an attempt to start TLS was made but
|
||||
before the buffers outstanding at that time could be flushed and TLS
|
||||
could really be started. This is appended to by the transport's
|
||||
write and writeSequence methods until it is possible to actually
|
||||
start TLS, then it is written to the TLS-enabled transport.
|
||||
|
||||
@ivar context: An SSL context factory object to use to start TLS.
|
||||
|
||||
@ivar extra: An extra argument to pass to the transport's C{startTLS}
|
||||
method.
|
||||
"""
|
||||
def __init__(self, bufferedData, context, extra):
|
||||
self.bufferedData = bufferedData
|
||||
self.context = context
|
||||
self.extra = extra
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,361 @@
|
|||
# -*- test-case-name: twisted.test.test_internet -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Threaded select reactor
|
||||
|
||||
The threadedselectreactor is a specialized reactor for integrating with
|
||||
arbitrary foreign event loop, such as those you find in GUI toolkits.
|
||||
|
||||
There are three things you'll need to do to use this reactor.
|
||||
|
||||
Install the reactor at the beginning of your program, before importing
|
||||
the rest of Twisted::
|
||||
|
||||
| from twisted.internet import _threadedselect
|
||||
| _threadedselect.install()
|
||||
|
||||
Interleave this reactor with your foreign event loop, at some point after
|
||||
your event loop is initialized::
|
||||
|
||||
| from twisted.internet import reactor
|
||||
| reactor.interleave(foreignEventLoopWakerFunction)
|
||||
| self.addSystemEventTrigger('after', 'shutdown', foreignEventLoopStop)
|
||||
|
||||
Instead of shutting down the foreign event loop directly, shut down the
|
||||
reactor::
|
||||
|
||||
| from twisted.internet import reactor
|
||||
| reactor.stop()
|
||||
|
||||
In order for Twisted to do its work in the main thread (the thread that
|
||||
interleave is called from), a waker function is necessary. The waker function
|
||||
will be called from a "background" thread with one argument: func.
|
||||
The waker function's purpose is to call func() from the main thread.
|
||||
Many GUI toolkits ship with appropriate waker functions.
|
||||
Some examples of this are wxPython's wx.callAfter (may be wxCallAfter in
|
||||
older versions of wxPython) or PyObjC's PyObjCTools.AppHelper.callAfter.
|
||||
These would be used in place of "foreignEventLoopWakerFunction" in the above
|
||||
example.
|
||||
|
||||
The other integration point at which the foreign event loop and this reactor
|
||||
must integrate is shutdown. In order to ensure clean shutdown of Twisted,
|
||||
you must allow for Twisted to come to a complete stop before quitting the
|
||||
application. Typically, you will do this by setting up an after shutdown
|
||||
trigger to stop your foreign event loop, and call reactor.stop() where you
|
||||
would normally have initiated the shutdown procedure for the foreign event
|
||||
loop. Shutdown functions that could be used in place of
|
||||
"foreignEventloopStop" would be the ExitMainLoop method of the wxApp instance
|
||||
with wxPython, or the PyObjCTools.AppHelper.stopEventLoop function.
|
||||
"""
|
||||
|
||||
from threading import Thread
|
||||
from Queue import Queue, Empty
|
||||
from time import sleep
|
||||
import sys
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from twisted.internet.interfaces import IReactorFDSet
|
||||
from twisted.internet import error
|
||||
from twisted.internet import posixbase
|
||||
from twisted.internet.posixbase import _NO_FILENO, _NO_FILEDESC
|
||||
from twisted.python import log, failure, threadable
|
||||
from twisted.persisted import styles
|
||||
from twisted.python.runtime import platformType
|
||||
|
||||
import select
|
||||
from errno import EINTR, EBADF
|
||||
|
||||
from twisted.internet.selectreactor import _select
|
||||
|
||||
def dictRemove(dct, value):
    """
    Remove C{value} from the dictionary C{dct} if it is present; do nothing
    otherwise.

    @param dct: The dictionary to remove the key from.
    @param value: The key to remove.
    """
    # dict.pop with a default is the idiomatic, single-lookup form of the
    # try/del/except KeyError dance.
    dct.pop(value, None)
|
||||
|
||||
def raiseException(e):
    """
    Raise the given exception instance C{e}.
    """
    raise e
|
||||
|
||||
class ThreadedSelectReactor(posixbase.PosixReactorBase):
    """A threaded select() based reactor - runs on all POSIX platforms and on
    Win32.

    The blocking select() call runs in a dedicated worker thread; results are
    shipped back to the main thread through a pair of Queues
    (C{toThreadQueue} / C{toMainThread}).
    """
    implements(IReactorFDSet)

    def __init__(self):
        threadable.init(1)
        # reads/writes are used as sets: selectable -> 1.
        self.reads = {}
        self.writes = {}
        # Messages destined for the worker thread and the main thread.
        self.toThreadQueue = Queue()
        self.toMainThread = Queue()
        self.workerThread = None
        # Callable invoked to wake the main loop when a message is posted;
        # set by interleave().
        self.mainWaker = None
        posixbase.PosixReactorBase.__init__(self)
        self.addSystemEventTrigger('after', 'shutdown', self._mainLoopShutdown)

    def wakeUp(self):
        """Wake up the main event loop; safe to call from any thread."""
        # we want to wake up from any thread
        self.waker.wakeUp()

    def callLater(self, *args, **kw):
        """Schedule a delayed call and wake the loop so it is noticed."""
        tple = posixbase.PosixReactorBase.callLater(self, *args, **kw)
        self.wakeUp()
        return tple

    def _sendToMain(self, msg, *args):
        """Post a (msg, args) message for the main thread and wake it."""
        #print >>sys.stderr, 'sendToMain', msg, args
        self.toMainThread.put((msg, args))
        if self.mainWaker is not None:
            self.mainWaker()

    def _sendToThread(self, fn, *args):
        """Queue C{fn(*args)} for execution in the worker thread."""
        #print >>sys.stderr, 'sendToThread', fn, args
        self.toThreadQueue.put((fn, args))

    def _preenDescriptorsInThread(self):
        """Drop selectables that select() rejects; runs in the worker thread."""
        log.msg("Malformed file descriptor found. Preening lists.")
        readers = self.reads.keys()
        writers = self.writes.keys()
        self.reads.clear()
        self.writes.clear()
        for selDict, selList in ((self.reads, readers), (self.writes, writers)):
            for selectable in selList:
                try:
                    # Probe each descriptor individually with a 0 timeout.
                    select.select([selectable], [selectable], [selectable], 0)
                except:
                    log.msg("bad descriptor %s" % selectable)
                else:
                    selDict[selectable] = 1

    def _workerInThread(self):
        """Worker-thread main loop: execute queued callables until SystemExit."""
        try:
            while 1:
                fn, args = self.toThreadQueue.get()
                #print >>sys.stderr, "worker got", fn, args
                fn(*args)
        except SystemExit:
            pass # exception indicates this thread should exit
        except:
            # Ship any other failure back to the main thread to be re-raised.
            f = failure.Failure()
            self._sendToMain('Failure', f)
        #print >>sys.stderr, "worker finished"

    def _doSelectInThread(self, timeout):
        """Run one iteration of the I/O monitor loop.

        This will run all selectables who had input or output readiness
        waiting for them.
        """
        reads = self.reads
        writes = self.writes
        while 1:
            try:
                r, w, ignored = _select(reads.keys(),
                                        writes.keys(),
                                        [], timeout)
                break
            except ValueError, ve:
                # Possibly a file descriptor has gone negative?
                log.err()
                self._preenDescriptorsInThread()
            except TypeError, te:
                # Something *totally* invalid (object w/o fileno, non-integral
                # result) was passed
                log.err()
                self._preenDescriptorsInThread()
            except (select.error, IOError), se:
                # select(2) encountered an error
                if se.args[0] in (0, 2):
                    # windows does this if it got an empty list
                    if (not reads) and (not writes):
                        return
                    else:
                        raise
                elif se.args[0] == EINTR:
                    return
                elif se.args[0] == EBADF:
                    self._preenDescriptorsInThread()
                else:
                    # OK, I really don't know what's going on. Blow up.
                    raise
        self._sendToMain('Notify', r, w)

    def _process_Notify(self, r, w):
        """Main-thread handler for a 'Notify' message: dispatch ready FDs."""
        #print >>sys.stderr, "_process_Notify"
        reads = self.reads
        writes = self.writes

        _drdw = self._doReadOrWrite
        _logrun = log.callWithLogger
        for selectables, method, dct in ((r, "doRead", reads), (w, "doWrite", writes)):
            for selectable in selectables:
                # if this was disconnected in another thread, kill it.
                if selectable not in dct:
                    continue
                # This for pausing input when we're not ready for more.
                _logrun(selectable, _drdw, selectable, method, dct)
        #print >>sys.stderr, "done _process_Notify"

    def _process_Failure(self, f):
        """Main-thread handler for a 'Failure' message: re-raise it here."""
        f.raiseException()

    _doIterationInThread = _doSelectInThread

    def ensureWorkerThread(self):
        """Start the worker thread if it is not currently running."""
        if self.workerThread is None or not self.workerThread.isAlive():
            self.workerThread = Thread(target=self._workerInThread)
            self.workerThread.start()

    def doThreadIteration(self, timeout):
        """Run one reactor iteration, blocking on the worker's reply."""
        self._sendToThread(self._doIterationInThread, timeout)
        self.ensureWorkerThread()
        #print >>sys.stderr, 'getting...'
        msg, args = self.toMainThread.get()
        #print >>sys.stderr, 'got', msg, args
        getattr(self, '_process_' + msg)(*args)

    doIteration = doThreadIteration

    def _interleave(self):
        """Generator driving one reactor step per resumption; see interleave()."""
        while self.running:
            #print >>sys.stderr, "runUntilCurrent"
            self.runUntilCurrent()
            t2 = self.timeout()
            # NOTE(review): `self.running and t2` yields False (i.e. 0) when
            # stopping, otherwise the timeout -- confirm this is intentional.
            t = self.running and t2
            self._sendToThread(self._doIterationInThread, t)
            #print >>sys.stderr, "yielding"
            yield None
            #print >>sys.stderr, "fetching"
            msg, args = self.toMainThread.get_nowait()
            getattr(self, '_process_' + msg)(*args)

    def interleave(self, waker, *args, **kw):
        """
        interleave(waker) interleaves this reactor with the
        current application by moving the blocking parts of
        the reactor (select() in this case) to a separate
        thread. This is typically useful for integration with
        GUI applications which have their own event loop
        already running.

        See the module docstring for more information.
        """
        self.startRunning(*args, **kw)
        loop = self._interleave()
        def mainWaker(waker=waker, loop=loop):
            #print >>sys.stderr, "mainWaker()"
            waker(loop.next)
        self.mainWaker = mainWaker
        loop.next()
        self.ensureWorkerThread()

    def _mainLoopShutdown(self):
        """Shut down the worker thread and drain both message queues."""
        self.mainWaker = None
        if self.workerThread is not None:
            #print >>sys.stderr, 'getting...'
            # Ask the worker to raise SystemExit, which ends its loop.
            self._sendToThread(raiseException, SystemExit)
            self.wakeUp()
            try:
                while 1:
                    msg, args = self.toMainThread.get_nowait()
                    #print >>sys.stderr, "ignored:", (msg, args)
            except Empty:
                pass
            self.workerThread.join()
            self.workerThread = None
        try:
            while 1:
                fn, args = self.toThreadQueue.get_nowait()
                if fn is self._doIterationInThread:
                    log.msg('Iteration is still in the thread queue!')
                elif fn is raiseException and args[0] is SystemExit:
                    pass
                else:
                    fn(*args)
        except Empty:
            pass

    def _doReadOrWrite(self, selectable, method, dict):
        """Invoke doRead/doWrite on C{selectable}; disconnect it on failure."""
        try:
            why = getattr(selectable, method)()
            handfn = getattr(selectable, 'fileno', None)
            if not handfn:
                why = _NO_FILENO
            elif handfn() == -1:
                why = _NO_FILEDESC
        except:
            why = sys.exc_info()[1]
            log.err()
        if why:
            self._disconnectSelectable(selectable, why, method == "doRead")

    def addReader(self, reader):
        """Add a FileDescriptor for notification of data available to read.
        """
        self._sendToThread(self.reads.__setitem__, reader, 1)
        self.wakeUp()

    def addWriter(self, writer):
        """Add a FileDescriptor for notification of data available to write.
        """
        self._sendToThread(self.writes.__setitem__, writer, 1)
        self.wakeUp()

    def removeReader(self, reader):
        """Remove a Selectable for notification of data available to read.
        """
        self._sendToThread(dictRemove, self.reads, reader)

    def removeWriter(self, writer):
        """Remove a Selectable for notification of data available to write.
        """
        self._sendToThread(dictRemove, self.writes, writer)

    def removeAll(self):
        """Remove all readers and writers; returns the removed selectables."""
        return self._removeAll(self.reads, self.writes)

    def getReaders(self):
        """Return the list of currently registered readers."""
        return self.reads.keys()

    def getWriters(self):
        """Return the list of currently registered writers."""
        return self.writes.keys()

    def stop(self):
        """
        Extend the base stop implementation to also wake up the select thread so
        that C{runUntilCurrent} notices the reactor should stop.
        """
        posixbase.PosixReactorBase.stop(self)
        self.wakeUp()

    def run(self, installSignalHandlers=1):
        """Start the reactor and block in mainLoop() until it stops."""
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self.mainLoop()

    def mainLoop(self):
        """Drive interleave() with a simple blocking queue as the waker."""
        q = Queue()
        self.interleave(q.put)
        while self.running:
            try:
                q.get()()
            except StopIteration:
                break
|
||||
|
||||
|
||||
|
||||
def install():
    """Configure the twisted mainloop to be run using the select() reactor.
    """
    from twisted.internet.main import installReactor
    newReactor = ThreadedSelectReactor()
    installReactor(newReactor)
    return newReactor


__all__ = ['install']
|
||||
|
|
@ -0,0 +1,126 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Serial port support for Windows.
|
||||
|
||||
Requires PySerial and pywin32.
|
||||
"""
|
||||
|
||||
# system imports
|
||||
import serial
|
||||
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
|
||||
from serial import STOPBITS_ONE, STOPBITS_TWO
|
||||
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
|
||||
import win32file, win32event
|
||||
|
||||
# twisted imports
|
||||
from twisted.internet import abstract
|
||||
|
||||
# sibling imports
|
||||
from serialport import BaseSerialPort
|
||||
|
||||
|
||||
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """A serial device, acting as a transport, that uses a win32 event."""

    connected = 1

    def __init__(self, protocol, deviceNameOrPortNumber, reactor,
                 baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
                 stopbits = STOPBITS_ONE, xonxoff = 0, rtscts = 0):
        """
        Open the serial device and start delivering its data to C{protocol}.

        @param protocol: the protocol to which received bytes are delivered.
        @param deviceNameOrPortNumber: device name or port number understood
            by pyserial.
        @param reactor: a reactor supporting addEvent/removeEvent for win32
            event handles.

        The remaining keyword arguments are passed through to pyserial.
        """
        self._serial = self._serialFactory(
            deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
            parity=parity, stopbits=stopbits, timeout=None,
            xonxoff=xonxoff, rtscts=rtscts)
        self.flushInput()
        self.flushOutput()
        self.reactor = reactor
        self.protocol = protocol
        self.outQueue = []
        self.closed = 0
        self.closedNotifies = 0
        self.writeInProgress = 0

        self.protocol = protocol
        # Overlapped structures for asynchronous (non-blocking) win32 I/O.
        # The read event is manual-reset (second arg 1); the write event is
        # auto-reset (second arg 0).
        self._overlappedRead = win32file.OVERLAPPED()
        self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
        self._overlappedWrite = win32file.OVERLAPPED()
        self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)

        # Have the reactor call us back when either event handle fires.
        self.reactor.addEvent(self._overlappedRead.hEvent, self, 'serialReadEvent')
        self.reactor.addEvent(self._overlappedWrite.hEvent, self, 'serialWriteEvent')

        self.protocol.makeConnection(self)
        self._finishPortSetup()

    def _finishPortSetup(self):
        """
        Finish setting up the serial port.

        This is a separate method to facilitate testing.
        """
        # Clear any pending error state, then start an overlapped 1-byte read
        # so the read event fires as soon as any data arrives.
        flags, comstat = win32file.ClearCommError(self._serial.hComPort)
        rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
                                               win32file.AllocateReadBuffer(1),
                                               self._overlappedRead)

    def serialReadEvent(self):
        """Handle completion of the outstanding overlapped read."""
        #get that character we set up
        n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 0)
        if n:
            first = str(self.read_buf[:n])
            #now we should get everything that is already in the buffer
            flags, comstat = win32file.ClearCommError(self._serial.hComPort)
            if comstat.cbInQue:
                win32event.ResetEvent(self._overlappedRead.hEvent)
                rc, buf = win32file.ReadFile(self._serial.hComPort,
                                             win32file.AllocateReadBuffer(comstat.cbInQue),
                                             self._overlappedRead)
                # Wait (third arg 1) for the buffered read to complete.
                n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 1)
                #handle all the received data:
                self.protocol.dataReceived(first + str(buf[:n]))
            else:
                #handle all the received data:
                self.protocol.dataReceived(first)

        #set up next one
        win32event.ResetEvent(self._overlappedRead.hEvent)
        rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
                                               win32file.AllocateReadBuffer(1),
                                               self._overlappedRead)

    def write(self, data):
        """Queue C{data} for writing; only one overlapped write is in flight."""
        if data:
            if self.writeInProgress:
                self.outQueue.append(data)
            else:
                self.writeInProgress = 1
                win32file.WriteFile(self._serial.hComPort, data, self._overlappedWrite)

    def serialWriteEvent(self):
        """Handle write completion: start writing the next queued chunk."""
        try:
            dataToWrite = self.outQueue.pop(0)
        except IndexError:
            self.writeInProgress = 0
            return
        else:
            win32file.WriteFile(self._serial.hComPort, dataToWrite, self._overlappedWrite)

    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        self.reactor.removeEvent(self._overlappedRead.hEvent)
        self.reactor.removeEvent(self._overlappedWrite.hEvent)
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)
|
||||
|
|
@ -0,0 +1,125 @@
|
|||
# -*- test-case-name: twisted.test.test_stdio -*-
|
||||
|
||||
"""
|
||||
Windows-specific implementation of the L{twisted.internet.stdio} interface.
|
||||
"""
|
||||
|
||||
import win32api
|
||||
import os, msvcrt
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from twisted.internet.interfaces import IHalfCloseableProtocol, ITransport, IAddress
|
||||
from twisted.internet.interfaces import IConsumer, IPushProducer
|
||||
|
||||
from twisted.internet import _pollingfile, main
|
||||
from twisted.python.failure import Failure
|
||||
|
||||
|
||||
class Win32PipeAddress(object):
    """
    An L{IAddress} marker for one end of a win32 pipe; carries no
    address information.
    """
    implements(IAddress)
|
||||
|
||||
|
||||
|
||||
class StandardIO(_pollingfile._PollingTimer):
    """
    A transport connecting a protocol to this process's standard input and
    output, implemented with the win32 polling-pipe machinery.
    """

    implements(ITransport,
               IConsumer,
               IPushProducer)

    disconnecting = False
    disconnected = False

    def __init__(self, proto, reactor=None):
        """
        Start talking to standard IO with the given protocol.

        Also, put stdin/stdout/stderr into binary mode.
        """
        if reactor is None:
            from twisted.internet import reactor

        # Put all three standard descriptors into binary mode. The original
        # code iterated range(0, 1, 2), which yields only [0] (stdin); the
        # tuple (0, 1, 2) is what the docstring promises.
        for stdfd in (0, 1, 2):
            msvcrt.setmode(stdfd, os.O_BINARY)

        _pollingfile._PollingTimer.__init__(self, reactor)
        self.proto = proto

        hstdin = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
        hstdout = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)

        self.stdin = _pollingfile._PollableReadPipe(
            hstdin, self.dataReceived, self.readConnectionLost)

        self.stdout = _pollingfile._PollableWritePipe(
            hstdout, self.writeConnectionLost)

        self._addPollableResource(self.stdin)
        self._addPollableResource(self.stdout)

        self.proto.makeConnection(self)

    def dataReceived(self, data):
        """Deliver bytes read from stdin to the protocol."""
        self.proto.dataReceived(data)

    def readConnectionLost(self):
        """stdin closed; notify half-closeable protocols, maybe finish."""
        if IHalfCloseableProtocol.providedBy(self.proto):
            self.proto.readConnectionLost()
        self.checkConnLost()

    def writeConnectionLost(self):
        """stdout closed; notify half-closeable protocols, maybe finish."""
        if IHalfCloseableProtocol.providedBy(self.proto):
            self.proto.writeConnectionLost()
        self.checkConnLost()

    # Count of directions (read, write) that have been lost so far.
    connsLost = 0

    def checkConnLost(self):
        """Report connectionLost only once both directions have closed."""
        self.connsLost += 1
        if self.connsLost >= 2:
            self.disconnecting = True
            self.disconnected = True
            self.proto.connectionLost(Failure(main.CONNECTION_DONE))

    # ITransport

    def write(self, data):
        self.stdout.write(data)

    def writeSequence(self, seq):
        self.stdout.write(''.join(seq))

    def loseConnection(self):
        """Close both pipes; connectionLost fires once both report closed."""
        self.disconnecting = True
        self.stdin.close()
        self.stdout.close()

    def getPeer(self):
        return Win32PipeAddress()

    def getHost(self):
        return Win32PipeAddress()

    # IConsumer

    def registerProducer(self, producer, streaming):
        return self.stdout.registerProducer(producer, streaming)

    def unregisterProducer(self):
        return self.stdout.unregisterProducer()

    # def write() above

    # IProducer

    def stopProducing(self):
        self.stdin.stopProducing()

    # IPushProducer

    def pauseProducing(self):
        self.stdin.pauseProducing()

    def resumeProducing(self):
        self.stdin.resumeProducing()
|
||||
|
||||
|
|
@ -0,0 +1,535 @@
|
|||
# -*- test-case-name: twisted.test.test_abstract -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Support for generic select()able objects.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from socket import AF_INET6, inet_pton, error
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
# Twisted Imports
|
||||
from twisted.python.compat import _PY3, unicode, lazyByteSlice
|
||||
from twisted.python import reflect, failure
|
||||
from twisted.internet import interfaces, main
|
||||
|
||||
if _PY3:
    def _concatenate(bObj, offset, bArray):
        """
        Return C{bObj[offset:]} followed by the joined chunks of C{bArray}
        as a single C{bytes} object.
        """
        # Python 3 lacks the buffer() builtin and the other primitives don't
        # help in this case. Just do the copy. Perhaps later these buffers can
        # be joined and FileDescriptor can use writev(). Or perhaps bytearrays
        # would help.
        return bObj[offset:] + b"".join(bArray)
else:
    def _concatenate(bObj, offset, bArray):
        """
        Return C{bObj[offset:]} followed by the joined chunks of C{bArray},
        without copying the prefix slice.
        """
        # Avoid one extra string copy by using a buffer to limit what we include
        # in the result.
        return buffer(bObj, offset) + b"".join(bArray)
|
||||
|
||||
|
||||
class _ConsumerMixin(object):
|
||||
"""
|
||||
L{IConsumer} implementations can mix this in to get C{registerProducer} and
|
||||
C{unregisterProducer} methods which take care of keeping track of a
|
||||
producer's state.
|
||||
|
||||
Subclasses must provide three attributes which L{_ConsumerMixin} will read
|
||||
but not write:
|
||||
|
||||
- connected: A C{bool} which is C{True} as long as the the consumer has
|
||||
someplace to send bytes (for example, a TCP connection), and then
|
||||
C{False} when it no longer does.
|
||||
|
||||
- disconnecting: A C{bool} which is C{False} until something like
|
||||
L{ITransport.loseConnection} is called, indicating that the send buffer
|
||||
should be flushed and the connection lost afterwards. Afterwards,
|
||||
C{True}.
|
||||
|
||||
- disconnected: A C{bool} which is C{False} until the consumer no longer
|
||||
has a place to send bytes, then C{True}.
|
||||
|
||||
Subclasses must also override the C{startWriting} method.
|
||||
|
||||
@ivar producer: C{None} if no producer is registered, otherwise the
|
||||
registered producer.
|
||||
|
||||
@ivar producerPaused: A flag indicating whether the producer is currently
|
||||
paused.
|
||||
@type producerPaused: L{bool}
|
||||
|
||||
@ivar streamingProducer: A flag indicating whether the producer was
|
||||
registered as a streaming (ie push) producer or not (ie a pull
|
||||
producer). This will determine whether the consumer may ever need to
|
||||
pause and resume it, or if it can merely call C{resumeProducing} on it
|
||||
when buffer space is available.
|
||||
@ivar streamingProducer: C{bool} or C{int}
|
||||
|
||||
"""
|
||||
producer = None
|
||||
producerPaused = False
|
||||
streamingProducer = False
|
||||
|
||||
def startWriting(self):
|
||||
"""
|
||||
Override in a subclass to cause the reactor to monitor this selectable
|
||||
for write events. This will be called once in C{unregisterProducer} if
|
||||
C{loseConnection} has previously been called, so that the connection can
|
||||
actually close.
|
||||
"""
|
||||
raise NotImplementedError("%r did not implement startWriting")
|
||||
|
||||
|
||||
def registerProducer(self, producer, streaming):
|
||||
"""
|
||||
Register to receive data from a producer.
|
||||
|
||||
This sets this selectable to be a consumer for a producer. When this
|
||||
selectable runs out of data on a write() call, it will ask the producer
|
||||
to resumeProducing(). When the FileDescriptor's internal data buffer is
|
||||
filled, it will ask the producer to pauseProducing(). If the connection
|
||||
is lost, FileDescriptor calls producer's stopProducing() method.
|
||||
|
||||
If streaming is true, the producer should provide the IPushProducer
|
||||
interface. Otherwise, it is assumed that producer provides the
|
||||
IPullProducer interface. In this case, the producer won't be asked to
|
||||
pauseProducing(), but it has to be careful to write() data only when its
|
||||
resumeProducing() method is called.
|
||||
"""
|
||||
if self.producer is not None:
|
||||
raise RuntimeError(
|
||||
"Cannot register producer %s, because producer %s was never "
|
||||
"unregistered." % (producer, self.producer))
|
||||
if self.disconnected:
|
||||
producer.stopProducing()
|
||||
else:
|
||||
self.producer = producer
|
||||
self.streamingProducer = streaming
|
||||
if not streaming:
|
||||
producer.resumeProducing()
|
||||
|
||||
|
||||
def unregisterProducer(self):
|
||||
"""
|
||||
Stop consuming data from a producer, without disconnecting.
|
||||
"""
|
||||
self.producer = None
|
||||
if self.connected and self.disconnecting:
|
||||
self.startWriting()
|
||||
|
||||
|
||||
|
||||
@implementer(interfaces.ILoggingContext)
class _LogOwner(object):
    """
    Mixin to help implement L{interfaces.ILoggingContext} for transports which
    have a protocol, the log prefix of which should also appear in the
    transport's log prefix.
    """

    def _getLogPrefix(self, applicationObject):
        """
        Determine the log prefix to use for messages related to
        C{applicationObject}, which may or may not be an
        L{interfaces.ILoggingContext} provider.

        @return: A C{str} giving the log prefix to use.
        """
        # Objects without a logging context fall back to their class name.
        if not interfaces.ILoggingContext.providedBy(applicationObject):
            return applicationObject.__class__.__name__
        return applicationObject.logPrefix()


    def logPrefix(self):
        """
        Override this method to insert custom logging behavior.  Its
        return value will be inserted in front of every line.  It may
        be called more times than the number of output lines.
        """
        return "-"
|
||||
|
||||
|
||||
|
||||
@implementer(
|
||||
interfaces.IPushProducer, interfaces.IReadWriteDescriptor,
|
||||
interfaces.IConsumer, interfaces.ITransport,
|
||||
interfaces.IHalfCloseableDescriptor)
|
||||
class FileDescriptor(_ConsumerMixin, _LogOwner):
|
||||
"""
|
||||
An object which can be operated on by select().
|
||||
|
||||
This is an abstract superclass of all objects which may be notified when
|
||||
they are readable or writable; e.g. they have a file-descriptor that is
|
||||
valid to be passed to select(2).
|
||||
"""
|
||||
connected = 0
|
||||
disconnected = 0
|
||||
disconnecting = 0
|
||||
_writeDisconnecting = False
|
||||
_writeDisconnected = False
|
||||
dataBuffer = b""
|
||||
offset = 0
|
||||
|
||||
SEND_LIMIT = 128*1024
|
||||
|
||||
def __init__(self, reactor=None):
|
||||
"""
|
||||
@param reactor: An L{IReactorFDSet} provider which this descriptor will
|
||||
use to get readable and writeable event notifications. If no value
|
||||
is given, the global reactor will be used.
|
||||
"""
|
||||
if not reactor:
|
||||
from twisted.internet import reactor
|
||||
self.reactor = reactor
|
||||
self._tempDataBuffer = [] # will be added to dataBuffer in doWrite
|
||||
self._tempDataLen = 0
|
||||
|
||||
|
||||
def connectionLost(self, reason):
|
||||
"""The connection was lost.
|
||||
|
||||
This is called when the connection on a selectable object has been
|
||||
lost. It will be called whether the connection was closed explicitly,
|
||||
an exception occurred in an event handler, or the other end of the
|
||||
connection closed it first.
|
||||
|
||||
Clean up state here, but make sure to call back up to FileDescriptor.
|
||||
"""
|
||||
self.disconnected = 1
|
||||
self.connected = 0
|
||||
if self.producer is not None:
|
||||
self.producer.stopProducing()
|
||||
self.producer = None
|
||||
self.stopReading()
|
||||
self.stopWriting()
|
||||
|
||||
|
||||
def writeSomeData(self, data):
|
||||
"""
|
||||
Write as much as possible of the given data, immediately.
|
||||
|
||||
This is called to invoke the lower-level writing functionality, such
|
||||
as a socket's send() method, or a file's write(); this method
|
||||
returns an integer or an exception. If an integer, it is the number
|
||||
of bytes written (possibly zero); if an exception, it indicates the
|
||||
connection was lost.
|
||||
"""
|
||||
raise NotImplementedError("%s does not implement writeSomeData" %
|
||||
reflect.qual(self.__class__))
|
||||
|
||||
|
||||
def doRead(self):
|
||||
"""
|
||||
Called when data is available for reading.
|
||||
|
||||
Subclasses must override this method. The result will be interpreted
|
||||
in the same way as a result of doWrite().
|
||||
"""
|
||||
raise NotImplementedError("%s does not implement doRead" %
|
||||
reflect.qual(self.__class__))
|
||||
|
||||
def doWrite(self):
|
||||
"""
|
||||
Called when data can be written.
|
||||
|
||||
@return: C{None} on success, an exception or a negative integer on
|
||||
failure.
|
||||
|
||||
@see: L{twisted.internet.interfaces.IWriteDescriptor.doWrite}.
|
||||
"""
|
||||
if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
|
||||
# If there is currently less than SEND_LIMIT bytes left to send
|
||||
# in the string, extend it with the array data.
|
||||
self.dataBuffer = _concatenate(
|
||||
self.dataBuffer, self.offset, self._tempDataBuffer)
|
||||
self.offset = 0
|
||||
self._tempDataBuffer = []
|
||||
self._tempDataLen = 0
|
||||
|
||||
# Send as much data as you can.
|
||||
if self.offset:
|
||||
l = self.writeSomeData(lazyByteSlice(self.dataBuffer, self.offset))
|
||||
else:
|
||||
l = self.writeSomeData(self.dataBuffer)
|
||||
|
||||
# There is no writeSomeData implementation in Twisted which returns
|
||||
# < 0, but the documentation for writeSomeData used to claim negative
|
||||
# integers meant connection lost. Keep supporting this here,
|
||||
# although it may be worth deprecating and removing at some point.
|
||||
if isinstance(l, Exception) or l < 0:
|
||||
return l
|
||||
self.offset += l
|
||||
# If there is nothing left to send,
|
||||
if self.offset == len(self.dataBuffer) and not self._tempDataLen:
|
||||
self.dataBuffer = b""
|
||||
self.offset = 0
|
||||
# stop writing.
|
||||
self.stopWriting()
|
||||
# If I've got a producer who is supposed to supply me with data,
|
||||
if self.producer is not None and ((not self.streamingProducer)
|
||||
or self.producerPaused):
|
||||
# tell them to supply some more.
|
||||
self.producerPaused = False
|
||||
self.producer.resumeProducing()
|
||||
elif self.disconnecting:
|
||||
# But if I was previously asked to let the connection die, do
|
||||
# so.
|
||||
return self._postLoseConnection()
|
||||
elif self._writeDisconnecting:
|
||||
# I was previously asked to half-close the connection. We
|
||||
# set _writeDisconnected before calling handler, in case the
|
||||
# handler calls loseConnection(), which will want to check for
|
||||
# this attribute.
|
||||
self._writeDisconnected = True
|
||||
result = self._closeWriteConnection()
|
||||
return result
|
||||
return None
|
||||
|
||||
def _postLoseConnection(self):
|
||||
"""Called after a loseConnection(), when all data has been written.
|
||||
|
||||
Whatever this returns is then returned by doWrite.
|
||||
"""
|
||||
# default implementation, telling reactor we're finished
|
||||
return main.CONNECTION_DONE
|
||||
|
||||
def _closeWriteConnection(self):
|
||||
# override in subclasses
|
||||
pass
|
||||
|
||||
def writeConnectionLost(self, reason):
|
||||
# in current code should never be called
|
||||
self.connectionLost(reason)
|
||||
|
||||
def readConnectionLost(self, reason):
|
||||
# override in subclasses
|
||||
self.connectionLost(reason)
|
||||
|
||||
|
||||
def _isSendBufferFull(self):
|
||||
"""
|
||||
Determine whether the user-space send buffer for this transport is full
|
||||
or not.
|
||||
|
||||
When the buffer contains more than C{self.bufferSize} bytes, it is
|
||||
considered full. This might be improved by considering the size of the
|
||||
kernel send buffer and how much of it is free.
|
||||
|
||||
@return: C{True} if it is full, C{False} otherwise.
|
||||
"""
|
||||
return len(self.dataBuffer) + self._tempDataLen > self.bufferSize
|
||||
|
||||
|
||||
def _maybePauseProducer(self):
|
||||
"""
|
||||
Possibly pause a producer, if there is one and the send buffer is full.
|
||||
"""
|
||||
# If we are responsible for pausing our producer,
|
||||
if self.producer is not None and self.streamingProducer:
|
||||
# and our buffer is full,
|
||||
if self._isSendBufferFull():
|
||||
# pause it.
|
||||
self.producerPaused = True
|
||||
self.producer.pauseProducing()
|
||||
|
||||
|
||||
def write(self, data):
|
||||
"""Reliably write some data.
|
||||
|
||||
The data is buffered until the underlying file descriptor is ready
|
||||
for writing. If there is more than C{self.bufferSize} data in the
|
||||
buffer and this descriptor has a registered streaming producer, its
|
||||
C{pauseProducing()} method will be called.
|
||||
"""
|
||||
if isinstance(data, unicode): # no, really, I mean it
|
||||
raise TypeError("Data must not be unicode")
|
||||
if not self.connected or self._writeDisconnected:
|
||||
return
|
||||
if data:
|
||||
self._tempDataBuffer.append(data)
|
||||
self._tempDataLen += len(data)
|
||||
self._maybePauseProducer()
|
||||
self.startWriting()
|
||||
|
||||
|
||||
def writeSequence(self, iovec):
|
||||
"""
|
||||
Reliably write a sequence of data.
|
||||
|
||||
Currently, this is a convenience method roughly equivalent to::
|
||||
|
||||
for chunk in iovec:
|
||||
fd.write(chunk)
|
||||
|
||||
It may have a more efficient implementation at a later time or in a
|
||||
different reactor.
|
||||
|
||||
As with the C{write()} method, if a buffer size limit is reached and a
|
||||
streaming producer is registered, it will be paused until the buffered
|
||||
data is written to the underlying file descriptor.
|
||||
"""
|
||||
for i in iovec:
|
||||
if isinstance(i, unicode): # no, really, I mean it
|
||||
raise TypeError("Data must not be unicode")
|
||||
if not self.connected or not iovec or self._writeDisconnected:
|
||||
return
|
||||
self._tempDataBuffer.extend(iovec)
|
||||
for i in iovec:
|
||||
self._tempDataLen += len(i)
|
||||
self._maybePauseProducer()
|
||||
self.startWriting()
|
||||
|
||||
|
||||
def loseConnection(self, _connDone=failure.Failure(main.CONNECTION_DONE)):
|
||||
"""Close the connection at the next available opportunity.
|
||||
|
||||
Call this to cause this FileDescriptor to lose its connection. It will
|
||||
first write any data that it has buffered.
|
||||
|
||||
If there is data buffered yet to be written, this method will cause the
|
||||
transport to lose its connection as soon as it's done flushing its
|
||||
write buffer. If you have a producer registered, the connection won't
|
||||
be closed until the producer is finished. Therefore, make sure you
|
||||
unregister your producer when it's finished, or the connection will
|
||||
never close.
|
||||
"""
|
||||
|
||||
if self.connected and not self.disconnecting:
|
||||
if self._writeDisconnected:
|
||||
# doWrite won't trigger the connection close anymore
|
||||
self.stopReading()
|
||||
self.stopWriting()
|
||||
self.connectionLost(_connDone)
|
||||
else:
|
||||
self.stopReading()
|
||||
self.startWriting()
|
||||
self.disconnecting = 1
|
||||
|
||||
def loseWriteConnection(self):
|
||||
self._writeDisconnecting = True
|
||||
self.startWriting()
|
||||
|
||||
def stopReading(self):
|
||||
"""Stop waiting for read availability.
|
||||
|
||||
Call this to remove this selectable from being notified when it is
|
||||
ready for reading.
|
||||
"""
|
||||
self.reactor.removeReader(self)
|
||||
|
||||
def stopWriting(self):
|
||||
"""Stop waiting for write availability.
|
||||
|
||||
Call this to remove this selectable from being notified when it is ready
|
||||
for writing.
|
||||
"""
|
||||
self.reactor.removeWriter(self)
|
||||
|
||||
def startReading(self):
|
||||
"""Start waiting for read availability.
|
||||
"""
|
||||
self.reactor.addReader(self)
|
||||
|
||||
def startWriting(self):
|
||||
"""Start waiting for write availability.
|
||||
|
||||
Call this to have this FileDescriptor be notified whenever it is ready for
|
||||
writing.
|
||||
"""
|
||||
self.reactor.addWriter(self)
|
||||
|
||||
# Producer/consumer implementation
|
||||
|
||||
# first, the consumer stuff. This requires no additional work, as
|
||||
# any object you can write to can be a consumer, really.
|
||||
|
||||
producer = None
|
||||
bufferSize = 2**2**2**2
|
||||
|
||||
def stopConsuming(self):
    """
    Stop consuming data.

    Called when a producer has lost its connection; unhooks the producer
    and drops this transport's connection as well, breaking potential
    circular references between the two.
    """
    self.unregisterProducer()
    self.loseConnection()
|
||||
|
||||
# producer interface implementation
|
||||
|
||||
def resumeProducing(self):
    """
    Resume producing data: re-enable read notification, but only while
    the transport is connected and not already shutting down.
    """
    if not self.connected or self.disconnecting:
        return
    self.startReading()
|
||||
|
||||
def pauseProducing(self):
    """
    Pause producing data by suspending read notification until
    C{resumeProducing} is called.
    """
    self.stopReading()
|
||||
|
||||
def stopProducing(self):
    """
    Stop producing data permanently by closing the connection.
    """
    self.loseConnection()
|
||||
|
||||
|
||||
def fileno(self):
    """
    File Descriptor number for select().

    Subclasses must override (or assign) this to supply a valid file
    descriptor for the operating system; this base implementation
    returns the sentinel value -1.
    """
    return -1
|
||||
|
||||
|
||||
def isIPAddress(addr):
    """
    Determine whether the given string represents an IPv4 address.

    @type addr: C{str}
    @param addr: A string which may or may not be the decimal dotted
        representation of an IPv4 address.

    @rtype: C{bool}
    @return: C{True} if C{addr} represents an IPv4 address, C{False}
        otherwise.
    """
    parts = addr.split('.')
    # A dotted quad has exactly four components.
    if len(parts) != 4:
        return False
    for octet in parts:
        try:
            value = int(octet)
        except ValueError:
            return False
        if not (0 <= value <= 255):
            return False
    return True
|
||||
|
||||
|
||||
def isIPv6Address(addr):
    """
    Determine whether the given string represents an IPv6 address.

    @param addr: A string which may or may not be the hex representation
        of an IPv6 address; a zone identifier suffix (e.g. C{"%eth0"}) is
        stripped before checking.
    @type addr: C{str}

    @return: C{True} if C{addr} represents an IPv6 address, C{False}
        otherwise.
    @rtype: C{bool}
    """
    # Discard any scope/zone identifier: "fe80::1%eth0" -> "fe80::1".
    if '%' in addr:
        addr, _, _ = addr.partition('%')
    if not addr:
        return False
    try:
        # This might be a native implementation or the one from
        # twisted.python.compat.
        inet_pton(AF_INET6, addr)
    except (ValueError, error):
        return False
    return True
|
||||
|
||||
|
||||
# Public API of this module.
__all__ = ["FileDescriptor", "isIPAddress", "isIPv6Address"]
|
||||
|
|
@ -0,0 +1,181 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Address objects for network connections.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import warnings, os
|
||||
|
||||
from zope.interface import implementer
|
||||
from twisted.internet.interfaces import IAddress
|
||||
from twisted.python.util import FancyEqMixin
|
||||
|
||||
|
||||
@implementer(IAddress)
class _IPAddress(FancyEqMixin, object):
    """
    An L{_IPAddress} represents the address of an IP socket endpoint, providing
    common behavior for IPv4 and IPv6.

    @ivar type: A string describing the type of transport, either 'TCP' or
        'UDP'.

    @ivar host: A string containing the presentation format of the IP address;
        for example, "127.0.0.1" or "::1".
    @type host: C{str}

    @ivar port: An integer representing the port number.
    @type port: C{int}
    """

    compareAttributes = ('type', 'host', 'port')

    def __init__(self, type, host, port):
        # Only TCP and UDP endpoints are modelled by this hierarchy.
        assert type in ('TCP', 'UDP')
        self.type = type
        self.host = host
        self.port = port

    def __repr__(self):
        return '%s(%s, %r, %d)' % (
            type(self).__name__, self.type, self.host, self.port)

    def __hash__(self):
        # Hash over the same attributes FancyEqMixin compares, keeping
        # hashing consistent with equality.
        key = (self.type, self.host, self.port)
        return hash(key)
|
||||
|
||||
|
||||
|
||||
class IPv4Address(_IPAddress):
|
||||
"""
|
||||
An L{IPv4Address} represents the address of an IPv4 socket endpoint.
|
||||
|
||||
@ivar host: A string containing a dotted-quad IPv4 address; for example,
|
||||
"127.0.0.1".
|
||||
@type host: C{str}
|
||||
"""
|
||||
|
||||
def __init__(self, type, host, port, _bwHack=None):
|
||||
_IPAddress.__init__(self, type, host, port)
|
||||
if _bwHack is not None:
|
||||
warnings.warn("twisted.internet.address.IPv4Address._bwHack "
|
||||
"is deprecated since Twisted 11.0",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
|
||||
|
||||
class IPv6Address(_IPAddress):
    """
    An L{IPv6Address} represents the address of an IPv6 socket endpoint.

    @ivar host: A string containing a colon-separated, hexadecimal formatted
        IPv6 address; for example, "::1".
    @type host: C{str}
    """
    # No body beyond the docstring: all behavior (construction, equality,
    # repr, hashing) is inherited from _IPAddress.
|
||||
|
||||
|
||||
|
||||
@implementer(IAddress)
class _ProcessAddress(object):
    """
    An L{interfaces.IAddress} provider for process transports.
    """
    # Intentionally empty: process transports have no meaningful peer
    # address attributes; this is a marker implementation of IAddress.
|
||||
|
||||
|
||||
|
||||
@implementer(IAddress)
class HostnameAddress(FancyEqMixin, object):
    """
    A L{HostnameAddress} represents the address of a L{HostnameEndpoint}.

    @ivar hostname: A hostname byte string; for example, b"example.com".
    @type hostname: L{bytes}

    @ivar port: An integer representing the port number.
    @type port: L{int}
    """
    compareAttributes = ('hostname', 'port')

    def __init__(self, hostname, port):
        self.port = port
        self.hostname = hostname

    def __repr__(self):
        return '%s(%s, %d)' % (
            type(self).__name__, self.hostname, self.port)

    def __hash__(self):
        # Hash over the attributes FancyEqMixin compares.
        key = (self.hostname, self.port)
        return hash(key)
|
||||
|
||||
|
||||
|
||||
@implementer(IAddress)
class UNIXAddress(FancyEqMixin, object):
    """
    Object representing a UNIX socket endpoint.

    @ivar name: The filename associated with this socket.
    @type name: C{str}
    """

    compareAttributes = ('name', )

    def __init__(self, name, _bwHack = None):
        self.name = name
        if _bwHack is not None:
            # The second argument has been deprecated since Twisted 11.0.
            warnings.warn("twisted.internet.address.UNIXAddress._bwHack is deprecated since Twisted 11.0",
                    DeprecationWarning, stacklevel=2)


    # Only define the __eq__ override where the platform actually provides
    # os.path.samefile (it is unavailable on some platforms); otherwise the
    # inherited FancyEqMixin comparison is used unchanged.
    if getattr(os.path, 'samefile', None) is not None:
        def __eq__(self, other):
            """
            Overriding C{FancyEqMixin} to ensure the os level samefile
            check is done if the name attributes do not match.
            """
            res = super(UNIXAddress, self).__eq__(other)
            if not res and self.name and other.name:
                try:
                    # Two different path strings may still refer to the same
                    # file on disk; fall back to an inode-level comparison.
                    return os.path.samefile(self.name, other.name)
                except OSError:
                    # One of the paths doesn't exist; keep the attribute
                    # comparison result.
                    pass
            return res


    def __repr__(self):
        return 'UNIXAddress(%r)' % (self.name,)


    def __hash__(self):
        if self.name is None:
            return hash((self.__class__, None))
        try:
            # Hash on the inode/device pair so that equal-by-samefile
            # addresses hash equally.
            s1 = os.stat(self.name)
            return hash((s1.st_ino, s1.st_dev))
        except OSError:
            # Path doesn't exist (yet); hash the name itself.
            return hash(self.name)
|
||||
|
||||
|
||||
|
||||
# These are for buildFactory backwards compatibility due to
# stupidity-induced inconsistency.
|
||||
|
||||
class _ServerFactoryIPv4Address(IPv4Address):
    """
    Backwards compatibility hack.  Just like IPv4Address in practice.
    """

    def __eq__(self, other):
        # Old code compared addresses against (host, port) tuples; keep
        # supporting that form, but warn about it.
        if isinstance(other, tuple):
            warnings.warn("IPv4Address.__getitem__ is deprecated.  Use attributes instead.",
                          category=DeprecationWarning, stacklevel=2)
            return (self.host, self.port) == other
        if isinstance(other, IPv4Address):
            return ((self.type, self.host, self.port)
                    == (other.type, other.host, other.port))
        return False
|
||||
1213
Linux_i686/lib/python2.7/site-packages/twisted/internet/base.py
Normal file
1213
Linux_i686/lib/python2.7/site-packages/twisted/internet/base.py
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,501 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_core -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
A reactor for integrating with U{CFRunLoop<http://bit.ly/cfrunloop>}, the
|
||||
CoreFoundation main loop used by MacOS X.
|
||||
|
||||
This is useful for integrating Twisted with U{PyObjC<http://pyobjc.sf.net/>}
|
||||
applications.
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'install',
|
||||
'CFReactor'
|
||||
]
|
||||
|
||||
import sys
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from twisted.internet.interfaces import IReactorFDSet
|
||||
from twisted.internet.posixbase import PosixReactorBase, _Waker
|
||||
from twisted.internet.posixbase import _NO_FILEDESC
|
||||
|
||||
from twisted.python import log
|
||||
|
||||
from CoreFoundation import (
|
||||
CFRunLoopAddSource, CFRunLoopRemoveSource, CFRunLoopGetMain, CFRunLoopRun,
|
||||
CFRunLoopStop, CFRunLoopTimerCreate, CFRunLoopAddTimer,
|
||||
CFRunLoopTimerInvalidate, kCFAllocatorDefault, kCFRunLoopCommonModes,
|
||||
CFAbsoluteTimeGetCurrent)
|
||||
|
||||
from CFNetwork import (
|
||||
CFSocketCreateWithNative, CFSocketSetSocketFlags, CFSocketEnableCallBacks,
|
||||
CFSocketCreateRunLoopSource, CFSocketDisableCallBacks, CFSocketInvalidate,
|
||||
kCFSocketWriteCallBack, kCFSocketReadCallBack, kCFSocketConnectCallBack,
|
||||
kCFSocketAutomaticallyReenableReadCallBack,
|
||||
kCFSocketAutomaticallyReenableWriteCallBack)
|
||||
|
||||
|
||||
# Indexes into the per-descriptor [reading, writing] state list kept in
# CFReactor._fdmap.
_READ = 0
_WRITE = 1
# CFSocket flag preventing CF from (destructively) retrieving SO_ERROR;
# see the explanatory comment where it is used in CFReactor._watchFD.
_preserveSOError = 1 << 6
|
||||
|
||||
|
||||
class _WakerPlus(_Waker):
    """
    The normal Twisted waker will simply wake up the main loop, which causes an
    iteration to run, which in turn causes L{PosixReactorBase.runUntilCurrent}
    to get invoked.

    L{CFReactor} has a slightly different model of iteration, though: rather
    than have each iteration process the thread queue, then timed calls, then
    file descriptors, each callback is run as it is dispatched by the CFRunLoop
    observer which triggered it.

    So this waker needs to not only unblock the loop, but also make sure the
    work gets done; so, it reschedules the invocation of C{runUntilCurrent} to
    be immediate (0 seconds from now) even if there is no timed call work to
    do.
    """

    def doRead(self):
        """
        Wake up the loop and force C{runUntilCurrent} to run immediately in the
        next timed iteration.
        """
        # Drain the wakeup pipe via the base implementation first, then force
        # an immediate (0-second) simulate pass so the pending work is run.
        result = _Waker.doRead(self)
        self.reactor._scheduleSimulate(True)
        return result
|
||||
|
||||
|
||||
|
||||
class CFReactor(PosixReactorBase):
    """
    The CoreFoundation reactor.

    You probably want to use this via the L{install} API.

    @ivar _fdmap: a dictionary, mapping an integer (a file descriptor) to a
        4-tuple of:

            - source: a C{CFRunLoopSource}; the source associated with this
              socket.
            - socket: a C{CFSocket} wrapping the file descriptor.
            - descriptor: an L{IReadDescriptor} and/or L{IWriteDescriptor}
              provider.
            - read-write: a 2-C{list} of booleans: respectively, whether this
              descriptor is currently registered for reading or registered for
              writing.

    @ivar _idmap: a dictionary, mapping the id() of an L{IReadDescriptor} or
        L{IWriteDescriptor} to a C{fd} in L{_fdmap}.  Implemented in this
        manner so that we don't have to rely (even more) on the hashability of
        L{IReadDescriptor} providers, and we know that they won't be collected
        since these are kept in sync with C{_fdmap}.  Necessary because the
        .fileno() of a file descriptor may change at will, so we need to be
        able to look up what its file descriptor I{used} to be, so that we can
        look it up in C{_fdmap}

    @ivar _cfrunloop: the L{CFRunLoop} pyobjc object wrapped by this reactor.

    @ivar _inCFLoop: Is L{CFRunLoopRun} currently running?

    @type _inCFLoop: C{bool}

    @ivar _currentSimulator: if a CFTimer is currently scheduled with the CF
        run loop to run Twisted callLater calls, this is a reference to it.
        Otherwise, it is C{None}
    """

    implements(IReactorFDSet)

    def __init__(self, runLoop=None, runner=None):
        # See the class docstring for the structure of these two mappings.
        self._fdmap = {}
        self._idmap = {}
        # Default to running the plain CFRunLoop; GUI applications may pass
        # a different runner (e.g. AppHelper.runEventLoop).
        if runner is None:
            runner = CFRunLoopRun
        self._runner = runner

        # Default to the process's main run loop.
        if runLoop is None:
            runLoop = CFRunLoopGetMain()
        self._cfrunloop = runLoop
        PosixReactorBase.__init__(self)
|
||||
|
||||
|
||||
def installWaker(self):
    """
    Override C{installWaker} in order to use L{_WakerPlus}; otherwise this
    should be exactly the same as the parent implementation.
    """
    if self.waker:
        return
    self.waker = _WakerPlus(self)
    self._internalReaders.add(self.waker)
    self.addReader(self.waker)
|
||||
|
||||
|
||||
def _socketCallback(self, cfSocket, callbackType,
                    ignoredAddress, ignoredData, context):
    """
    The socket callback issued by CFRunLoop.  This will issue C{doRead} or
    C{doWrite} calls to the L{IReadDescriptor} and L{IWriteDescriptor}
    registered with the file descriptor that we are being notified of.

    @param cfSocket: The L{CFSocket} which has got some activity.

    @param callbackType: The type of activity that we are being notified
        of.  Either L{kCFSocketReadCallBack} or L{kCFSocketWriteCallBack}.

    @param ignoredAddress: Unused, because this is not used for either of
        the callback types we register for.

    @param ignoredData: Unused, because this is not used for either of the
        callback types we register for.

    @param context: The data associated with this callback by
        L{CFSocketCreateWithNative} (in L{CFReactor._watchFD}).  A 2-tuple
        of C{(int, CFRunLoopSource)}.
    """
    (fd, smugglesrc) = context
    if fd not in self._fdmap:
        # Spurious notifications seem to be generated sometimes if you
        # CFSocketDisableCallBacks in the middle of an event.  I don't know
        # about this FD, any more, so let's get rid of it.
        CFRunLoopRemoveSource(
            self._cfrunloop, smugglesrc, kCFRunLoopCommonModes
        )
        return

    # 'why' holds the failure reason (if any) that will be used to
    # disconnect the descriptor after dispatching.
    why = None
    isRead = False
    src, skt, readWriteDescriptor, rw = self._fdmap[fd]
    try:
        if readWriteDescriptor.fileno() == -1:
            # The descriptor no longer has a valid fd; treat that as a
            # disconnection.
            why = _NO_FILEDESC
        else:
            isRead = callbackType == kCFSocketReadCallBack
            # CFSocket seems to deliver duplicate read/write notifications
            # sometimes, especially a duplicate writability notification
            # when first registering the socket.  This bears further
            # investigation, since I may have been mis-interpreting the
            # behavior I was seeing.  (Running the full Twisted test suite,
            # while thorough, is not always entirely clear.)  Until this has
            # been more thoroughly investigated, we consult our own
            # reading/writing state flags to determine whether we should
            # actually attempt a doRead/doWrite first.  -glyph
            if isRead:
                if rw[_READ]:
                    why = log.callWithLogger(
                        readWriteDescriptor, readWriteDescriptor.doRead)
            else:
                if rw[_WRITE]:
                    why = log.callWithLogger(
                        readWriteDescriptor, readWriteDescriptor.doWrite)
    except:
        # Any unexpected exception from the descriptor is logged and treated
        # as a reason to disconnect it.
        why = sys.exc_info()[1]
        log.err()
    if why:
        self._disconnectSelectable(readWriteDescriptor, why, isRead)
|
||||
|
||||
|
||||
def _watchFD(self, fd, descr, flag):
    """
    Register a file descriptor with the L{CFRunLoop}, or modify its state
    so that it's listening for both notifications (read and write) rather
    than just one; used to implement C{addReader} and C{addWriter}.

    @param fd: The file descriptor.

    @type fd: C{int}

    @param descr: the L{IReadDescriptor} or L{IWriteDescriptor}

    @param flag: the flag to register for callbacks on, either
        L{kCFSocketReadCallBack} or L{kCFSocketWriteCallBack}
    """
    if fd == -1:
        raise RuntimeError("Invalid file descriptor.")
    if fd in self._fdmap:
        # Already watched for at least one direction; just enable the
        # additional callback below.
        src, cfs, gotdescr, rw = self._fdmap[fd]
        # do I need to verify that it's the same descr?
    else:
        # The context list is filled in incrementally: the callback needs
        # the run loop source, which cannot exist until after the CFSocket
        # it is created from.
        ctx = []
        ctx.append(fd)
        cfs = CFSocketCreateWithNative(
            kCFAllocatorDefault, fd,
            kCFSocketReadCallBack | kCFSocketWriteCallBack |
            kCFSocketConnectCallBack,
            self._socketCallback, ctx
        )
        CFSocketSetSocketFlags(
            cfs,
            kCFSocketAutomaticallyReenableReadCallBack |
            kCFSocketAutomaticallyReenableWriteCallBack |

            # This extra flag is to ensure that CF doesn't (destructively,
            # because destructively is the only way to do it) retrieve
            # SO_ERROR and thereby break twisted.internet.tcp.BaseClient,
            # which needs SO_ERROR to tell it whether or not it needs to
            # call connect_ex a second time.
            _preserveSOError
        )
        src = CFSocketCreateRunLoopSource(kCFAllocatorDefault, cfs, 0)
        ctx.append(src)
        CFRunLoopAddSource(self._cfrunloop, src, kCFRunLoopCommonModes)
        # Start with every callback disabled; only the requested direction
        # is enabled at the end of this method.
        CFSocketDisableCallBacks(
            cfs,
            kCFSocketReadCallBack | kCFSocketWriteCallBack |
            kCFSocketConnectCallBack
        )
        rw = [False, False]
        self._idmap[id(descr)] = fd
        self._fdmap[fd] = src, cfs, descr, rw
    rw[self._flag2idx(flag)] = True
    CFSocketEnableCallBacks(cfs, flag)
|
||||
|
||||
|
||||
def _flag2idx(self, flag):
    """
    Convert a C{kCFSocket...} constant to an index into the read/write
    state list (C{_READ} or C{_WRITE}) (the 4th element of the value of
    C{self._fdmap}).

    @param flag: C{kCFSocketReadCallBack} or C{kCFSocketWriteCallBack}

    @return: C{_READ} or C{_WRITE}
    """
    lookup = {kCFSocketReadCallBack: _READ,
              kCFSocketWriteCallBack: _WRITE}
    return lookup[flag]
|
||||
|
||||
|
||||
def _unwatchFD(self, fd, descr, flag):
    """
    Unregister a file descriptor with the L{CFRunLoop}, or modify its state
    so that it's listening for only one notification (read or write) as
    opposed to both; used to implement C{removeReader} and C{removeWriter}.

    @param fd: a file descriptor

    @type fd: C{int}

    @param descr: an L{IReadDescriptor} or L{IWriteDescriptor}

    @param flag: L{kCFSocketWriteCallBack} L{kCFSocketReadCallBack}
    """
    if id(descr) not in self._idmap:
        # Never registered (or already fully removed); nothing to do.
        return
    if fd == -1:
        # The descriptor's fileno() has already gone away; recover the fd it
        # used to have from the id-based mapping.
        # need to deal with it in this case, I think.
        realfd = self._idmap[id(descr)]
    else:
        realfd = fd
    src, cfs, descr, rw = self._fdmap[realfd]
    CFSocketDisableCallBacks(cfs, flag)
    rw[self._flag2idx(flag)] = False
    if not rw[_READ] and not rw[_WRITE]:
        # Neither direction is watched any more; tear down all CF state for
        # this descriptor.
        del self._idmap[id(descr)]
        del self._fdmap[realfd]
        CFRunLoopRemoveSource(self._cfrunloop, src, kCFRunLoopCommonModes)
        CFSocketInvalidate(cfs)
|
||||
|
||||
|
||||
# IReactorFDSet registration methods: each delegates to _watchFD/_unwatchFD
# with the CFSocket callback flag for the relevant direction.

def addReader(self, reader):
    """
    Implement L{IReactorFDSet.addReader}.
    """
    self._watchFD(reader.fileno(), reader, kCFSocketReadCallBack)


def addWriter(self, writer):
    """
    Implement L{IReactorFDSet.addWriter}.
    """
    self._watchFD(writer.fileno(), writer, kCFSocketWriteCallBack)


def removeReader(self, reader):
    """
    Implement L{IReactorFDSet.removeReader}.
    """
    self._unwatchFD(reader.fileno(), reader, kCFSocketReadCallBack)


def removeWriter(self, writer):
    """
    Implement L{IReactorFDSet.removeWriter}.
    """
    self._unwatchFD(writer.fileno(), writer, kCFSocketWriteCallBack)
|
||||
|
||||
|
||||
def removeAll(self):
    """
    Implement L{IReactorFDSet.removeAll}.

    Unregister every descriptor except the reactor's internal ones (such
    as the waker) and return those that were removed.
    """
    descriptors = {descr for src, cfs, descr, rw in self._fdmap.values()}
    descriptors -= set(self._internalReaders)
    for descriptor in descriptors:
        self.removeReader(descriptor)
        self.removeWriter(descriptor)
    return list(descriptors)
|
||||
|
||||
|
||||
def getReaders(self):
    """
    Implement L{IReactorFDSet.getReaders}.
    """
    # Filter _fdmap on the per-descriptor "currently reading" flag.
    return [descr for src, cfs, descr, rw in self._fdmap.values()
            if rw[_READ]]


def getWriters(self):
    """
    Implement L{IReactorFDSet.getWriters}.
    """
    # Filter _fdmap on the per-descriptor "currently writing" flag.
    return [descr for src, cfs, descr, rw in self._fdmap.values()
            if rw[_WRITE]]
|
||||
|
||||
|
||||
def _moveCallLaterSooner(self, tple):
    """
    Override L{PosixReactorBase}'s implementation of L{IDelayedCall.reset}
    so that it will immediately reschedule.  Normally
    C{_moveCallLaterSooner} depends on the fact that C{runUntilCurrent} is
    always run before the mainloop goes back to sleep, so this forces it to
    immediately recompute how long the loop needs to stay asleep.
    """
    result = PosixReactorBase._moveCallLaterSooner(self, tple)
    # Re-arm the CF timer so the new, earlier deadline takes effect now.
    self._scheduleSimulate()
    return result
|
||||
|
||||
|
||||
# True while CFRunLoopRun is executing; consulted by crash() to decide
# whether the loop can be stopped immediately.
_inCFLoop = False

def mainLoop(self):
    """
    Run the runner (L{CFRunLoopRun} or something that calls it), which runs
    the run loop until C{crash()} is called.
    """
    self._inCFLoop = True
    try:
        self._runner()
    finally:
        # Always clear the flag, even if the runner raises.
        self._inCFLoop = False
|
||||
|
||||
|
||||
# The CFRunLoopTimer currently scheduled to run runUntilCurrent, or None.
_currentSimulator = None

def _scheduleSimulate(self, force=False):
    """
    Schedule a call to C{self.runUntilCurrent}.  This will cancel the
    currently scheduled call if it is already scheduled.

    @param force: Even if there are no timed calls, make sure that
        C{runUntilCurrent} runs immediately (in a 0-seconds-from-now
        {CFRunLoopTimer}).  This is necessary for calls which need to
        trigger behavior of C{runUntilCurrent} other than running timed
        calls, such as draining the thread call queue or calling C{crash()}
        when the appropriate flags are set.

    @type force: C{bool}
    """
    if self._currentSimulator is not None:
        # At most one simulate timer may be outstanding at a time.
        CFRunLoopTimerInvalidate(self._currentSimulator)
        self._currentSimulator = None
    timeout = self.timeout()
    if force:
        timeout = 0.0
    # timeout is None when there are no delayed calls and no forced run;
    # in that case no timer is scheduled at all.
    if timeout is not None:
        fireDate = (CFAbsoluteTimeGetCurrent() + timeout)
        def simulate(cftimer, extra):
            # The timer has fired, so it is no longer outstanding.
            self._currentSimulator = None
            self.runUntilCurrent()
            # Schedule the next pass for the next pending delayed call.
            self._scheduleSimulate()
        c = self._currentSimulator = CFRunLoopTimerCreate(
            kCFAllocatorDefault, fireDate,
            0, 0, 0, simulate, None
        )
        CFRunLoopAddTimer(self._cfrunloop, c, kCFRunLoopCommonModes)
|
||||
|
||||
|
||||
def callLater(self, _seconds, _f, *args, **kw):
    """
    Implement L{IReactorTime.callLater}.

    Delegates to the base class, then re-arms the CF timer so that the
    newly scheduled call's deadline is taken into account.
    """
    call = PosixReactorBase.callLater(self, _seconds, _f, *args, **kw)
    self._scheduleSimulate()
    return call
|
||||
|
||||
|
||||
def stop(self):
    """
    Implement L{IReactorCore.stop}.
    """
    PosixReactorBase.stop(self)
    # Force an immediate runUntilCurrent pass so the shutdown sequence
    # actually runs even when no timed calls are pending.
    self._scheduleSimulate(True)
|
||||
|
||||
|
||||
def crash(self):
    """
    Implement L{IReactorCore.crash}
    """
    wasStarted = self._started
    PosixReactorBase.crash(self)
    if self._inCFLoop:
        # CFRunLoopRun is executing right now; it can be stopped directly.
        self._stopNow()
    else:
        if wasStarted:
            # The loop isn't running yet (or any more); arrange for it to
            # stop as soon as it next runs.
            self.callLater(0, self._stopNow)
|
||||
|
||||
|
||||
def _stopNow(self):
    """
    Immediately stop the CFRunLoop (which must be running!).
    """
    CFRunLoopStop(self._cfrunloop)
|
||||
|
||||
|
||||
def iterate(self, delay=0):
    """
    Emulate the behavior of C{iterate()} for things that want to call it,
    by letting the loop run for a little while and then scheduling a timed
    call to exit it.
    """
    # Arrange for the loop to stop after 'delay' seconds, then run it.
    self.callLater(delay, self._stopNow)
    self.mainLoop()
|
||||
|
||||
|
||||
|
||||
def install(runLoop=None, runner=None):
    """
    Configure the twisted mainloop to be run inside CFRunLoop.

    @param runLoop: the run loop to use.

    @param runner: the function to call in order to actually invoke the main
        loop.  This will default to L{CFRunLoopRun} if not specified.  However,
        this is not an appropriate choice for GUI applications, as you need to
        run NSApplicationMain (or something like it).  For example, to run the
        Twisted mainloop in a PyObjC application, your C{main.py} should look
        something like this::

            from PyObjCTools import AppHelper
            from twisted.internet.cfreactor import install
            install(runner=AppHelper.runEventLoop)
            # initialize your application
            reactor.run()

    @return: The installed reactor.

    @rtype: L{CFReactor}
    """
    from twisted.internet.main import installReactor

    newReactor = CFReactor(runLoop=runLoop, runner=runner)
    installReactor(newReactor)
    return newReactor
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_default -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
The most suitable default reactor for the current platform.
|
||||
|
||||
Depending on a specific application's needs, some other reactor may in
|
||||
fact be better.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
__all__ = ["install"]
|
||||
|
||||
from twisted.python.runtime import platform
|
||||
|
||||
|
||||
def _getInstallFunction(platform):
    """
    Return a function to install the reactor most suited for the given platform.

    @param platform: The platform for which to select a reactor.
    @type platform: L{twisted.python.runtime.Platform}

    @return: A zero-argument callable which will install the selected
        reactor.
    """
    # Linux: epoll(7) is the default, since it scales well.
    #
    # OS X: poll(2) is not exposed by Python because it doesn't support all
    # file descriptors (in particular, lack of PTY support is a problem) --
    # see <http://bugs.python.org/issue5154>.  kqueue has the same restrictions
    # as poll(2) as far PTY support goes.
    #
    # Windows: IOCP should eventually be default, but still has some serious
    # bugs, e.g. <http://twistedmatrix.com/trac/ticket/4667>.
    #
    # We therefore choose epoll(7) on Linux, poll(2) on other non-OS X POSIX
    # platforms, and select(2) everywhere else.
    try:
        if platform.isLinux():
            try:
                from twisted.internet.epollreactor import install
            except ImportError:
                # epoll support missing from this Python build; fall back
                # to poll(2) on Linux.
                from twisted.internet.pollreactor import install
        elif platform.getType() == 'posix' and not platform.isMacOSX():
            from twisted.internet.pollreactor import install
        else:
            from twisted.internet.selectreactor import install
    except ImportError:
        # Last resort: select(2) is available everywhere.
        from twisted.internet.selectreactor import install
    return install


# Selected at import time for the platform this process is running on.
install = _getInstallFunction(platform)
|
||||
1623
Linux_i686/lib/python2.7/site-packages/twisted/internet/defer.py
Normal file
1623
Linux_i686/lib/python2.7/site-packages/twisted/internet/defer.py
Normal file
File diff suppressed because it is too large
Load diff
1792
Linux_i686/lib/python2.7/site-packages/twisted/internet/endpoints.py
Normal file
1792
Linux_i686/lib/python2.7/site-packages/twisted/internet/endpoints.py
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,410 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
An epoll() based implementation of the twisted main loop.
|
||||
|
||||
To install the event loop (and you should do this before any connections,
|
||||
listeners or connectors are added)::
|
||||
|
||||
from twisted.internet import epollreactor
|
||||
epollreactor.install()
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from select import epoll, EPOLLHUP, EPOLLERR, EPOLLIN, EPOLLOUT
|
||||
import errno
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.internet.interfaces import IReactorFDSet
|
||||
|
||||
from twisted.python import log
|
||||
from twisted.internet import posixbase
|
||||
|
||||
|
||||
|
||||
@implementer(IReactorFDSet)
class _ContinuousPolling(posixbase._PollLikeMixin,
                         posixbase._DisconnectSelectableMixin):
    """
    Schedule reads and writes based on the passage of time, rather than
    notification.

    This is useful for supporting polling filesystem files, which C{epoll(7)}
    does not support.

    The implementation uses L{posixbase._PollLikeMixin}, which is a bit hacky,
    but re-implementing and testing the relevant code yet again is
    unappealing.

    @ivar _reactor: The L{EPollReactor} that is using this instance.

    @ivar _loop: A C{LoopingCall} that drives the polling, or C{None}.

    @ivar _readers: A C{set} of C{FileDescriptor} objects that should be read
        from.

    @ivar _writers: A C{set} of C{FileDescriptor} objects that should be
        written to.
    """

    # Attributes for _PollLikeMixin: event masks passed to _doReadOrWrite.
    _POLL_DISCONNECTED = 1
    _POLL_IN = 2
    _POLL_OUT = 4
|
||||
|
||||
|
||||
def __init__(self, reactor):
    # The reactor whose clock drives the polling LoopingCall.
    self._reactor = reactor
    # The active LoopingCall while polling is running, else None.
    self._loop = None
    # Descriptors to poll for readability and writability, respectively.
    self._readers = set()
    self._writers = set()
|
||||
|
||||
|
||||
def _checkLoop(self):
    """
    Start or stop a C{LoopingCall} based on whether there are readers and
    writers.
    """
    if self._readers or self._writers:
        if self._loop is None:
            # NOTE(review): local import — presumably to avoid a circular
            # dependency with twisted.internet.task; confirm.
            from twisted.internet.task import LoopingCall, _EPSILON
            self._loop = LoopingCall(self.iterate)
            self._loop.clock = self._reactor
            # LoopingCall seems unhappy with timeout of 0, so use very
            # small number:
            self._loop.start(_EPSILON, now=False)
    elif self._loop:
        # Nothing left to poll; shut the loop down.
        self._loop.stop()
        self._loop = None
|
||||
|
||||
|
||||
def iterate(self):
    """
    Call C{doRead} and C{doWrite} on all readers and writers respectively.
    """
    # Snapshot each set with list() so descriptors may register or
    # unregister themselves while being serviced.
    for descriptor in list(self._readers):
        self._doReadOrWrite(descriptor, descriptor, self._POLL_IN)
    for descriptor in list(self._writers):
        self._doReadOrWrite(descriptor, descriptor, self._POLL_OUT)
|
||||
|
||||
|
||||
def addReader(self, reader):
    """
    Add a C{FileDescriptor} for notification of data available to read.
    """
    self._readers.add(reader)
    # Registration may require (re)starting the polling loop.
    self._checkLoop()


def addWriter(self, writer):
    """
    Add a C{FileDescriptor} for notification of data available to write.
    """
    self._writers.add(writer)
    # Registration may require (re)starting the polling loop.
    self._checkLoop()
|
||||
|
||||
|
||||
def removeReader(self, reader):
    """
    Remove a C{FileDescriptor} from notification of data available to read.
    """
    try:
        self._readers.remove(reader)
    except KeyError:
        # Not registered; nothing to do (and no need to touch the loop).
        return
    self._checkLoop()


def removeWriter(self, writer):
    """
    Remove a C{FileDescriptor} from notification of data available to
    write.
    """
    try:
        self._writers.remove(writer)
    except KeyError:
        # Not registered; nothing to do (and no need to touch the loop).
        return
    self._checkLoop()
|
||||
|
||||
|
||||
def removeAll(self):
    """
    Forget every reader and writer, returning them as a list.
    """
    removed = list(self._readers.union(self._writers))
    # Clear the existing sets in place rather than rebinding new ones:
    # isReading and isWriting refer to these same set objects.
    self._readers.clear()
    self._writers.clear()
    return removed
|
||||
def getReaders(self):
    """
    Return a snapshot list of the selectables currently watched for
    reading.
    """
    return list(self._readers)
|
||||
def getWriters(self):
    """
    Return a snapshot list of the selectables currently watched for
    writing.
    """
    return list(self._writers)
|
||||
def isReading(self, fd):
    """
    Report whether C{fd} is currently registered for read-readiness
    notifications.

    @param fd: The file descriptor being checked.
    @type fd: L{twisted.internet.abstract.FileDescriptor}
    @return: C{True} if the file descriptor is being observed for read
        readiness, C{False} otherwise.
    @rtype: C{bool}
    """
    return fd in self._readers
|
||||
def isWriting(self, fd):
    """
    Report whether C{fd} is currently registered for write-readiness
    notifications.

    @param fd: The file descriptor being checked.
    @type fd: L{twisted.internet.abstract.FileDescriptor}
    @return: C{True} if the file descriptor is being observed for write
        readiness, C{False} otherwise.
    @rtype: C{bool}
    """
    return fd in self._writers
||||
@implementer(IReactorFDSet)
class EPollReactor(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
    """
    A reactor that uses epoll(7).

    @ivar _poller: A C{epoll} which will be used to check for I/O
        readiness.

    @ivar _selectables: A dictionary mapping integer file descriptors to
        instances of C{FileDescriptor} which have been registered with the
        reactor. All C{FileDescriptors} which are currently receiving read or
        write readiness notifications will be present as values in this
        dictionary.

    @ivar _reads: A set containing integer file descriptors. Values in this
        set will be registered with C{_poller} for read readiness notifications
        which will be dispatched to the corresponding C{FileDescriptor}
        instances in C{_selectables}.

    @ivar _writes: A set containing integer file descriptors. Values in this
        set will be registered with C{_poller} for write readiness
        notifications which will be dispatched to the corresponding
        C{FileDescriptor} instances in C{_selectables}.

    @ivar _continuousPolling: A L{_ContinuousPolling} instance, used to handle
        file descriptors (e.g. filesystem files) that are not supported by
        C{epoll(7)}.
    """

    # Attributes for _PollLikeMixin
    _POLL_DISCONNECTED = (EPOLLHUP | EPOLLERR)
    _POLL_IN = EPOLLIN
    _POLL_OUT = EPOLLOUT

    def __init__(self):
        """
        Initialize epoll object, file descriptor tracking dictionaries, and the
        base class.
        """
        # Create the poller we're going to use. The 1024 here is just a hint
        # to the kernel, it is not a hard maximum. After Linux 2.6.8, the size
        # argument is completely ignored.
        self._poller = epoll(1024)
        self._reads = set()
        self._writes = set()
        self._selectables = {}
        # Fallback for descriptors that epoll_ctl rejects with EPERM (e.g.
        # regular files); see addReader/addWriter below.
        self._continuousPolling = _ContinuousPolling(self)
        posixbase.PosixReactorBase.__init__(self)


    def _add(self, xer, primary, other, selectables, event, antievent):
        """
        Private method for adding a descriptor from the event loop.

        It takes care of adding it if new or modifying it if already added
        for another state (read -> read/write for example).
        """
        fd = xer.fileno()
        if fd not in primary:
            flags = event
            # epoll_ctl can raise all kinds of IOErrors, and every one
            # indicates a bug either in the reactor or application-code.
            # Let them all through so someone sees a traceback and fixes
            # something. We'll do the same thing for every other call to
            # this method in this file.
            if fd in other:
                # Already registered for the opposite event: combine both.
                flags |= antievent
                self._poller.modify(fd, flags)
            else:
                self._poller.register(fd, flags)

            # Update our own tracking state *only* after the epoll call has
            # succeeded. Otherwise we may get out of sync.
            primary.add(fd)
            selectables[fd] = xer


    def addReader(self, reader):
        """
        Add a FileDescriptor for notification of data available to read.
        """
        try:
            self._add(reader, self._reads, self._writes, self._selectables,
                      EPOLLIN, EPOLLOUT)
        except IOError as e:
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addReader(reader)
            else:
                raise


    def addWriter(self, writer):
        """
        Add a FileDescriptor for notification of data available to write.
        """
        try:
            self._add(writer, self._writes, self._reads, self._selectables,
                      EPOLLOUT, EPOLLIN)
        except IOError as e:
            if e.errno == errno.EPERM:
                # epoll(7) doesn't support certain file descriptors,
                # e.g. filesystem files, so for those we just poll
                # continuously:
                self._continuousPolling.addWriter(writer)
            else:
                raise


    def _remove(self, xer, primary, other, selectables, event, antievent):
        """
        Private method for removing a descriptor from the event loop.

        It does the inverse job of _add, and also add a check in case of the fd
        has gone away.
        """
        fd = xer.fileno()
        if fd == -1:
            # The descriptor was already closed; recover the fd it was
            # registered under by searching the selectables map.
            for fd, fdes in selectables.items():
                if xer is fdes:
                    break
            else:
                return
        if fd in primary:
            if fd in other:
                # Still wanted for the opposite event; keep it registered
                # for that event only.
                flags = antievent
                # See comment above modify call in _add.
                self._poller.modify(fd, flags)
            else:
                del selectables[fd]
                # See comment above _control call in _add.
                self._poller.unregister(fd)
            primary.remove(fd)


    def removeReader(self, reader):
        """
        Remove a Selectable for notification of data available to read.
        """
        if self._continuousPolling.isReading(reader):
            self._continuousPolling.removeReader(reader)
            return
        self._remove(reader, self._reads, self._writes, self._selectables,
                     EPOLLIN, EPOLLOUT)


    def removeWriter(self, writer):
        """
        Remove a Selectable for notification of data available to write.
        """
        if self._continuousPolling.isWriting(writer):
            self._continuousPolling.removeWriter(writer)
            return
        self._remove(writer, self._writes, self._reads, self._selectables,
                     EPOLLOUT, EPOLLIN)


    def removeAll(self):
        """
        Remove all selectables, and return a list of them.
        """
        return (self._removeAll(
                [self._selectables[fd] for fd in self._reads],
                [self._selectables[fd] for fd in self._writes]) +
                self._continuousPolling.removeAll())


    def getReaders(self):
        """
        Return the list of selectables watched for reading, including those
        handled by the continuous-polling fallback.
        """
        return ([self._selectables[fd] for fd in self._reads] +
                self._continuousPolling.getReaders())


    def getWriters(self):
        """
        Return the list of selectables watched for writing, including those
        handled by the continuous-polling fallback.
        """
        return ([self._selectables[fd] for fd in self._writes] +
                self._continuousPolling.getWriters())


    def doPoll(self, timeout):
        """
        Poll the poller for new events.

        @param timeout: maximum time to block, in seconds, or C{None} to
            wait indefinitely.
        """
        if timeout is None:
            timeout = -1  # Wait indefinitely.

        try:
            # Limit the number of events to the number of io objects we're
            # currently tracking (because that's maybe a good heuristic) and
            # the amount of time we block to the value specified by our
            # caller.
            l = self._poller.poll(timeout, len(self._selectables))
        except IOError as err:
            if err.errno == errno.EINTR:
                # Interrupted by a signal; simply return to the main loop.
                return
            # See epoll_wait(2) for documentation on the other conditions
            # under which this can fail. They can only be due to a serious
            # programming error on our part, so let's just announce them
            # loudly.
            raise

        _drdw = self._doReadOrWrite
        for fd, event in l:
            try:
                selectable = self._selectables[fd]
            except KeyError:
                # The selectable was removed by an earlier handler in this
                # same batch of events; skip it.
                pass
            else:
                log.callWithLogger(selectable, _drdw, selectable, fd, event)

    doIteration = doPoll
||||
|
||||
def install():
    """
    Instantiate an L{EPollReactor} and install it as the global reactor.
    """
    from twisted.internet.main import installReactor
    reactor = EPollReactor()
    installReactor(reactor)
|
||||
# Public API of this module: the reactor class and its installer.
__all__ = ["EPollReactor", "install"]
|
||||
498
Linux_i686/lib/python2.7/site-packages/twisted/internet/error.py
Normal file
498
Linux_i686/lib/python2.7/site-packages/twisted/internet/error.py
Normal file
|
|
@ -0,0 +1,498 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Exceptions and errors for use in twisted.internet modules.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import socket
|
||||
|
||||
from twisted.python import deprecate
|
||||
from twisted.python.versions import Version
|
||||
|
||||
|
||||
|
||||
class BindError(Exception):
    """An error occurred binding to an interface"""
    # NOTE: the docstring doubles as the user-visible message rendered by
    # __str__, so its exact text is part of this class's behaviour.

    def __str__(self):
        """
        Render the docstring plus any constructor arguments.

        Fix: arguments are converted with C{str()} before joining;
        previously a non-string argument (e.g. an integer port number)
        made C{str()} of the exception raise C{TypeError}.
        """
        s = self.__doc__
        if self.args:
            s = '%s: %s' % (s, ' '.join(str(arg) for arg in self.args))
        s = '%s.' % s
        return s
|
||||
class CannotListenError(BindError):
    """
    This gets raised by a call to startListening, when the object cannot start
    listening.

    @ivar interface: the interface I tried to listen on
    @ivar port: the port I tried to listen on
    @ivar socketError: the exception I got when I tried to listen
    @type socketError: L{socket.error}
    """
    def __init__(self, interface, port, socketError):
        BindError.__init__(self, interface, port, socketError)
        self.interface = interface
        self.port = port
        self.socketError = socketError

    def __str__(self):
        # An empty/None interface means "listen on all interfaces".
        iface = self.interface or 'any'
        return "Couldn't listen on %s:%s: %s." % (iface, self.port,
                                                  self.socketError)
|
||||
class MulticastJoinError(Exception):
    """
    An attempt to join a multicast group failed.
    """



class MessageLengthError(Exception):
    """Message is too long to send"""
    # The docstring above is rendered by __str__, so it is user-visible.

    def __str__(self):
        message = self.__doc__
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message



class DNSLookupError(IOError):
    """DNS lookup failed"""
    # The docstring above is rendered by __str__, so it is user-visible.

    def __str__(self):
        message = self.__doc__
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message



class ConnectInProgressError(Exception):
    """A connect operation was started and isn't done yet."""
|
||||
|
||||
# connection errors
|
||||
|
||||
class ConnectError(Exception):
    """An error occurred while connecting"""
    # The docstring above is the default user-visible message in __str__;
    # subclasses override only the docstring to customise the message.

    def __init__(self, osError=None, string=""):
        """
        @param osError: the underlying OS-level error (number or object),
            if any.
        @param string: an additional human-readable detail message.
        """
        self.osError = osError
        Exception.__init__(self, string)

    def __str__(self):
        parts = [self.__doc__ or self.__class__.__name__]
        if self.osError:
            parts.append(str(self.osError))
        if self.args[0]:
            parts.append(str(self.args[0]))
        return '%s.' % ': '.join(parts)
|
||||
# Each ConnectError subclass below customises only the docstring;
# ConnectError.__str__ renders the docstring as the user-visible message,
# so these strings are part of runtime behaviour — do not reword casually.

class ConnectBindError(ConnectError):
    """Couldn't bind"""



class UnknownHostError(ConnectError):
    """Hostname couldn't be looked up"""



class NoRouteError(ConnectError):
    """No route to host"""



class ConnectionRefusedError(ConnectError):
    """Connection was refused by other side"""



class TCPTimedOutError(ConnectError):
    """TCP connection timed out"""



class BadFileError(ConnectError):
    """File used for UNIX socket is no good"""



class ServiceNameUnknownError(ConnectError):
    """Service name given as port is unknown"""



class UserError(ConnectError):
    """User aborted connection"""



class TimeoutError(UserError):
    """User timeout caused connection failure"""



class SSLError(ConnectError):
    """An SSL error occurred"""



class VerifyError(Exception):
    """Could not verify something that was supposed to be signed.
    """



class PeerVerifyError(VerifyError):
    """The peer rejected our verify error.
    """



class CertificateError(Exception):
    """
    We did not find a certificate where we expected to find one.
    """
|
||||
# Map common connect(2) errno values to the richer ConnectError subclasses
# above; getConnectError() consults this table.
try:
    import errno
    errnoMapping = {
        errno.ENETUNREACH: NoRouteError,
        errno.ECONNREFUSED: ConnectionRefusedError,
        errno.ETIMEDOUT: TCPTimedOutError,
    }
    # On Windows the Winsock constants are reported instead of the POSIX
    # ones, so map those to the same exception types.
    if hasattr(errno, "WSAECONNREFUSED"):
        errnoMapping[errno.WSAECONNREFUSED] = ConnectionRefusedError
        errnoMapping[errno.WSAENETUNREACH] = NoRouteError
except ImportError:
    # No errno module available: fall back to the generic ConnectError.
    errnoMapping = {}
|
||||
|
||||
def getConnectError(e):
    """Given a socket exception, return connection error."""
    args = e.args if isinstance(e, Exception) else e
    try:
        number, string = args
    except ValueError:
        # Not an (errno, message) pair; wrap whatever we got verbatim.
        return ConnectError(string=e)

    if hasattr(socket, 'gaierror') and isinstance(e, socket.gaierror):
        # A getaddrinfo() failure means the host name itself was bad.
        klass = UnknownHostError
    else:
        klass = errnoMapping.get(number, ConnectError)
    return klass(number, string)
||||
|
||||
class ConnectionClosed(Exception):
    """
    Connection was closed, whether cleanly or non-cleanly.
    """



class ConnectionLost(ConnectionClosed):
    """Connection to the other side was lost in a non-clean fashion"""
    # The first docstring line is the user-visible message in __str__.

    def __str__(self):
        message = self.__doc__.strip().splitlines()[0]
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message



class ConnectionAborted(ConnectionLost):
    """
    Connection was aborted locally, using
    L{twisted.internet.interfaces.ITCPTransport.abortConnection}.

    @since: 11.1
    """



class ConnectionDone(ConnectionClosed):
    """Connection was closed cleanly"""
    # The docstring above is the user-visible message in __str__.

    def __str__(self):
        message = self.__doc__
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message



class FileDescriptorOverrun(ConnectionLost):
    """
    A mis-use of L{IUNIXTransport.sendFileDescriptor} caused the connection to
    be closed.

    Each file descriptor sent using C{sendFileDescriptor} must be associated
    with at least one byte sent using L{ITransport.write}. If at any point
    fewer bytes have been written than file descriptors have been sent, the
    connection is closed with this exception.
    """



class ConnectionFdescWentAway(ConnectionLost):
    """Uh""" #TODO
|
||||
class AlreadyCalled(ValueError):
    """Tried to cancel an already-called event"""
    # The docstring above is the user-visible message in __str__.

    def __str__(self):
        message = self.__doc__
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message



class AlreadyCancelled(ValueError):
    """Tried to cancel an already-cancelled event"""
    # The docstring above is the user-visible message in __str__.

    def __str__(self):
        message = self.__doc__
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message
||||
|
||||
class PotentialZombieWarning(Warning):
    """
    Emitted when L{IReactorProcess.spawnProcess} is called in a way which may
    result in termination of the created child process not being reported.

    Deprecated in Twisted 10.0.
    """
    # MESSAGE is the exact warning text emitted to users; keep wording intact.
    MESSAGE = (
        "spawnProcess called, but the SIGCHLD handler is not "
        "installed. This probably means you have not yet "
        "called reactor.run, or called "
        "reactor.run(installSignalHandler=0). You will probably "
        "never see this process finish, and it may become a "
        "zombie process.")
||||
# Mark PotentialZombieWarning as deprecated since Twisted 10.0; accessing it
# from this module emits a DeprecationWarning with the message below.
deprecate.deprecatedModuleAttribute(
    Version("Twisted", 10, 0, 0),
    "There is no longer any potential for zombie process.",
    __name__,
    "PotentialZombieWarning")
||||
|
||||
class ProcessDone(ConnectionDone):
    """A process has ended without apparent errors"""

    def __init__(self, status):
        """
        @param status: the platform-specific raw exit status of the process.
        """
        Exception.__init__(self, "process finished with exit code 0")
        # A clean exit always means exit code 0 and no terminating signal.
        self.exitCode = 0
        self.signal = None
        self.status = status
|
||||
class ProcessTerminated(ConnectionLost):
    """
    A process has ended with a probable error condition

    @ivar exitCode: See L{__init__}
    @ivar signal: See L{__init__}
    @ivar status: See L{__init__}
    """
    def __init__(self, exitCode=None, signal=None, status=None):
        """
        @param exitCode: The exit status of the process. This is roughly like
            the value you might pass to L{os.exit}. This is L{None} if the
            process exited due to a signal.
        @type exitCode: L{int} or L{types.NoneType}

        @param signal: The exit signal of the process. This is L{None} if the
            process did not exit due to a signal.
        @type signal: L{int} or L{types.NoneType}

        @param status: The exit code of the process. This is a platform
            specific combination of the exit code and the exit signal. See
            L{os.WIFEXITED} and related functions.
        @type status: L{int}
        """
        self.exitCode = exitCode
        self.signal = signal
        self.status = status
        message = "process ended"
        if exitCode is not None:
            message += " with exit code %s" % (exitCode,)
        if signal is not None:
            message += " by signal %s" % (signal,)
        Exception.__init__(self, message)
||||
|
||||
class ProcessExitedAlready(Exception):
    """
    The process has already exited and the operation requested can no longer
    be performed.
    """



class NotConnectingError(RuntimeError):
    """The Connector was not connecting when it was asked to stop connecting"""
    # The docstring above is the user-visible message in __str__.

    def __str__(self):
        message = self.__doc__
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message



class NotListeningError(RuntimeError):
    """The Port was not listening when it was asked to stop listening"""
    # The docstring above is the user-visible message in __str__.

    def __str__(self):
        message = self.__doc__
        if self.args:
            message = '%s: %s' % (message, ' '.join(self.args))
        return '%s.' % message
||||
|
||||
# Reactor lifecycle errors: raised by reactor run/stop/install operations
# invoked in an invalid state.

class ReactorNotRunning(RuntimeError):
    """
    Error raised when trying to stop a reactor which is not running.
    """


class ReactorNotRestartable(RuntimeError):
    """
    Error raised when trying to run a reactor which was stopped.
    """



class ReactorAlreadyRunning(RuntimeError):
    """
    Error raised when trying to start the reactor multiple times.
    """


class ReactorAlreadyInstalledError(AssertionError):
    """
    Could not install reactor because one is already installed.
    """
|
||||
class ConnectingCancelledError(Exception):
    """
    An C{Exception} that will be raised when an L{IStreamClientEndpoint} is
    cancelled before it connects.

    @ivar address: The L{IAddress} that is the destination of the
        cancelled L{IStreamClientEndpoint}.
    """

    def __init__(self, address):
        """
        @param address: The L{IAddress} that is the destination of the
            L{IStreamClientEndpoint} that was cancelled.
        """
        self.address = address
        Exception.__init__(self, address)
||||
|
||||
# Errors for reactor features that a particular platform/reactor lacks.

class UnsupportedAddressFamily(Exception):
    """
    An attempt was made to use a socket with an address family (eg I{AF_INET},
    I{AF_INET6}, etc) which is not supported by the reactor.
    """



class UnsupportedSocketType(Exception):
    """
    An attempt was made to use a socket of a type (eg I{SOCK_STREAM},
    I{SOCK_DGRAM}, etc) which is not supported by the reactor.
    """


class AlreadyListened(Exception):
    """
    An attempt was made to listen on a file descriptor which can only be
    listened on once.
    """
||||
|
||||
class InvalidAddressError(ValueError):
    """
    An invalid address was specified (i.e. neither IPv4 or IPv6, or expected
    one and got the other).

    @ivar address: See L{__init__}
    @ivar message: See L{__init__}
    """

    def __init__(self, address, message):
        """
        @param address: The address that was provided.
        @type address: L{bytes}
        @param message: A native string of additional information provided by
            the calling context.
        @type message: L{str}
        """
        # ValueError.__init__ is not invoked with the arguments, so
        # self.args stays empty; consumers read .address/.message directly.
        self.address = address
        self.message = message
||||
|
||||
__all__ = [
|
||||
'BindError', 'CannotListenError', 'MulticastJoinError',
|
||||
'MessageLengthError', 'DNSLookupError', 'ConnectInProgressError',
|
||||
'ConnectError', 'ConnectBindError', 'UnknownHostError', 'NoRouteError',
|
||||
'ConnectionRefusedError', 'TCPTimedOutError', 'BadFileError',
|
||||
'ServiceNameUnknownError', 'UserError', 'TimeoutError', 'SSLError',
|
||||
'VerifyError', 'PeerVerifyError', 'CertificateError',
|
||||
'getConnectError', 'ConnectionClosed', 'ConnectionLost',
|
||||
'ConnectionDone', 'ConnectionFdescWentAway', 'AlreadyCalled',
|
||||
'AlreadyCancelled', 'PotentialZombieWarning', 'ProcessDone',
|
||||
'ProcessTerminated', 'ProcessExitedAlready', 'NotConnectingError',
|
||||
'NotListeningError', 'ReactorNotRunning', 'ReactorAlreadyRunning',
|
||||
'ReactorAlreadyInstalledError', 'ConnectingCancelledError',
|
||||
'UnsupportedAddressFamily', 'UnsupportedSocketType', 'InvalidAddressError']
|
||||
118
Linux_i686/lib/python2.7/site-packages/twisted/internet/fdesc.py
Normal file
118
Linux_i686/lib/python2.7/site-packages/twisted/internet/fdesc.py
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
# -*- test-case-name: twisted.test.test_fdesc -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
Utility functions for dealing with POSIX file descriptors.
|
||||
"""
|
||||
|
||||
import os
|
||||
import errno
|
||||
try:
|
||||
import fcntl
|
||||
except ImportError:
|
||||
fcntl = None
|
||||
|
||||
# twisted imports
|
||||
from twisted.internet.main import CONNECTION_LOST, CONNECTION_DONE
|
||||
|
||||
|
||||
def setNonBlocking(fd):
    """
    Put the given file descriptor into non-blocking mode.

    @param fd: an integer file descriptor (or an object acceptable to
        C{fcntl.fcntl}).
    """
    current = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current | os.O_NONBLOCK)
||||
|
||||
def setBlocking(fd):
    """
    Put the given file descriptor into blocking mode.

    @param fd: an integer file descriptor (or an object acceptable to
        C{fcntl.fcntl}).
    """
    current = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, current & ~os.O_NONBLOCK)
||||
|
||||
if fcntl is None:
    # fcntl isn't available on Windows. By default, handles aren't
    # inherited on Windows, so we can do nothing here.
    _setCloseOnExec = _unsetCloseOnExec = lambda fd: None
else:
    def _setCloseOnExec(fd):
        """
        Make a file descriptor close-on-exec.
        """
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags | fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)


    def _unsetCloseOnExec(fd):
        """
        Make a file descriptor no longer close-on-exec, so it is inherited
        across exec.
        """
        # Clears FD_CLOEXEC (the original docstring wrongly described this
        # as setting it).
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags & ~fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)
||||
|
||||
def readFromFD(fd, callback):
    """
    Read from file descriptor, calling callback with resulting data.

    If successful, call 'callback' with a single argument: the
    resulting data.

    Returns same thing FileDescriptor.doRead would: CONNECTION_LOST,
    CONNECTION_DONE, or None.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be read from.
    @param callback: a callable which accepts a single argument. If
        data is read from the file descriptor it will be called with this
        data. Handling exceptions from calling the callback is up to the
        caller.

    Note that if the descriptor is still connected but no data is read,
    None will be returned but callback will not be called.

    @return: CONNECTION_LOST on error, CONNECTION_DONE when fd is
        closed, otherwise None.
    """
    try:
        data = os.read(fd, 8192)
    except (OSError, IOError) as e:
        if e.args[0] in (errno.EAGAIN, errno.EINTR):
            # Nothing available right now; not an error.
            return
        return CONNECTION_LOST
    if not data:
        # Zero bytes read means the other end closed the descriptor.
        return CONNECTION_DONE
    callback(data)
|
||||
|
||||
def writeToFD(fd, data):
    """
    Write data to file descriptor.

    Returns same thing FileDescriptor.writeSomeData would.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be written to.
    @type data: C{str} or C{buffer}
    @param data: bytes to write to fd.

    @return: number of bytes written, or CONNECTION_LOST.
    """
    try:
        return os.write(fd, data)
    except (OSError, IOError) as e:
        if e.errno in (errno.EAGAIN, errno.EINTR):
            # Would block / interrupted: report zero bytes written.
            return 0
        return CONNECTION_LOST
||||
|
||||
# Public helpers; the _setCloseOnExec/_unsetCloseOnExec pair stays private.
__all__ = ["setNonBlocking", "setBlocking", "readFromFD", "writeToFD"]
|
||||
|
|
@ -0,0 +1,188 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module provides support for Twisted to interact with the glib
|
||||
mainloop via GObject Introspection.
|
||||
|
||||
In order to use this support, simply do the following::
|
||||
|
||||
from twisted.internet import gireactor
|
||||
gireactor.install()
|
||||
|
||||
If you wish to use a GApplication, register it with the reactor::
|
||||
|
||||
from twisted.internet import reactor
|
||||
reactor.registerGApplication(app)
|
||||
|
||||
Then use twisted.internet APIs as usual.
|
||||
|
||||
On Python 3, pygobject v3.4 or later is required.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.python.compat import _PY3
|
||||
from twisted.internet.error import ReactorAlreadyRunning
|
||||
from twisted.internet import _glibbase
|
||||
from twisted.python import runtime
|
||||
|
||||
# Detect whether gi ships the pygtkcompat compatibility layer without
# importing gi itself (importing gi alongside static pygtk can segfault).
if _PY3:
    # We require a sufficiently new version of pygobject, so always exists:
    _pygtkcompatPresent = True
else:
    # We can't just try to import gi.pygtkcompat, because that would import
    # gi, and the goal here is to not import gi in cases where that would
    # cause segfault.
    from twisted.python.modules import theSystemPath
    _pygtkcompatPresent = True
    try:
        theSystemPath["gi.pygtkcompat"]
    except KeyError:
        _pygtkcompatPresent = False


# Modules that we want to ensure aren't imported if we're on older versions of
# GI:
_PYGTK_MODULES = ['gobject', 'glib', 'gio', 'gtk']
|
||||
def _oldGiInit():
    """
    Make sure pygtk and gi aren't loaded at the same time, and import Glib if
    possible.
    """
    # We can't immediately prevent imports, because that confuses some buggy
    # code in gi:
    _glibbase.ensureNotImported(
        _PYGTK_MODULES,
        "Introspected and static glib/gtk bindings must not be mixed; can't "
        "import gireactor since pygtk2 module is already imported.")

    # Publish GLib at module level for the reactor classes below.
    global GLib
    from gi.repository import GLib
    if getattr(GLib, "threads_init", None) is not None:
        # Older pygobject needs an explicit call before threads are used.
        GLib.threads_init()

    # Now that gi is loaded, actively block later static-binding imports.
    _glibbase.ensureNotImported([], "",
                                preventImports=_PYGTK_MODULES)
||||
|
||||
if not _pygtkcompatPresent:
    # Older versions of gi don't have compatibility layer, so just enforce no
    # imports of pygtk and gi at same time:
    _oldGiInit()
else:
    # Newer version of gi, so we can try to initialize compatibility layer; if
    # real pygtk was already imported we'll get ImportError at this point
    # rather than segfault, so unconditional import is fine.
    import gi.pygtkcompat
    gi.pygtkcompat.enable()
    # At this point importing gobject will get you gi version, and importing
    # e.g. gtk will either fail in non-segfaulty way or use gi version if user
    # does gi.pygtkcompat.enable_gtk(). So, no need to prevent imports of
    # old school pygtk modules.
    from gi.repository import GLib
    if getattr(GLib, "threads_init", None) is not None:
        # Older pygobject needs an explicit call before threads are used.
        GLib.threads_init()
|
||||
|
||||
class GIReactor(_glibbase.GlibReactorBase):
    """
    GObject-introspection event loop reactor.

    @ivar _gapplication: A C{Gio.Application} instance that was registered
        with C{registerGApplication}.
    """
    _POLL_DISCONNECTED = (GLib.IOCondition.HUP | GLib.IOCondition.ERR |
                          GLib.IOCondition.NVAL)
    _POLL_IN = GLib.IOCondition.IN
    _POLL_OUT = GLib.IOCondition.OUT

    # glib's iochannel sources won't tell us about any events that we haven't
    # asked for, even if those events aren't sensible inputs to the poll()
    # call.
    INFLAGS = _POLL_IN | _POLL_DISCONNECTED
    OUTFLAGS = _POLL_OUT | _POLL_DISCONNECTED

    # By default no Application is registered:
    _gapplication = None


    def __init__(self, useGtk=False):
        """
        @param useGtk: if true, pass the Gtk module to the base class so the
            GTK+ main loop is used instead of the plain glib one.
        """
        _gtk = None
        if useGtk is True:
            from gi.repository import Gtk as _gtk

        _glibbase.GlibReactorBase.__init__(self, GLib, _gtk, useGtk=useGtk)


    def registerGApplication(self, app):
        """
        Register a C{Gio.Application} or C{Gtk.Application}, whose main loop
        will be used instead of the default one.

        We will C{hold} the application so it doesn't exit on its own. In
        versions of C{python-gi} 3.2 and later, we exit the event loop using
        the C{app.quit} method which overrides any holds. Older versions are
        not supported.

        @raise RuntimeError: if an application is already registered, or the
            installed PyGObject lacks C{app.quit} (pre-3.2).
        @raise ReactorAlreadyRunning: if the reactor has already started.
        """
        if self._gapplication is not None:
            raise RuntimeError(
                "Can't register more than one application instance.")
        if self._started:
            raise ReactorAlreadyRunning(
                "Can't register application after reactor was started.")
        if not hasattr(app, "quit"):
            raise RuntimeError("Application registration is not supported in"
                               " versions of PyGObject prior to 3.2.")
        self._gapplication = app
        def run():
            # Hold the application open and hand control to its main loop.
            app.hold()
            app.run(None)
        self._run = run

        # app.quit overrides any outstanding holds, so it doubles as the
        # reactor's crash hook.
        self._crash = app.quit
|
||||
|
||||
class PortableGIReactor(_glibbase.PortableGlibReactorBase):
    """
    Portable GObject Introspection event loop reactor.
    """

    def __init__(self, useGtk=False):
        """
        @param useGtk: if True, import Gtk from gi.repository and pass it to
            the portable base class.
        """
        gtkModule = None
        if useGtk is True:
            from gi.repository import Gtk as gtkModule

        _glibbase.PortableGlibReactorBase.__init__(self, GLib, gtkModule,
                                                   useGtk=useGtk)


    def registerGApplication(self, app):
        """
        Register a C{Gio.Application} or C{Gtk.Application}, whose main loop
        will be used instead of the default one.

        @raise NotImplementedError: always; the portable reactor has no
            GApplication support.
        """
        raise NotImplementedError("GApplication is not currently supported on Windows.")
|
||||
|
||||
|
||||
|
||||
def install(useGtk=False):
    """
    Configure the twisted mainloop to be run inside the glib mainloop.

    @param useGtk: should GTK+ rather than glib event loop be
        used (this will be slightly slower but does support GUI).
    """
    # POSIX gets the full reactor; everything else the portable fallback.
    if runtime.platform.getType() == 'posix':
        reactorClass = GIReactor
    else:
        reactorClass = PortableGIReactor
    reactor = reactorClass(useGtk=useGtk)

    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


__all__ = ['install']
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module provides support for Twisted to interact with the glib mainloop.
|
||||
This is like gtk2, but slightly faster and does not require a working
|
||||
$DISPLAY. However, you cannot run GUIs under this reactor: for that you must
|
||||
use the gtk2reactor instead.
|
||||
|
||||
In order to use this support, simply do the following::
|
||||
|
||||
from twisted.internet import glib2reactor
|
||||
glib2reactor.install()
|
||||
|
||||
Then use twisted.internet APIs as usual. The other methods here are not
|
||||
intended to be called directly.
|
||||
"""
|
||||
|
||||
from twisted.internet import gtk2reactor
|
||||
|
||||
|
||||
class Glib2Reactor(gtk2reactor.Gtk2Reactor):
    """
    The reactor using the glib mainloop.
    """

    def __init__(self):
        """
        Construct the base reactor with C{useGtk} disabled: this reactor
        drives the plain glib loop and cannot host a GUI.
        """
        gtk2reactor.Gtk2Reactor.__init__(self, useGtk=False)
|
||||
|
||||
|
||||
|
||||
def install():
    """
    Configure the twisted mainloop to be run inside the glib mainloop.

    @return: the installed reactor, for parity with the other reactor
        C{install} functions (gireactor, gtk2reactor) which all return it.
    """
    reactor = Glib2Reactor()
    from twisted.internet.main import installReactor
    installReactor(reactor)
    # Fix: every sibling install() returns the reactor; this one silently
    # returned None, which breaks the common `reactor = xxxreactor.install()`
    # idiom. Returning is backward compatible for callers ignoring the value.
    return reactor


__all__ = ['install']
|
||||
|
|
@ -0,0 +1,119 @@
|
|||
# -*- test-case-name: twisted.internet.test -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
This module provides support for Twisted to interact with the glib/gtk2
|
||||
mainloop.
|
||||
|
||||
In order to use this support, simply do the following::
|
||||
|
||||
from twisted.internet import gtk2reactor
|
||||
gtk2reactor.install()
|
||||
|
||||
Then use twisted.internet APIs as usual. The other methods here are not
|
||||
intended to be called directly.
|
||||
"""
|
||||
|
||||
# System Imports
|
||||
import sys
|
||||
|
||||
# Twisted Imports
|
||||
from twisted.internet import _glibbase
|
||||
from twisted.python import runtime
|
||||
|
||||
# Certain old versions of pygtk and gi crash if imported at the same
|
||||
# time. This is a problem when running Twisted's unit tests, since they will
|
||||
# attempt to run both gtk2 and gtk3/gi tests. However, gireactor makes sure
|
||||
# that if we are in such an old version, and gireactor was imported,
|
||||
# gtk2reactor will not be importable. So we don't *need* to enforce that here
|
||||
# as well; whichever is imported first will still win. Moreover, additional
|
||||
# enforcement in this module is unnecessary in modern versions, and downright
|
||||
# problematic in certain versions where for some reason importing gtk also
|
||||
# imports some subset of gi. So we do nothing here, relying on gireactor to
|
||||
# prevent the crash.
|
||||
|
||||
# Bind pygtk to the 2.x API before any "import gtk" runs.
try:
    if not hasattr(sys, 'frozen'):
        # Don't want to check this for py2exe
        import pygtk
        pygtk.require('2.0')
except (ImportError, AttributeError):
    pass # maybe we're using pygtk before this hack existed.

import gobject
if hasattr(gobject, "threads_init"):
    # recent versions of python-gtk expose this. python-gtk=2.4.1
    # (wrapping glib-2.4.7) does. python-gtk=2.0.0 (wrapping
    # glib-2.2.3) does not.
    gobject.threads_init()
|
||||
|
||||
|
||||
|
||||
class Gtk2Reactor(_glibbase.GlibReactorBase):
    """
    PyGTK+ 2 event loop reactor.
    """
    _POLL_DISCONNECTED = gobject.IO_HUP | gobject.IO_ERR | gobject.IO_NVAL
    _POLL_IN = gobject.IO_IN
    _POLL_OUT = gobject.IO_OUT

    # glib's iochannel sources only deliver conditions we asked for, so the
    # disconnect conditions are merged into both the read and write sets.
    INFLAGS = _POLL_IN | _POLL_DISCONNECTED
    OUTFLAGS = _POLL_OUT | _POLL_DISCONNECTED

    def __init__(self, useGtk=True):
        """
        @param useGtk: if True, import the gtk module and let the GTK+ main
            loop drive the reactor; otherwise use plain gobject.
        """
        gtkModule = None
        if useGtk is True:
            import gtk as gtkModule

        _glibbase.GlibReactorBase.__init__(self, gobject, gtkModule,
                                           useGtk=useGtk)
|
||||
|
||||
|
||||
|
||||
class PortableGtkReactor(_glibbase.PortableGlibReactorBase):
    """
    Reactor that works on Windows.

    Sockets aren't supported by GTK+'s input_add on Win32.
    """

    def __init__(self, useGtk=True):
        """
        @param useGtk: if True, import gtk and hand it to the portable base.
        """
        gtkModule = None
        if useGtk is True:
            import gtk as gtkModule

        _glibbase.PortableGlibReactorBase.__init__(self, gobject, gtkModule,
                                                   useGtk=useGtk)
|
||||
|
||||
|
||||
def install(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: should glib rather than GTK+ event loop be
        used (this will be slightly faster but does not support GUI).
    """
    from twisted.internet.main import installReactor
    gtkReactor = Gtk2Reactor(useGtk)
    installReactor(gtkReactor)
    return gtkReactor
|
||||
|
||||
|
||||
def portableInstall(useGtk=True):
    """
    Configure the twisted mainloop to be run inside the gtk mainloop.

    @param useGtk: if True, import gtk and use the GTK+ loop; forwarded to
        L{PortableGtkReactor}.
    """
    # Fix: the useGtk argument was accepted but never forwarded, so
    # portableInstall(useGtk=False) silently behaved like useGtk=True.
    # Forwarding it is backward compatible (the default is unchanged).
    reactor = PortableGtkReactor(useGtk)
    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor
|
||||
|
||||
|
||||
# On non-POSIX platforms (i.e. Windows) gtk's input_add cannot watch
# sockets, so the select()-based portable installer is exported instead.
if runtime.platform.getType() != 'posix':
    install = portableInstall


__all__ = ['install']
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module provides support for Twisted to interact with the gtk3 mainloop
|
||||
via Gobject introspection. This is like gi, but slightly slower and requires a
|
||||
working $DISPLAY.
|
||||
|
||||
In order to use this support, simply do the following::
|
||||
|
||||
from twisted.internet import gtk3reactor
|
||||
gtk3reactor.install()
|
||||
|
||||
If you wish to use a GApplication, register it with the reactor::
|
||||
|
||||
from twisted.internet import reactor
|
||||
reactor.registerGApplication(app)
|
||||
|
||||
Then use twisted.internet APIs as usual.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import os
|
||||
|
||||
from twisted.internet import gireactor
|
||||
from twisted.python import runtime
|
||||
|
||||
# Newer versions of gtk3/pygoject raise a RuntimeError, or just break in a
|
||||
# confusing manner, if the program is not running under X11. We therefore try
|
||||
# to fail in a more reasonable manner, and check for $DISPLAY as a reasonable
|
||||
# approximation of availability of X11. This is somewhat over-aggressive,
|
||||
# since some older versions of gtk3/pygobject do work with missing $DISPLAY,
|
||||
# but it's too hard to figure out which, so we always require it.
|
||||
# Fail fast at import time with a clear message, rather than crashing
# obscurely inside gtk3 later when no X11 display is available.
if (runtime.platform.getType() == 'posix' and
    not runtime.platform.isMacOSX() and not os.environ.get("DISPLAY")):
    raise ImportError(
        "Gtk3 requires X11, and no DISPLAY environment variable is set")
|
||||
|
||||
|
||||
class Gtk3Reactor(gireactor.GIReactor):
    """
    A reactor using the gtk3+ event loop.
    """

    def __init__(self):
        """
        Construct the GI reactor with C{useGtk} forced on, so the GTK+ 3
        main loop is used.
        """
        super(Gtk3Reactor, self).__init__(useGtk=True)
|
||||
|
||||
|
||||
|
||||
class PortableGtk3Reactor(gireactor.PortableGIReactor):
    """
    Portable GTK+ 3.x reactor.
    """

    def __init__(self):
        """
        Construct the portable GI reactor with C{useGtk} forced on.
        """
        super(PortableGtk3Reactor, self).__init__(useGtk=True)
|
||||
|
||||
|
||||
|
||||
def install():
    """
    Configure the Twisted mainloop to be run inside the gtk3+ mainloop.
    """
    # Full reactor on POSIX, portable fallback elsewhere (Windows).
    onPosix = runtime.platform.getType() == 'posix'
    reactor = Gtk3Reactor() if onPosix else PortableGtk3Reactor()

    from twisted.internet.main import installReactor
    installReactor(reactor)
    return reactor


__all__ = ['install']
|
||||
|
|
@ -0,0 +1,250 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module provides support for Twisted to interact with the PyGTK mainloop.
|
||||
|
||||
In order to use this support, simply do the following::
|
||||
|
||||
| from twisted.internet import gtkreactor
|
||||
| gtkreactor.install()
|
||||
|
||||
Then use twisted.internet APIs as usual. The other methods here are not
|
||||
intended to be called directly.
|
||||
"""
|
||||
|
||||
import sys

# System Imports
try:
    import pygtk
    pygtk.require('1.2')
except (ImportError, AttributeError):
    # Fix: the original `except ImportError, AttributeError:` is Python 2's
    # bind-as form -- it catches ONLY ImportError and rebinds the caught
    # exception to the name AttributeError (shadowing the builtin), so an
    # AttributeError raised by pygtk.require() would propagate. The tuple
    # form matches the intent and the sibling gtk2reactor module.
    pass # maybe we're using pygtk before this hack existed.
import gtk
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
# Twisted Imports
|
||||
from twisted.python import log, runtime, deprecate, versions
|
||||
from twisted.internet.interfaces import IReactorFDSet
|
||||
|
||||
# Sibling Imports
|
||||
from twisted.internet import posixbase, selectreactor
|
||||
|
||||
|
||||
# gtkreactor (pygtk 1.x) is deprecated since Twisted 10.1.0; these constants
# feed the deprecate.deprecatedModuleAttribute() calls below.
deprecatedSince = versions.Version("Twisted", 10, 1, 0)
deprecationMessage = ("All new applications should be written with gtk 2.x, "
                      "which is supported by twisted.internet.gtk2reactor.")
|
||||
|
||||
|
||||
class GtkReactor(posixbase.PosixReactorBase):
    """
    GTK+ event loop reactor.

    File descriptors are handed to C{gtk.input_add} and Twisted timed events
    are serviced from a recurring gtk timeout (see L{simulate}).

    @ivar _reads: A dictionary mapping L{FileDescriptor} instances to gtk
        INPUT_READ watch handles.

    @ivar _writes: A dictionary mapping L{FileDescriptor} instances to gtk
        INPUT_WRITE watch handles.

    @ivar _simtag: A gtk timeout handle for the next L{simulate} call.
    """
    # Python 2 / old zope.interface declaration style (predates @implementer).
    implements(IReactorFDSet)

    deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                        __name__, "GtkReactor")

    def __init__(self):
        """
        Initialize the file descriptor tracking dictionaries and the base
        class.
        """
        self._simtag = None
        self._reads = {}
        self._writes = {}
        posixbase.PosixReactorBase.__init__(self)


    def addReader(self, reader):
        # Register each descriptor with gtk at most once; events arrive via
        # self.callback.
        if reader not in self._reads:
            self._reads[reader] = gtk.input_add(reader, gtk.GDK.INPUT_READ, self.callback)

    def addWriter(self, writer):
        if writer not in self._writes:
            self._writes[writer] = gtk.input_add(writer, gtk.GDK.INPUT_WRITE, self.callback)


    def getReaders(self):
        return self._reads.keys()


    def getWriters(self):
        return self._writes.keys()


    def removeAll(self):
        # Delegate to the base-class helper that clears both maps.
        return self._removeAll(self._reads, self._writes)


    def removeReader(self, reader):
        if reader in self._reads:
            gtk.input_remove(self._reads[reader])
            del self._reads[reader]

    def removeWriter(self, writer):
        if writer in self._writes:
            gtk.input_remove(self._writes[writer])
            del self._writes[writer]

    # gtk timeout handle used by doIteration to bound a blocking iteration.
    doIterationTimer = None

    def doIterationTimeout(self, *args):
        self.doIterationTimer = None
        return 0 # auto-remove

    def doIteration(self, delay):
        """
        Run one iteration of the gtk event loop, waiting at most C{delay}
        seconds for something to happen.
        """
        # flush some pending events, return if there was something to do
        # don't use the usual "while gtk.events_pending(): mainiteration()"
        # idiom because lots of IO (in particular test_tcp's
        # ProperlyCloseFilesTestCase) can keep us from ever exiting.
        log.msg(channel='system', event='iteration', reactor=self)
        if gtk.events_pending():
            gtk.mainiteration(0)
            return
        # nothing to do, must delay
        if delay == 0:
            return # shouldn't delay, so just return
        self.doIterationTimer = gtk.timeout_add(int(delay * 1000),
                                                self.doIterationTimeout)
        # This will either wake up from IO or from a timeout.
        gtk.mainiteration(1) # block
        # note: with the .simulate timer below, delays > 0.1 will always be
        # woken up by the .simulate timer
        if self.doIterationTimer:
            # if woken by IO, need to cancel the timer
            gtk.timeout_remove(self.doIterationTimer)
            self.doIterationTimer = None

    def crash(self):
        posixbase.PosixReactorBase.crash(self)
        gtk.mainquit()

    def run(self, installSignalHandlers=1):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        gtk.timeout_add(0, self.simulate)
        gtk.mainloop()

    def _readAndWrite(self, source, condition):
        """
        Dispatch a gtk I/O event on C{source} to doRead/doWrite, then
        disconnect the selectable if either returned a reason.
        """
        # note: gtk-1.2's gtk_input_add presents an API in terms of gdk
        # constants like INPUT_READ and INPUT_WRITE. Internally, it will add
        # POLL_HUP and POLL_ERR to the poll() events, but if they happen it
        # will turn them back into INPUT_READ and INPUT_WRITE. gdkevents.c
        # maps IN/HUP/ERR to INPUT_READ, and OUT/ERR to INPUT_WRITE. This
        # means there is no immediate way to detect a disconnected socket.

        # The g_io_add_watch() API is more suited to this task. I don't think
        # pygtk exposes it, though.
        why = None
        didRead = None
        try:
            if condition & gtk.GDK.INPUT_READ:
                why = source.doRead()
                didRead = source.doRead
            if not why and condition & gtk.GDK.INPUT_WRITE:
                # if doRead caused connectionLost, don't call doWrite
                # if doRead is doWrite, don't call it again.
                if not source.disconnected and source.doWrite != didRead:
                    why = source.doWrite()
                    didRead = source.doWrite # if failed it was in write
        except:
            # NOTE(review): the bare except is deliberate -- one broken
            # selectable must not kill the event loop; the error is logged.
            why = sys.exc_info()[1]
            log.msg('Error In %s' % source)
            log.deferr()

        if why:
            self._disconnectSelectable(source, why, didRead == source.doRead)

    def callback(self, source, condition):
        log.callWithLogger(source, self._readAndWrite, source, condition)
        self.simulate() # fire Twisted timers
        return 1 # 1=don't auto-remove the source

    def simulate(self):
        """Run simulation loops and reschedule callbacks.
        """
        if self._simtag is not None:
            gtk.timeout_remove(self._simtag)
        self.runUntilCurrent()
        # NOTE(review): relies on Python 2 ordering where None < float, so
        # min(None, 0.1) is None when timeout() returns None; the next test
        # then substitutes the 0.1 ceiling. Not portable to Python 3.
        timeout = min(self.timeout(), 0.1)
        if timeout is None:
            timeout = 0.1
        # Quoth someone other than me, "grumble", yet I know not why. Try to be
        # more specific in your complaints, guys. -exarkun
        self._simtag = gtk.timeout_add(int(timeout * 1010), self.simulate)
|
||||
|
||||
|
||||
|
||||
class PortableGtkReactor(selectreactor.SelectReactor):
    """Reactor that works on Windows.

    input_add is not supported on GTK+ for Win32, apparently.

    @ivar _simtag: A gtk timeout handle for the next L{simulate} call.
    """
    _simtag = None

    deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                        __name__, "PortableGtkReactor")

    def crash(self):
        selectreactor.SelectReactor.crash(self)
        gtk.mainquit()

    def run(self, installSignalHandlers=1):
        self.startRunning(installSignalHandlers=installSignalHandlers)
        self.simulate()
        gtk.mainloop()

    def simulate(self):
        """Run simulation loops and reschedule callbacks.
        """
        if self._simtag is not None:
            gtk.timeout_remove(self._simtag)
        self.iterate()
        # NOTE(review): relies on Python 2 ordering where None < float;
        # min(None, 0.1) is None, replaced by the 0.1 ceiling just below.
        timeout = min(self.timeout(), 0.1)
        if timeout is None:
            timeout = 0.1

        # See comment for identical line in GtkReactor.simulate.
        # Fix: coerce to int as GtkReactor.simulate does -- gtk.timeout_add
        # takes an integral millisecond interval, and this line was the only
        # caller passing a float.
        self._simtag = gtk.timeout_add(int(timeout * 1010), self.simulate)
|
||||
|
||||
|
||||
|
||||
def install():
    """Configure the twisted mainloop to be run inside the gtk mainloop.
    """
    from twisted.internet.main import installReactor
    gtkReactor = GtkReactor()
    installReactor(gtkReactor)
    return gtkReactor

deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                    __name__, "install")
|
||||
|
||||
|
||||
def portableInstall():
    """Configure the twisted mainloop to be run inside the gtk mainloop.
    """
    from twisted.internet.main import installReactor
    portableReactor = PortableGtkReactor()
    installReactor(portableReactor)
    return portableReactor

deprecate.deprecatedModuleAttribute(deprecatedSince, deprecationMessage,
                                    __name__, "portableInstall")
|
||||
|
||||
|
||||
# On Windows gtk.input_add cannot watch sockets, so export the
# select()-based portable installer instead.
if runtime.platform.getType() != 'posix':
    install = portableInstall

__all__ = ['install']
|
||||
|
|
@ -0,0 +1,405 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_inotify -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module provides support for Twisted to linux inotify API.
|
||||
|
||||
In order to use this support, simply do the following (and start a reactor
|
||||
at some point)::
|
||||
|
||||
from twisted.internet import inotify
|
||||
from twisted.python import filepath
|
||||
|
||||
def notify(ignored, filepath, mask):
|
||||
\"""
|
||||
For historical reasons, an opaque handle is passed as first
|
||||
parameter. This object should never be used.
|
||||
|
||||
@param filepath: FilePath on which the event happened.
|
||||
@param mask: inotify event as hexadecimal masks
|
||||
\"""
|
||||
print "event %s on %s" % (
|
||||
', '.join(inotify.humanReadableMask(mask)), filepath)
|
||||
|
||||
notifier = inotify.INotify()
|
||||
notifier.startReading()
|
||||
notifier.watch(filepath.FilePath("/some/directory"), callbacks=[notify])
|
||||
|
||||
@since: 10.1
|
||||
"""
|
||||
|
||||
import os
|
||||
import struct
|
||||
|
||||
from twisted.internet import fdesc
|
||||
from twisted.internet.abstract import FileDescriptor
|
||||
from twisted.python import log, _inotify
|
||||
|
||||
|
||||
# from /usr/src/linux/include/linux/inotify.h
|
||||
|
||||
IN_ACCESS = 0x00000001L # File was accessed
|
||||
IN_MODIFY = 0x00000002L # File was modified
|
||||
IN_ATTRIB = 0x00000004L # Metadata changed
|
||||
IN_CLOSE_WRITE = 0x00000008L # Writeable file was closed
|
||||
IN_CLOSE_NOWRITE = 0x00000010L # Unwriteable file closed
|
||||
IN_OPEN = 0x00000020L # File was opened
|
||||
IN_MOVED_FROM = 0x00000040L # File was moved from X
|
||||
IN_MOVED_TO = 0x00000080L # File was moved to Y
|
||||
IN_CREATE = 0x00000100L # Subfile was created
|
||||
IN_DELETE = 0x00000200L # Subfile was delete
|
||||
IN_DELETE_SELF = 0x00000400L # Self was deleted
|
||||
IN_MOVE_SELF = 0x00000800L # Self was moved
|
||||
IN_UNMOUNT = 0x00002000L # Backing fs was unmounted
|
||||
IN_Q_OVERFLOW = 0x00004000L # Event queued overflowed
|
||||
IN_IGNORED = 0x00008000L # File was ignored
|
||||
|
||||
IN_ONLYDIR = 0x01000000 # only watch the path if it is a directory
|
||||
IN_DONT_FOLLOW = 0x02000000 # don't follow a sym link
|
||||
IN_MASK_ADD = 0x20000000 # add to the mask of an already existing watch
|
||||
IN_ISDIR = 0x40000000 # event occurred against dir
|
||||
IN_ONESHOT = 0x80000000 # only send event once
|
||||
|
||||
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # closes
|
||||
IN_MOVED = IN_MOVED_FROM | IN_MOVED_TO # moves
|
||||
IN_CHANGED = IN_MODIFY | IN_ATTRIB # changes
|
||||
|
||||
IN_WATCH_MASK = (IN_MODIFY | IN_ATTRIB |
|
||||
IN_CREATE | IN_DELETE |
|
||||
IN_DELETE_SELF | IN_MOVE_SELF |
|
||||
IN_UNMOUNT | IN_MOVED_FROM | IN_MOVED_TO)
|
||||
|
||||
|
||||
_FLAG_TO_HUMAN = [
|
||||
(IN_ACCESS, 'access'),
|
||||
(IN_MODIFY, 'modify'),
|
||||
(IN_ATTRIB, 'attrib'),
|
||||
(IN_CLOSE_WRITE, 'close_write'),
|
||||
(IN_CLOSE_NOWRITE, 'close_nowrite'),
|
||||
(IN_OPEN, 'open'),
|
||||
(IN_MOVED_FROM, 'moved_from'),
|
||||
(IN_MOVED_TO, 'moved_to'),
|
||||
(IN_CREATE, 'create'),
|
||||
(IN_DELETE, 'delete'),
|
||||
(IN_DELETE_SELF, 'delete_self'),
|
||||
(IN_MOVE_SELF, 'move_self'),
|
||||
(IN_UNMOUNT, 'unmount'),
|
||||
(IN_Q_OVERFLOW, 'queue_overflow'),
|
||||
(IN_IGNORED, 'ignored'),
|
||||
(IN_ONLYDIR, 'only_dir'),
|
||||
(IN_DONT_FOLLOW, 'dont_follow'),
|
||||
(IN_MASK_ADD, 'mask_add'),
|
||||
(IN_ISDIR, 'is_dir'),
|
||||
(IN_ONESHOT, 'one_shot')
|
||||
]
|
||||
|
||||
|
||||
|
||||
def humanReadableMask(mask):
    """
    Auxiliary function that converts an hexadecimal mask into a series
    of human readable flags.
    """
    # Collect the name of every flag present in the mask, in table order.
    return [name for flag, name in _FLAG_TO_HUMAN if flag & mask]
|
||||
|
||||
|
||||
|
||||
class _Watch(object):
    """
    Watch object that represents a Watch point in the filesystem. The
    user should let INotify to create these objects

    @ivar path: The path over which this watch point is monitoring
    @ivar mask: The events monitored by this watchpoint
    @ivar autoAdd: Flag that determines whether this watch point
        should automatically add created subdirectories
    @ivar callbacks: C{list} of callback functions that will be called
        when an event occurs on this watch.
    """

    def __init__(self, path, mask=IN_WATCH_MASK, autoAdd=False,
                 callbacks=None):
        self.path = path
        self.mask = mask
        self.autoAdd = autoAdd
        # A fresh list per instance -- never share a mutable default.
        self.callbacks = [] if callbacks is None else callbacks


    def _notify(self, filepath, events):
        """
        Callback function used by L{INotify} to dispatch an event.
        """
        for cb in self.callbacks:
            cb(self, filepath, events)
|
||||
|
||||
|
||||
|
||||
class INotify(FileDescriptor, object):
    """
    The INotify file descriptor, it basically does everything related
    to INotify, from reading to notifying watch points.

    @ivar _buffer: a C{str} containing the data read from the inotify fd.

    @ivar _watchpoints: a C{dict} that maps from inotify watch ids to
        watchpoints objects

    @ivar _watchpaths: a C{dict} that maps from watched paths to the
        inotify watch ids
    """
    # Class-level alias so tests can substitute a fake inotify binding.
    _inotify = _inotify

    def __init__(self, reactor=None):
        FileDescriptor.__init__(self, reactor=reactor)

        # Smart way to allow parametrization of libc so I can override
        # it and test for the system errors.
        self._fd = self._inotify.init()

        fdesc.setNonBlocking(self._fd)
        fdesc._setCloseOnExec(self._fd)

        # The next 2 lines are needed to have self.loseConnection()
        # to call connectionLost() on us. Since we already created the
        # fd that talks to inotify we want to be notified even if we
        # haven't yet started reading.
        self.connected = 1
        self._writeDisconnected = True

        self._buffer = ''
        self._watchpoints = {}
        self._watchpaths = {}


    def _addWatch(self, path, mask, autoAdd, callbacks):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API and checks for any errors after the
        call. If there's an error L{INotify._addWatch} can raise an
        INotifyError. If there's no error it proceeds creating a watchpoint and
        adding a watchpath for inverse lookup of the file descriptor from the
        path.
        """
        wd = self._inotify.add(self._fd, path.path, mask)

        iwp = _Watch(path, mask, autoAdd, callbacks)

        # Maintain both directions: wd -> watchpoint and path -> wd.
        self._watchpoints[wd] = iwp
        self._watchpaths[path] = wd

        return wd


    def _rmWatch(self, wd):
        """
        Private helper that abstracts the use of ctypes.

        Calls the internal inotify API to remove an fd from inotify then
        removes the corresponding watchpoint from the internal mapping together
        with the file descriptor from the watchpath.
        """
        self._inotify.remove(self._fd, wd)
        iwp = self._watchpoints.pop(wd)
        self._watchpaths.pop(iwp.path)


    def connectionLost(self, reason):
        """
        Release the inotify file descriptor and do the necessary cleanup
        """
        FileDescriptor.connectionLost(self, reason)
        if self._fd >= 0:
            try:
                os.close(self._fd)
            except OSError, e:
                log.err(e, "Couldn't close INotify file descriptor.")


    def fileno(self):
        """
        Get the underlying file descriptor from this inotify observer.
        Required by L{abstract.FileDescriptor} subclasses.
        """
        return self._fd


    def doRead(self):
        """
        Read some data from the observed file descriptors
        """
        fdesc.readFromFD(self._fd, self._doRead)


    def _doRead(self, in_):
        """
        Work on the data just read from the file descriptor.

        Each event is a 16-byte C{struct inotify_event} header
        (wd, mask, cookie, len) optionally followed by a NUL-padded name of
        C{len} bytes.
        """
        self._buffer += in_
        while len(self._buffer) >= 16:

            wd, mask, cookie, size = struct.unpack("=LLLL", self._buffer[0:16])

            if size:
                # Strip the kernel's NUL padding from the name.
                name = self._buffer[16:16 + size].rstrip('\0')
            else:
                name = None

            # Consume this event; anything left may be a further event.
            self._buffer = self._buffer[16 + size:]

            try:
                iwp = self._watchpoints[wd]
            except KeyError:
                # Event for a watch we no longer know about; skip it.
                continue

            path = iwp.path
            if name:
                path = path.child(name)
            iwp._notify(path, mask)

            if (iwp.autoAdd and mask & IN_ISDIR and mask & IN_CREATE):
                # mask & IN_ISDIR already guarantees that the path is a
                # directory. There's no way you can get here without a
                # directory anyway, so no point in checking for that again.
                new_wd = self.watch(
                    path, mask=iwp.mask, autoAdd=True,
                    callbacks=iwp.callbacks
                )
                # This is very very very hacky and I'd rather not do this but
                # we have no other alternative that is less hacky other than
                # surrender. We use callLater because we don't want to have
                # too many events waiting while we process these subdirs, we
                # must always answer events as fast as possible or the overflow
                # might come.
                self.reactor.callLater(0,
                    self._addChildren, self._watchpoints[new_wd])
            if mask & IN_DELETE_SELF:
                self._rmWatch(wd)


    def _addChildren(self, iwp):
        """
        This is a very private method, please don't even think about using it.

        Note that this is a fricking hack... it's because we cannot be fast
        enough in adding a watch to a directory and so we basically end up
        getting here too late if some operations have already been going on in
        the subdir, we basically need to catchup. This eventually ends up
        meaning that we generate double events, your app must be resistant.
        """
        try:
            listdir = iwp.path.children()
        except OSError:
            # Somebody or something (like a test) removed this directory while
            # we were in the callLater(0...) waiting. It doesn't make sense to
            # process it anymore
            return

        # note that it's true that listdir will only see the subdirs inside
        # path at the moment of the call but path is monitored already so if
        # something is created we will receive an event.
        for f in listdir:
            # It's a directory, watch it and then add its children
            if f.isdir():
                wd = self.watch(
                    f, mask=iwp.mask, autoAdd=True,
                    callbacks=iwp.callbacks
                )
                iwp._notify(f, IN_ISDIR|IN_CREATE)
                # now f is watched, we can add its children the callLater is to
                # avoid recursion
                self.reactor.callLater(0,
                    self._addChildren, self._watchpoints[wd])

            # It's a file and we notify it.
            if f.isfile():
                iwp._notify(f, IN_CREATE|IN_CLOSE_WRITE)


    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False,
              callbacks=None, recursive=False):
        """
        Watch the 'mask' events in given path. Can raise C{INotifyError} when
        there's a problem while adding a directory.

        @param path: The path needing monitoring
        @type path: L{FilePath}

        @param mask: The events that should be watched
        @type mask: C{int}

        @param autoAdd: if True automatically add newly created
            subdirectories
        @type autoAdd: C{boolean}

        @param callbacks: A list of callbacks that should be called
            when an event happens in the given path.
            The callback should accept 3 arguments:
            (ignored, filepath, mask)
        @type callbacks: C{list} of callables

        @param recursive: Also add all the subdirectories in this path
        @type recursive: C{boolean}
        """
        if recursive:
            # This behavior is needed to be compatible with the windows
            # interface for filesystem changes:
            # http://msdn.microsoft.com/en-us/library/aa365465(VS.85).aspx
            # ReadDirectoryChangesW can do bWatchSubtree so it doesn't
            # make sense to implement this at an higher abstraction
            # level when other platforms support it already
            for child in path.walk():
                if child.isdir():
                    self.watch(child, mask, autoAdd, callbacks,
                               recursive=False)
        else:
            # Idempotent: return the existing watch descriptor if present.
            wd = self._isWatched(path)
            if wd:
                return wd

            mask = mask | IN_DELETE_SELF # need this to remove the watch

            return self._addWatch(path, mask, autoAdd, callbacks)


    def ignore(self, path):
        """
        Remove the watch point monitoring the given path

        @param path: The path that should be ignored
        @type path: L{FilePath}

        @raise KeyError: if the path is not currently watched.
        """
        wd = self._isWatched(path)
        if wd is None:
            raise KeyError("%r is not watched" % (path,))
        else:
            self._rmWatch(wd)


    def _isWatched(self, path):
        """
        Helper function that checks if the path is already monitored
        and returns its watchdescriptor if so or None otherwise.

        @param path: The path that should be checked
        @type path: L{FilePath}
        """
        return self._watchpaths.get(path, None)
|
||||
|
||||
|
||||
# Re-export so callers can catch INotifyError without importing the
# private twisted.python._inotify module directly.
INotifyError = _inotify.INotifyError


__all__ = ["INotify", "humanReadableMask", "IN_WATCH_MASK", "IN_ACCESS",
           "IN_MODIFY", "IN_ATTRIB", "IN_CLOSE_NOWRITE", "IN_CLOSE_WRITE",
           "IN_OPEN", "IN_MOVED_FROM", "IN_MOVED_TO", "IN_CREATE",
           "IN_DELETE", "IN_DELETE_SELF", "IN_MOVE_SELF", "IN_UNMOUNT",
           "IN_Q_OVERFLOW", "IN_IGNORED", "IN_ONLYDIR", "IN_DONT_FOLLOW",
           "IN_MASK_ADD", "IN_ISDIR", "IN_ONESHOT", "IN_CLOSE",
           "IN_MOVED", "IN_CHANGED"]
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,10 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
I/O Completion Ports reactor
|
||||
"""
|
||||
|
||||
from twisted.internet.iocpreactor.reactor import install
|
||||
|
||||
__all__ = ['install']
|
||||
|
|
@ -0,0 +1,400 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Abstract file handle class
|
||||
"""
|
||||
|
||||
from twisted.internet import main, error, interfaces
|
||||
from twisted.internet.abstract import _ConsumerMixin, _LogOwner
|
||||
from twisted.python import failure
|
||||
|
||||
from zope.interface import implements
|
||||
import errno
|
||||
|
||||
from twisted.internet.iocpreactor.const import ERROR_HANDLE_EOF
|
||||
from twisted.internet.iocpreactor.const import ERROR_IO_PENDING
|
||||
from twisted.internet.iocpreactor import iocpsupport as _iocp
|
||||
|
||||
|
||||
|
||||
class FileHandle(_ConsumerMixin, _LogOwner):
|
||||
"""
|
||||
File handle that can read and write asynchronously
|
||||
"""
|
||||
implements(interfaces.IPushProducer, interfaces.IConsumer,
|
||||
interfaces.ITransport, interfaces.IHalfCloseableDescriptor)
|
||||
# read stuff
|
||||
maxReadBuffers = 16
|
||||
readBufferSize = 4096
|
||||
reading = False
|
||||
dynamicReadBuffers = True # set this to false if subclass doesn't do iovecs
|
||||
_readNextBuffer = 0
|
||||
_readSize = 0 # how much data we have in the read buffer
|
||||
_readScheduled = None
|
||||
_readScheduledInOS = False
|
||||
|
||||
|
||||
def startReading(self):
|
||||
self.reactor.addActiveHandle(self)
|
||||
if not self._readScheduled and not self.reading:
|
||||
self.reading = True
|
||||
self._readScheduled = self.reactor.callLater(0,
|
||||
self._resumeReading)
|
||||
|
||||
|
||||
def stopReading(self):
|
||||
if self._readScheduled:
|
||||
self._readScheduled.cancel()
|
||||
self._readScheduled = None
|
||||
self.reading = False
|
||||
|
||||
|
||||
def _resumeReading(self):
|
||||
self._readScheduled = None
|
||||
if self._dispatchData() and not self._readScheduledInOS:
|
||||
self.doRead()
|
||||
|
||||
|
||||
def _dispatchData(self):
|
||||
"""
|
||||
Dispatch previously read data. Return True if self.reading and we don't
|
||||
have any more data
|
||||
"""
|
||||
if not self._readSize:
|
||||
return self.reading
|
||||
size = self._readSize
|
||||
full_buffers = size // self.readBufferSize
|
||||
while self._readNextBuffer < full_buffers:
|
||||
self.dataReceived(self._readBuffers[self._readNextBuffer])
|
||||
self._readNextBuffer += 1
|
||||
if not self.reading:
|
||||
return False
|
||||
remainder = size % self.readBufferSize
|
||||
if remainder:
|
||||
self.dataReceived(buffer(self._readBuffers[full_buffers],
|
||||
0, remainder))
|
||||
if self.dynamicReadBuffers:
|
||||
total_buffer_size = self.readBufferSize * len(self._readBuffers)
|
||||
# we have one buffer too many
|
||||
if size < total_buffer_size - self.readBufferSize:
|
||||
del self._readBuffers[-1]
|
||||
# we filled all buffers, so allocate one more
|
||||
elif (size == total_buffer_size and
|
||||
len(self._readBuffers) < self.maxReadBuffers):
|
||||
self._readBuffers.append(_iocp.AllocateReadBuffer(
|
||||
self.readBufferSize))
|
||||
self._readNextBuffer = 0
|
||||
self._readSize = 0
|
||||
return self.reading
|
||||
|
||||
|
||||
def _cbRead(self, rc, bytes, evt):
|
||||
self._readScheduledInOS = False
|
||||
if self._handleRead(rc, bytes, evt):
|
||||
self.doRead()
|
||||
|
||||
|
||||
def _handleRead(self, rc, bytes, evt):
|
||||
"""
|
||||
Returns False if we should stop reading for now
|
||||
"""
|
||||
if self.disconnected:
|
||||
return False
|
||||
# graceful disconnection
|
||||
if (not (rc or bytes)) or rc in (errno.WSAEDISCON, ERROR_HANDLE_EOF):
|
||||
self.reactor.removeActiveHandle(self)
|
||||
self.readConnectionLost(failure.Failure(main.CONNECTION_DONE))
|
||||
return False
|
||||
# XXX: not handling WSAEWOULDBLOCK
|
||||
# ("too many outstanding overlapped I/O requests")
|
||||
elif rc:
|
||||
self.connectionLost(failure.Failure(
|
||||
error.ConnectionLost("read error -- %s (%s)" %
|
||||
(errno.errorcode.get(rc, 'unknown'), rc))))
|
||||
return False
|
||||
else:
|
||||
assert self._readSize == 0
|
||||
assert self._readNextBuffer == 0
|
||||
self._readSize = bytes
|
||||
return self._dispatchData()
|
||||
|
||||
|
||||
def doRead(self):
|
||||
evt = _iocp.Event(self._cbRead, self)
|
||||
|
||||
evt.buff = buff = self._readBuffers
|
||||
rc, bytes = self.readFromHandle(buff, evt)
|
||||
|
||||
if not rc or rc == ERROR_IO_PENDING:
|
||||
self._readScheduledInOS = True
|
||||
else:
|
||||
self._handleRead(rc, bytes, evt)
|
||||
|
||||
|
||||
def readFromHandle(self, bufflist, evt):
|
||||
raise NotImplementedError() # TODO: this should default to ReadFile
|
||||
|
||||
|
||||
def dataReceived(self, data):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
def readConnectionLost(self, reason):
|
||||
self.connectionLost(reason)
|
||||
|
||||
|
||||
# write stuff
|
||||
dataBuffer = ''
|
||||
offset = 0
|
||||
writing = False
|
||||
_writeScheduled = None
|
||||
_writeDisconnecting = False
|
||||
_writeDisconnected = False
|
||||
writeBufferSize = 2**2**2**2
|
||||
|
||||
|
||||
def loseWriteConnection(self):
|
||||
self._writeDisconnecting = True
|
||||
self.startWriting()
|
||||
|
||||
|
||||
def _closeWriteConnection(self):
|
||||
# override in subclasses
|
||||
pass
|
||||
|
||||
|
||||
def writeConnectionLost(self, reason):
|
||||
# in current code should never be called
|
||||
self.connectionLost(reason)
|
||||
|
||||
|
||||
def startWriting(self):
|
||||
self.reactor.addActiveHandle(self)
|
||||
self.writing = True
|
||||
if not self._writeScheduled:
|
||||
self._writeScheduled = self.reactor.callLater(0,
|
||||
self._resumeWriting)
|
||||
|
||||
|
||||
def stopWriting(self):
|
||||
if self._writeScheduled:
|
||||
self._writeScheduled.cancel()
|
||||
self._writeScheduled = None
|
||||
self.writing = False
|
||||
|
||||
|
||||
def _resumeWriting(self):
|
||||
self._writeScheduled = None
|
||||
self.doWrite()
|
||||
|
||||
|
||||
def _cbWrite(self, rc, bytes, evt):
|
||||
if self._handleWrite(rc, bytes, evt):
|
||||
self.doWrite()
|
||||
|
||||
|
||||
def _handleWrite(self, rc, bytes, evt):
|
||||
"""
|
||||
Returns false if we should stop writing for now
|
||||
"""
|
||||
if self.disconnected or self._writeDisconnected:
|
||||
return False
|
||||
# XXX: not handling WSAEWOULDBLOCK
|
||||
# ("too many outstanding overlapped I/O requests")
|
||||
if rc:
|
||||
self.connectionLost(failure.Failure(
|
||||
error.ConnectionLost("write error -- %s (%s)" %
|
||||
(errno.errorcode.get(rc, 'unknown'), rc))))
|
||||
return False
|
||||
else:
|
||||
self.offset += bytes
|
||||
# If there is nothing left to send,
|
||||
if self.offset == len(self.dataBuffer) and not self._tempDataLen:
|
||||
self.dataBuffer = ""
|
||||
self.offset = 0
|
||||
# stop writing
|
||||
self.stopWriting()
|
||||
# If I've got a producer who is supposed to supply me with data
|
||||
if self.producer is not None and ((not self.streamingProducer)
|
||||
or self.producerPaused):
|
||||
# tell them to supply some more.
|
||||
self.producerPaused = True
|
||||
self.producer.resumeProducing()
|
||||
elif self.disconnecting:
|
||||
# But if I was previously asked to let the connection die,
|
||||
# do so.
|
||||
self.connectionLost(failure.Failure(main.CONNECTION_DONE))
|
||||
elif self._writeDisconnecting:
|
||||
# I was previously asked to to half-close the connection.
|
||||
self._writeDisconnected = True
|
||||
self._closeWriteConnection()
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def doWrite(self):
|
||||
if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
|
||||
# If there is currently less than SEND_LIMIT bytes left to send
|
||||
# in the string, extend it with the array data.
|
||||
self.dataBuffer = (buffer(self.dataBuffer, self.offset) +
|
||||
"".join(self._tempDataBuffer))
|
||||
self.offset = 0
|
||||
self._tempDataBuffer = []
|
||||
self._tempDataLen = 0
|
||||
|
||||
evt = _iocp.Event(self._cbWrite, self)
|
||||
|
||||
# Send as much data as you can.
|
||||
if self.offset:
|
||||
evt.buff = buff = buffer(self.dataBuffer, self.offset)
|
||||
else:
|
||||
evt.buff = buff = self.dataBuffer
|
||||
rc, bytes = self.writeToHandle(buff, evt)
|
||||
if rc and rc != ERROR_IO_PENDING:
|
||||
self._handleWrite(rc, bytes, evt)
|
||||
|
||||
|
||||
def writeToHandle(self, buff, evt):
|
||||
raise NotImplementedError() # TODO: this should default to WriteFile
|
||||
|
||||
|
||||
def write(self, data):
|
||||
"""Reliably write some data.
|
||||
|
||||
The data is buffered until his file descriptor is ready for writing.
|
||||
"""
|
||||
if isinstance(data, unicode): # no, really, I mean it
|
||||
raise TypeError("Data must not be unicode")
|
||||
if not self.connected or self._writeDisconnected:
|
||||
return
|
||||
if data:
|
||||
self._tempDataBuffer.append(data)
|
||||
self._tempDataLen += len(data)
|
||||
if self.producer is not None and self.streamingProducer:
|
||||
if (len(self.dataBuffer) + self._tempDataLen
|
||||
> self.writeBufferSize):
|
||||
self.producerPaused = True
|
||||
self.producer.pauseProducing()
|
||||
self.startWriting()
|
||||
|
||||
|
||||
def writeSequence(self, iovec):
|
||||
for i in iovec:
|
||||
if isinstance(i, unicode): # no, really, I mean it
|
||||
raise TypeError("Data must not be unicode")
|
||||
if not self.connected or not iovec or self._writeDisconnected:
|
||||
return
|
||||
self._tempDataBuffer.extend(iovec)
|
||||
for i in iovec:
|
||||
self._tempDataLen += len(i)
|
||||
if self.producer is not None and self.streamingProducer:
|
||||
if len(self.dataBuffer) + self._tempDataLen > self.writeBufferSize:
|
||||
self.producerPaused = True
|
||||
self.producer.pauseProducing()
|
||||
self.startWriting()
|
||||
|
||||
|
||||
# general stuff
|
||||
connected = False
|
||||
disconnected = False
|
||||
disconnecting = False
|
||||
logstr = "Uninitialized"
|
||||
|
||||
SEND_LIMIT = 128*1024
|
||||
|
||||
|
||||
def __init__(self, reactor = None):
|
||||
if not reactor:
|
||||
from twisted.internet import reactor
|
||||
self.reactor = reactor
|
||||
self._tempDataBuffer = [] # will be added to dataBuffer in doWrite
|
||||
self._tempDataLen = 0
|
||||
self._readBuffers = [_iocp.AllocateReadBuffer(self.readBufferSize)]
|
||||
|
||||
|
||||
def connectionLost(self, reason):
|
||||
"""
|
||||
The connection was lost.
|
||||
|
||||
This is called when the connection on a selectable object has been
|
||||
lost. It will be called whether the connection was closed explicitly,
|
||||
an exception occurred in an event handler, or the other end of the
|
||||
connection closed it first.
|
||||
|
||||
Clean up state here, but make sure to call back up to FileDescriptor.
|
||||
"""
|
||||
|
||||
self.disconnected = True
|
||||
self.connected = False
|
||||
if self.producer is not None:
|
||||
self.producer.stopProducing()
|
||||
self.producer = None
|
||||
self.stopReading()
|
||||
self.stopWriting()
|
||||
self.reactor.removeActiveHandle(self)
|
||||
|
||||
|
||||
def getFileHandle(self):
|
||||
return -1
|
||||
|
||||
|
||||
def loseConnection(self, _connDone=failure.Failure(main.CONNECTION_DONE)):
|
||||
"""
|
||||
Close the connection at the next available opportunity.
|
||||
|
||||
Call this to cause this FileDescriptor to lose its connection. It will
|
||||
first write any data that it has buffered.
|
||||
|
||||
If there is data buffered yet to be written, this method will cause the
|
||||
transport to lose its connection as soon as it's done flushing its
|
||||
write buffer. If you have a producer registered, the connection won't
|
||||
be closed until the producer is finished. Therefore, make sure you
|
||||
unregister your producer when it's finished, or the connection will
|
||||
never close.
|
||||
"""
|
||||
|
||||
if self.connected and not self.disconnecting:
|
||||
if self._writeDisconnected:
|
||||
# doWrite won't trigger the connection close anymore
|
||||
self.stopReading()
|
||||
self.stopWriting
|
||||
self.connectionLost(_connDone)
|
||||
else:
|
||||
self.stopReading()
|
||||
self.startWriting()
|
||||
self.disconnecting = 1
|
||||
|
||||
|
||||
# Producer/consumer implementation
|
||||
|
||||
def stopConsuming(self):
|
||||
"""
|
||||
Stop consuming data.
|
||||
|
||||
This is called when a producer has lost its connection, to tell the
|
||||
consumer to go lose its connection (and break potential circular
|
||||
references).
|
||||
"""
|
||||
self.unregisterProducer()
|
||||
self.loseConnection()
|
||||
|
||||
|
||||
# producer interface implementation
|
||||
|
||||
def resumeProducing(self):
|
||||
if self.connected and not self.disconnecting:
|
||||
self.startReading()
|
||||
|
||||
|
||||
def pauseProducing(self):
|
||||
self.stopReading()
|
||||
|
||||
|
||||
def stopProducing(self):
|
||||
self.loseConnection()
|
||||
|
||||
|
||||
__all__ = ['FileHandle']
|
||||
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
del iocpsupport\iocpsupport.c iocpsupport.pyd
|
||||
del /f /s /q build
|
||||
python setup.py build_ext -i -c mingw32
|
||||
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
Windows constants for IOCP
|
||||
"""
|
||||
|
||||
|
||||
# this stuff should really be gotten from Windows headers via pyrex, but it
|
||||
# probably is not going to change
|
||||
|
||||
ERROR_PORT_UNREACHABLE = 1234
|
||||
ERROR_NETWORK_UNREACHABLE = 1231
|
||||
ERROR_CONNECTION_REFUSED = 1225
|
||||
ERROR_IO_PENDING = 997
|
||||
ERROR_OPERATION_ABORTED = 995
|
||||
WAIT_TIMEOUT = 258
|
||||
ERROR_NETNAME_DELETED = 64
|
||||
ERROR_HANDLE_EOF = 38
|
||||
|
||||
INFINITE = -1
|
||||
|
||||
SO_UPDATE_CONNECT_CONTEXT = 0x7010
|
||||
SO_UPDATE_ACCEPT_CONTEXT = 0x700B
|
||||
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
Interfaces for iocpreactor
|
||||
"""
|
||||
|
||||
|
||||
from zope.interface import Interface
|
||||
|
||||
|
||||
|
||||
class IReadHandle(Interface):
|
||||
def readFromHandle(bufflist, evt):
|
||||
"""
|
||||
Read into the given buffers from this handle.
|
||||
|
||||
@param buff: the buffers to read into
|
||||
@type buff: list of objects implementing the read/write buffer protocol
|
||||
|
||||
@param evt: an IOCP Event object
|
||||
|
||||
@return: tuple (return code, number of bytes read)
|
||||
"""
|
||||
|
||||
|
||||
|
||||
class IWriteHandle(Interface):
|
||||
def writeToHandle(buff, evt):
|
||||
"""
|
||||
Write the given buffer to this handle.
|
||||
|
||||
@param buff: the buffer to write
|
||||
@type buff: any object implementing the buffer protocol
|
||||
|
||||
@param evt: an IOCP Event object
|
||||
|
||||
@return: tuple (return code, number of bytes written)
|
||||
"""
|
||||
|
||||
|
||||
|
||||
class IReadWriteHandle(IReadHandle, IWriteHandle):
|
||||
pass
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
def accept(long listening, long accepting, object buff, object obj):
|
||||
"""
|
||||
CAUTION: unlike system AcceptEx(), this function returns 0 on success
|
||||
"""
|
||||
cdef unsigned long bytes
|
||||
cdef int rc
|
||||
cdef Py_ssize_t size
|
||||
cdef void *mem_buffer
|
||||
cdef myOVERLAPPED *ov
|
||||
|
||||
PyObject_AsWriteBuffer(buff, &mem_buffer, &size)
|
||||
|
||||
ov = makeOV()
|
||||
if obj is not None:
|
||||
ov.obj = <PyObject *>obj
|
||||
|
||||
rc = lpAcceptEx(listening, accepting, mem_buffer, 0,
|
||||
<DWORD>size / 2, <DWORD>size / 2,
|
||||
&bytes, <OVERLAPPED *>ov)
|
||||
if not rc:
|
||||
rc = WSAGetLastError()
|
||||
if rc != ERROR_IO_PENDING:
|
||||
PyMem_Free(ov)
|
||||
return rc
|
||||
|
||||
# operation is in progress
|
||||
Py_XINCREF(obj)
|
||||
return 0
|
||||
|
||||
def get_accept_addrs(long s, object buff):
|
||||
cdef WSAPROTOCOL_INFO wsa_pi
|
||||
cdef int locallen, remotelen
|
||||
cdef Py_ssize_t size
|
||||
cdef void *mem_buffer
|
||||
cdef sockaddr *localaddr, *remoteaddr
|
||||
|
||||
PyObject_AsReadBuffer(buff, &mem_buffer, &size)
|
||||
|
||||
lpGetAcceptExSockaddrs(mem_buffer, 0, <DWORD>size / 2, <DWORD>size / 2,
|
||||
&localaddr, &locallen, &remoteaddr, &remotelen)
|
||||
return remoteaddr.sa_family, _makesockaddr(localaddr, locallen), _makesockaddr(remoteaddr, remotelen)
|
||||
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
def connect(long s, object addr, object obj):
|
||||
"""
|
||||
CAUTION: unlike system ConnectEx(), this function returns 0 on success
|
||||
"""
|
||||
cdef int family, rc
|
||||
cdef myOVERLAPPED *ov
|
||||
cdef sockaddr_in ipv4_name
|
||||
cdef sockaddr_in6 ipv6_name
|
||||
cdef sockaddr *name
|
||||
cdef int namelen
|
||||
|
||||
if not have_connectex:
|
||||
raise ValueError, 'ConnectEx is not available on this system'
|
||||
|
||||
family = getAddrFamily(s)
|
||||
if family == AF_INET:
|
||||
name = <sockaddr *>&ipv4_name
|
||||
namelen = sizeof(ipv4_name)
|
||||
fillinetaddr(&ipv4_name, addr)
|
||||
elif family == AF_INET6:
|
||||
name = <sockaddr *>&ipv6_name
|
||||
namelen = sizeof(ipv6_name)
|
||||
fillinet6addr(&ipv6_name, addr)
|
||||
else:
|
||||
raise ValueError, 'unsupported address family'
|
||||
name.sa_family = family
|
||||
|
||||
ov = makeOV()
|
||||
if obj is not None:
|
||||
ov.obj = <PyObject *>obj
|
||||
|
||||
rc = lpConnectEx(s, name, namelen, NULL, 0, NULL, <OVERLAPPED *>ov)
|
||||
|
||||
if not rc:
|
||||
rc = WSAGetLastError()
|
||||
if rc != ERROR_IO_PENDING:
|
||||
PyMem_Free(ov)
|
||||
return rc
|
||||
|
||||
# operation is in progress
|
||||
Py_XINCREF(obj)
|
||||
return 0
|
||||
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,312 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
# HANDLE and SOCKET are pointer-sized (they are 64 bit wide in 64-bit builds)
|
||||
ctypedef size_t HANDLE
|
||||
ctypedef size_t SOCKET
|
||||
ctypedef unsigned long DWORD
|
||||
# it's really a pointer, but we use it as an integer
|
||||
ctypedef size_t ULONG_PTR
|
||||
ctypedef int BOOL
|
||||
|
||||
cdef extern from 'io.h':
|
||||
long _get_osfhandle(int filehandle)
|
||||
|
||||
cdef extern from 'errno.h':
|
||||
int errno
|
||||
enum:
|
||||
EBADF
|
||||
|
||||
cdef extern from 'winsock2.h':
|
||||
pass
|
||||
|
||||
cdef extern from 'ws2tcpip.h':
|
||||
pass
|
||||
|
||||
cdef extern from 'windows.h':
|
||||
ctypedef struct OVERLAPPED:
|
||||
pass
|
||||
HANDLE CreateIoCompletionPort(HANDLE fileHandle, HANDLE existing, ULONG_PTR key, DWORD numThreads)
|
||||
BOOL GetQueuedCompletionStatus(HANDLE port, DWORD *bytes, ULONG_PTR *key, OVERLAPPED **ov, DWORD timeout)
|
||||
BOOL PostQueuedCompletionStatus(HANDLE port, DWORD bytes, ULONG_PTR key, OVERLAPPED *ov)
|
||||
DWORD GetLastError()
|
||||
BOOL CloseHandle(HANDLE h)
|
||||
enum:
|
||||
INVALID_HANDLE_VALUE
|
||||
void DebugBreak()
|
||||
|
||||
cdef extern from 'python.h':
|
||||
struct PyObject:
|
||||
pass
|
||||
void *PyMem_Malloc(size_t n) except NULL
|
||||
void PyMem_Free(void *p)
|
||||
struct PyThreadState:
|
||||
pass
|
||||
PyThreadState *PyEval_SaveThread()
|
||||
void PyEval_RestoreThread(PyThreadState *tstate)
|
||||
void Py_INCREF(object o)
|
||||
void Py_XINCREF(object o)
|
||||
void Py_DECREF(object o)
|
||||
void Py_XDECREF(object o)
|
||||
int PyObject_AsWriteBuffer(object obj, void **buffer, Py_ssize_t *buffer_len) except -1
|
||||
int PyObject_AsReadBuffer(object obj, void **buffer, Py_ssize_t *buffer_len) except -1
|
||||
object PyString_FromString(char *v)
|
||||
object PyString_FromStringAndSize(char *v, Py_ssize_t len)
|
||||
object PyBuffer_New(Py_ssize_t size)
|
||||
char *PyString_AsString(object obj) except NULL
|
||||
object PySequence_Fast(object o, char *m)
|
||||
# object PySequence_Fast_GET_ITEM(object o, Py_ssize_t i)
|
||||
PyObject** PySequence_Fast_ITEMS(object o)
|
||||
PyObject* PySequence_ITEM(PyObject *o, Py_ssize_t i)
|
||||
Py_ssize_t PySequence_Fast_GET_SIZE(object o)
|
||||
|
||||
cdef extern from '':
|
||||
struct sockaddr:
|
||||
unsigned short int sa_family
|
||||
char sa_data[0]
|
||||
cdef struct in_addr:
|
||||
unsigned long s_addr
|
||||
struct sockaddr_in:
|
||||
int sin_port
|
||||
in_addr sin_addr
|
||||
cdef struct in6_addr:
|
||||
char s6_addr[16]
|
||||
struct sockaddr_in6:
|
||||
short int sin6_family
|
||||
unsigned short int sin6_port
|
||||
unsigned long int sin6_flowinfo
|
||||
in6_addr sin6_addr
|
||||
unsigned long int sin6_scope_id
|
||||
int getsockopt(SOCKET s, int level, int optname, char *optval, int *optlen)
|
||||
enum:
|
||||
SOL_SOCKET
|
||||
SO_PROTOCOL_INFO
|
||||
SOCKET_ERROR
|
||||
ERROR_IO_PENDING
|
||||
AF_INET
|
||||
AF_INET6
|
||||
INADDR_ANY
|
||||
ctypedef struct WSAPROTOCOL_INFO:
|
||||
int iMaxSockAddr
|
||||
int iAddressFamily
|
||||
int WSAGetLastError()
|
||||
char *inet_ntoa(in_addr ina)
|
||||
unsigned long inet_addr(char *cp)
|
||||
unsigned short ntohs(unsigned short netshort)
|
||||
unsigned short htons(unsigned short hostshort)
|
||||
ctypedef struct WSABUF:
|
||||
long len
|
||||
char *buf
|
||||
# cdef struct TRANSMIT_FILE_BUFFERS:
|
||||
# pass
|
||||
int WSARecv(SOCKET s, WSABUF *buffs, DWORD buffcount, DWORD *bytes, DWORD *flags, OVERLAPPED *ov, void *crud)
|
||||
int WSARecvFrom(SOCKET s, WSABUF *buffs, DWORD buffcount, DWORD *bytes, DWORD *flags, sockaddr *fromaddr, int *fromlen, OVERLAPPED *ov, void *crud)
|
||||
int WSASend(SOCKET s, WSABUF *buffs, DWORD buffcount, DWORD *bytes, DWORD flags, OVERLAPPED *ov, void *crud)
|
||||
int WSAAddressToStringA(sockaddr *lpsaAddress, DWORD dwAddressLength,
|
||||
WSAPROTOCOL_INFO *lpProtocolInfo,
|
||||
char *lpszAddressString,
|
||||
DWORD *lpdwAddressStringLength)
|
||||
int WSAStringToAddressA(char *AddressString, int AddressFamily,
|
||||
WSAPROTOCOL_INFO *lpProtocolInfo,
|
||||
sockaddr *lpAddress, int *lpAddressLength)
|
||||
|
||||
cdef extern from 'string.h':
|
||||
void *memset(void *s, int c, size_t n)
|
||||
|
||||
cdef extern from 'winsock_pointers.h':
|
||||
int initWinsockPointers()
|
||||
BOOL (*lpAcceptEx)(SOCKET listening, SOCKET accepting, void *buffer, DWORD recvlen, DWORD locallen, DWORD remotelen, DWORD *bytes, OVERLAPPED *ov)
|
||||
void (*lpGetAcceptExSockaddrs)(void *buffer, DWORD recvlen, DWORD locallen, DWORD remotelen, sockaddr **localaddr, int *locallen, sockaddr **remoteaddr, int *remotelen)
|
||||
BOOL (*lpConnectEx)(SOCKET s, sockaddr *name, int namelen, void *buff, DWORD sendlen, DWORD *sentlen, OVERLAPPED *ov)
|
||||
# BOOL (*lpTransmitFile)(SOCKET s, HANDLE hFile, DWORD size, DWORD buffer_size, OVERLAPPED *ov, TRANSMIT_FILE_BUFFERS *buff, DWORD flags)
|
||||
|
||||
cdef struct myOVERLAPPED:
|
||||
OVERLAPPED ov
|
||||
PyObject *obj
|
||||
|
||||
cdef myOVERLAPPED *makeOV() except NULL:
|
||||
cdef myOVERLAPPED *res
|
||||
res = <myOVERLAPPED *>PyMem_Malloc(sizeof(myOVERLAPPED))
|
||||
if not res:
|
||||
raise MemoryError
|
||||
memset(res, 0, sizeof(myOVERLAPPED))
|
||||
return res
|
||||
|
||||
cdef void raise_error(int err, object message) except *:
|
||||
if not err:
|
||||
err = GetLastError()
|
||||
raise WindowsError(message, err)
|
||||
|
||||
class Event:
|
||||
def __init__(self, callback, owner, **kw):
|
||||
self.callback = callback
|
||||
self.owner = owner
|
||||
for k, v in kw.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
cdef class CompletionPort:
|
||||
cdef HANDLE port
|
||||
def __init__(self):
|
||||
cdef HANDLE res
|
||||
res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0)
|
||||
if not res:
|
||||
raise_error(0, 'CreateIoCompletionPort')
|
||||
self.port = res
|
||||
|
||||
def addHandle(self, HANDLE handle, size_t key=0):
|
||||
cdef HANDLE res
|
||||
res = CreateIoCompletionPort(handle, self.port, key, 0)
|
||||
if not res:
|
||||
raise_error(0, 'CreateIoCompletionPort')
|
||||
|
||||
def getEvent(self, long timeout):
|
||||
cdef PyThreadState *_save
|
||||
cdef unsigned long bytes, rc
|
||||
cdef size_t key
|
||||
cdef myOVERLAPPED *ov
|
||||
|
||||
_save = PyEval_SaveThread()
|
||||
rc = GetQueuedCompletionStatus(self.port, &bytes, &key, <OVERLAPPED **>&ov, timeout)
|
||||
PyEval_RestoreThread(_save)
|
||||
|
||||
if not rc:
|
||||
rc = GetLastError()
|
||||
else:
|
||||
rc = 0
|
||||
|
||||
obj = None
|
||||
if ov:
|
||||
if ov.obj:
|
||||
obj = <object>ov.obj
|
||||
Py_DECREF(obj) # we are stealing a reference here
|
||||
PyMem_Free(ov)
|
||||
|
||||
return (rc, bytes, key, obj)
|
||||
|
||||
def postEvent(self, unsigned long bytes, size_t key, obj):
|
||||
cdef myOVERLAPPED *ov
|
||||
cdef unsigned long rc
|
||||
|
||||
if obj is not None:
|
||||
ov = makeOV()
|
||||
Py_INCREF(obj) # give ov its own reference to obj
|
||||
ov.obj = <PyObject *>obj
|
||||
else:
|
||||
ov = NULL
|
||||
|
||||
rc = PostQueuedCompletionStatus(self.port, bytes, key, <OVERLAPPED *>ov)
|
||||
if not rc:
|
||||
if ov:
|
||||
Py_DECREF(obj)
|
||||
PyMem_Free(ov)
|
||||
raise_error(0, 'PostQueuedCompletionStatus')
|
||||
|
||||
def __del__(self):
|
||||
CloseHandle(self.port)
|
||||
|
||||
def makesockaddr(object buff):
|
||||
cdef void *mem_buffer
|
||||
cdef Py_ssize_t size
|
||||
|
||||
PyObject_AsReadBuffer(buff, &mem_buffer, &size)
|
||||
# XXX: this should really return the address family as well
|
||||
return _makesockaddr(<sockaddr *>mem_buffer, size)
|
||||
|
||||
cdef object _makesockaddr(sockaddr *addr, Py_ssize_t len):
|
||||
cdef sockaddr_in *sin
|
||||
cdef sockaddr_in6 *sin6
|
||||
cdef char buff[256]
|
||||
cdef int rc
|
||||
cdef DWORD buff_size = sizeof(buff)
|
||||
if not len:
|
||||
return None
|
||||
if addr.sa_family == AF_INET:
|
||||
sin = <sockaddr_in *>addr
|
||||
return PyString_FromString(inet_ntoa(sin.sin_addr)), ntohs(sin.sin_port)
|
||||
elif addr.sa_family == AF_INET6:
|
||||
sin6 = <sockaddr_in6 *>addr
|
||||
rc = WSAAddressToStringA(addr, sizeof(sockaddr_in6), NULL, buff, &buff_size)
|
||||
if rc == SOCKET_ERROR:
|
||||
raise_error(0, 'WSAAddressToString')
|
||||
host, sa_port = PyString_FromString(buff), ntohs(sin6.sin6_port)
|
||||
host, port = host.rsplit(':', 1)
|
||||
port = int(port)
|
||||
assert host[0] == '['
|
||||
assert host[-1] == ']'
|
||||
assert port == sa_port
|
||||
return host[1:-1], port
|
||||
else:
|
||||
return PyString_FromStringAndSize(addr.sa_data, sizeof(addr.sa_data))
|
||||
|
||||
|
||||
cdef object fillinetaddr(sockaddr_in *dest, object addr):
|
||||
cdef unsigned short port
|
||||
cdef unsigned long res
|
||||
cdef char *hoststr
|
||||
host, port = addr
|
||||
|
||||
hoststr = PyString_AsString(host)
|
||||
res = inet_addr(hoststr)
|
||||
if res == INADDR_ANY:
|
||||
raise ValueError, 'invalid IP address'
|
||||
dest.sin_addr.s_addr = res
|
||||
|
||||
dest.sin_port = htons(port)
|
||||
|
||||
|
||||
cdef object fillinet6addr(sockaddr_in6 *dest, object addr):
|
||||
cdef unsigned short port
|
||||
cdef unsigned long res
|
||||
cdef char *hoststr
|
||||
cdef int addrlen = sizeof(sockaddr_in6)
|
||||
host, port, flow, scope = addr
|
||||
host = host.split("%")[0] # remove scope ID, if any
|
||||
|
||||
hoststr = PyString_AsString(host)
|
||||
cdef int parseresult = WSAStringToAddressA(hoststr, AF_INET6, NULL,
|
||||
<sockaddr *>dest, &addrlen)
|
||||
if parseresult == SOCKET_ERROR:
|
||||
raise ValueError, 'invalid IPv6 address %r' % (host,)
|
||||
if parseresult != 0:
|
||||
raise RuntimeError, 'undefined error occurred during address parsing'
|
||||
# sin6_host field was handled by WSAStringToAddress
|
||||
dest.sin6_port = htons(port)
|
||||
dest.sin6_flowinfo = flow
|
||||
dest.sin6_scope_id = scope
|
||||
|
||||
|
||||
def AllocateReadBuffer(int size):
|
||||
return PyBuffer_New(size)
|
||||
|
||||
def maxAddrLen(long s):
|
||||
cdef WSAPROTOCOL_INFO wsa_pi
|
||||
cdef int size, rc
|
||||
|
||||
size = sizeof(wsa_pi)
|
||||
rc = getsockopt(s, SOL_SOCKET, SO_PROTOCOL_INFO, <char *>&wsa_pi, &size)
|
||||
if rc == SOCKET_ERROR:
|
||||
raise_error(WSAGetLastError(), 'getsockopt')
|
||||
return wsa_pi.iMaxSockAddr
|
||||
|
||||
cdef int getAddrFamily(SOCKET s) except *:
|
||||
cdef WSAPROTOCOL_INFO wsa_pi
|
||||
cdef int size, rc
|
||||
|
||||
size = sizeof(wsa_pi)
|
||||
rc = getsockopt(s, SOL_SOCKET, SO_PROTOCOL_INFO, <char *>&wsa_pi, &size)
|
||||
if rc == SOCKET_ERROR:
|
||||
raise_error(WSAGetLastError(), 'getsockopt')
|
||||
return wsa_pi.iAddressFamily
|
||||
|
||||
import socket # for WSAStartup
|
||||
if not initWinsockPointers():
|
||||
raise ValueError, 'Failed to initialize Winsock function vectors'
|
||||
|
||||
have_connectex = (lpConnectEx != NULL)
|
||||
|
||||
include 'acceptex.pxi'
|
||||
include 'connectex.pxi'
|
||||
include 'wsarecv.pxi'
|
||||
include 'wsasend.pxi'
|
||||
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
/* Copyright (c) 2008 Twisted Matrix Laboratories.
|
||||
* See LICENSE for details.
|
||||
*/
|
||||
|
||||
|
||||
#include<winsock2.h>
|
||||
#include<assert.h>
|
||||
#include<stdio.h>
|
||||
#include<stdlib.h>
|
||||
|
||||
#ifndef WSAID_CONNECTEX
|
||||
#define WSAID_CONNECTEX {0x25a207b9,0xddf3,0x4660,{0x8e,0xe9,0x76,0xe5,0x8c,0x74,0x06,0x3e}}
|
||||
#endif
|
||||
#ifndef WSAID_GETACCEPTEXSOCKADDRS
|
||||
#define WSAID_GETACCEPTEXSOCKADDRS {0xb5367df2,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
|
||||
#endif
|
||||
#ifndef WSAID_ACCEPTEX
|
||||
#define WSAID_ACCEPTEX {0xb5367df1,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
|
||||
#endif
|
||||
/*#ifndef WSAID_TRANSMITFILE
|
||||
#define WSAID_TRANSMITFILE {0xb5367df0,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
|
||||
#endif*/
|
||||
|
||||
|
||||
void *lpAcceptEx, *lpGetAcceptExSockaddrs, *lpConnectEx, *lpTransmitFile;
|
||||
|
||||
int initPointer(SOCKET s, void **fun, GUID guid) {
|
||||
int res;
|
||||
DWORD bytes;
|
||||
|
||||
*fun = NULL;
|
||||
res = WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER,
|
||||
&guid, sizeof(guid),
|
||||
fun, sizeof(fun),
|
||||
&bytes, NULL, NULL);
|
||||
return !res;
|
||||
}
|
||||
|
||||
int initWinsockPointers() {
|
||||
SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
|
||||
/* I hate C */
|
||||
GUID guid1 = WSAID_ACCEPTEX;
|
||||
GUID guid2 = WSAID_GETACCEPTEXSOCKADDRS;
|
||||
GUID guid3 = WSAID_CONNECTEX;
|
||||
/*GUID guid4 = WSAID_TRANSMITFILE;*/
|
||||
if (!s) {
|
||||
return 0;
|
||||
}
|
||||
if (!initPointer(s, &lpAcceptEx, guid1))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
if (!initPointer(s, &lpGetAcceptExSockaddrs, guid2)) {
|
||||
return 0;
|
||||
}
|
||||
if (!initPointer(s, &lpConnectEx, guid3)) {
|
||||
return 0;
|
||||
};
|
||||
/*initPointer(s, &lpTransmitFile, guid4);*/
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
/* Copyright (c) 2008 Twisted Matrix Laboratories.
|
||||
* See LICENSE for details.
|
||||
*/
|
||||
|
||||
|
||||
#include<windows.h>
|
||||
|
||||
int initWinsockPointers();
|
||||
BOOL
|
||||
(PASCAL FAR * lpAcceptEx)(
|
||||
IN SOCKET sListenSocket,
|
||||
IN SOCKET sAcceptSocket,
|
||||
IN PVOID lpOutputBuffer,
|
||||
IN DWORD dwReceiveDataLength,
|
||||
IN DWORD dwLocalAddressLength,
|
||||
IN DWORD dwRemoteAddressLength,
|
||||
OUT LPDWORD lpdwBytesReceived,
|
||||
IN LPOVERLAPPED lpOverlapped
|
||||
);
|
||||
VOID
|
||||
(PASCAL FAR * lpGetAcceptExSockaddrs)(
|
||||
IN PVOID lpOutputBuffer,
|
||||
IN DWORD dwReceiveDataLength,
|
||||
IN DWORD dwLocalAddressLength,
|
||||
IN DWORD dwRemoteAddressLength,
|
||||
OUT struct sockaddr **LocalSockaddr,
|
||||
OUT LPINT LocalSockaddrLength,
|
||||
OUT struct sockaddr **RemoteSockaddr,
|
||||
OUT LPINT RemoteSockaddrLength
|
||||
);
|
||||
BOOL
|
||||
(PASCAL FAR * lpConnectEx) (
|
||||
IN SOCKET s,
|
||||
IN const struct sockaddr FAR *name,
|
||||
IN int namelen,
|
||||
IN PVOID lpSendBuffer OPTIONAL,
|
||||
IN DWORD dwSendDataLength,
|
||||
OUT LPDWORD lpdwBytesSent,
|
||||
IN LPOVERLAPPED lpOverlapped
|
||||
);
|
||||
/*BOOL
|
||||
(PASCAL FAR * lpTransmitFile)(
|
||||
IN SOCKET hSocket,
|
||||
IN HANDLE hFile,
|
||||
IN DWORD nNumberOfBytesToWrite,
|
||||
IN DWORD nNumberOfBytesPerSend,
|
||||
IN LPOVERLAPPED lpOverlapped,
|
||||
IN LPTRANSMIT_FILE_BUFFERS lpTransmitBuffers,
|
||||
IN DWORD dwReserved
|
||||
);*/
|
||||
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
def recv(long s, object bufflist, object obj, unsigned long flags = 0):
    """
    Issue an overlapped WSARecv on socket C{s} into the writable buffers in
    C{bufflist}.

    Returns a C{(rc, bytes)} tuple: C{rc} is 0 or ERROR_IO_PENDING on
    success (any other value is a Winsock error code, with C{bytes} forced
    to 0), and C{obj} is attached to the OVERLAPPED so the completion-port
    consumer can recover it.
    """
    cdef int rc, res
    cdef myOVERLAPPED *ov
    cdef WSABUF *ws_buf
    cdef unsigned long bytes
    cdef PyObject **buffers
    cdef Py_ssize_t i, size, buffcount

    # PySequence_Fast gives us a flat C array of item pointers whatever the
    # input sequence type was.
    bufflist = PySequence_Fast(bufflist, 'second argument needs to be a list')
    buffcount = PySequence_Fast_GET_SIZE(bufflist)
    buffers = PySequence_Fast_ITEMS(bufflist)

    # Temporary WSABUF array describing every buffer; freed in the finally
    # block below regardless of outcome.
    ws_buf = <WSABUF *>PyMem_Malloc(buffcount*sizeof(WSABUF))

    try:
        for i from 0 <= i < buffcount:
            PyObject_AsWriteBuffer(<object>buffers[i], <void **>&ws_buf[i].buf, &size)
            ws_buf[i].len = <DWORD>size

        ov = makeOV()
        if obj is not None:
            ov.obj = <PyObject *>obj

        rc = WSARecv(s, ws_buf, <DWORD>buffcount, &bytes, &flags, <OVERLAPPED *>ov, NULL)

        if rc == SOCKET_ERROR:
            rc = WSAGetLastError()
            if rc != ERROR_IO_PENDING:
                # Operation will never complete: release the OVERLAPPED
                # ourselves since no completion event will do it.
                PyMem_Free(ov)
                return rc, 0

        # Keep obj alive until the completion is dequeued; the consumer is
        # expected to drop this reference (confirm against the event loop).
        Py_XINCREF(obj)
        return rc, bytes
    finally:
        PyMem_Free(ws_buf)
||||
|
||||
def recvfrom(long s, object buff, object addr_buff, object addr_len_buff, object obj, unsigned long flags = 0):
    """
    Issue an overlapped WSARecvFrom on socket C{s}.

    C{buff} receives the datagram; C{addr_buff} receives the sender's
    sockaddr and C{addr_len_buff} (a writable buffer exactly sizeof(int)
    long) receives its length.  Returns C{(rc, bytes)} like L{recv}.
    """
    cdef int rc, c_addr_buff_len, c_addr_len_buff_len
    cdef myOVERLAPPED *ov
    cdef WSABUF ws_buf
    cdef unsigned long bytes
    cdef sockaddr *c_addr_buff
    cdef int *c_addr_len_buff
    cdef Py_ssize_t size

    # Map all three Python buffers onto raw C pointers Winsock can write to.
    PyObject_AsWriteBuffer(buff, <void **>&ws_buf.buf, &size)
    ws_buf.len = <DWORD>size
    PyObject_AsWriteBuffer(addr_buff, <void **>&c_addr_buff, &size)
    c_addr_buff_len = <int>size
    PyObject_AsWriteBuffer(addr_len_buff, <void **>&c_addr_len_buff, &size)
    c_addr_len_buff_len = <int>size

    # WSARecvFrom writes the address length through an int*, so the buffer
    # must be exactly that size.
    if c_addr_len_buff_len != sizeof(int):
        raise ValueError, 'length of address length buffer needs to be sizeof(int)'

    # In/out parameter: starts as the capacity of addr_buff.
    c_addr_len_buff[0] = c_addr_buff_len

    ov = makeOV()
    if obj is not None:
        ov.obj = <PyObject *>obj

    rc = WSARecvFrom(s, &ws_buf, 1, &bytes, &flags, c_addr_buff, c_addr_len_buff, <OVERLAPPED *>ov, NULL)

    if rc == SOCKET_ERROR:
        rc = WSAGetLastError()
        if rc != ERROR_IO_PENDING:
            # No completion will be posted; free the OVERLAPPED here.
            PyMem_Free(ov)
            return rc, 0

    # Keep obj alive until the completion event is consumed.
    Py_XINCREF(obj)
    return rc, bytes
||||
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
def send(long s, object buff, object obj, unsigned long flags = 0):
    """
    Issue an overlapped WSASend of C{buff} on socket C{s}.

    Returns a C{(rc, bytes)} tuple; C{rc} is 0 or ERROR_IO_PENDING on
    success, otherwise a Winsock error code.
    """
    cdef int rc
    cdef myOVERLAPPED *ov
    cdef WSABUF ws_buf
    cdef unsigned long bytes
    cdef Py_ssize_t size

    # Read-only view is enough for sending.
    PyObject_AsReadBuffer(buff, <void **>&ws_buf.buf, &size)
    ws_buf.len = <DWORD>size

    ov = makeOV()
    if obj is not None:
        ov.obj = <PyObject *>obj

    rc = WSASend(s, &ws_buf, 1, &bytes, flags, <OVERLAPPED *>ov, NULL)

    if rc == SOCKET_ERROR:
        rc = WSAGetLastError()
        if rc != ERROR_IO_PENDING:
            # No completion will fire; free the OVERLAPPED ourselves.
            # NOTE(review): unlike recv/recvfrom this returns the raw
            # ``bytes`` out-parameter instead of 0 on failure -- confirm
            # callers don't rely on it being zeroed.
            PyMem_Free(ov)
            return rc, bytes

    # Keep obj alive until the completion event is consumed.
    Py_XINCREF(obj)
    return rc, bytes
||||
|
||||
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
test specifically:
|
||||
failed accept error message -- similar to test_tcp_internals
|
||||
immediate success on accept/connect/recv, including Event.ignore
|
||||
parametrize iocpsupport somehow -- via reactor?
|
||||
|
||||
do:
|
||||
break handling -- WaitForSingleObject on the IOCP handle?
|
||||
iovecs for write buffer
|
||||
do not wait for a mainloop iteration if resumeProducing (in _handleWrite) does startWriting
|
||||
don't addActiveHandle in every call to startWriting/startReading
|
||||
iocpified process support
|
||||
win32er-in-a-thread (or run GQCS in a thread -- it can't receive SIGBREAK)
|
||||
blocking in sendto() -- I think Windows can do that, especially with local UDP
|
||||
|
||||
buildbot:
|
||||
run in vmware
|
||||
start from a persistent snapshot
|
||||
|
||||
use a stub inside the vm to svnup/run tests/collect stdio
|
||||
lift logs through SMB? or ship them via tcp beams to the VM host
|
||||
|
||||
have a timeout on the test run
|
||||
if we time out, take a screenshot, save it, kill the VM
|
||||
|
||||
|
|
@ -0,0 +1,273 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_iocp -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Reactor that uses IO completion ports
|
||||
"""
|
||||
|
||||
import warnings, socket, sys
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from twisted.internet import base, interfaces, main, error
|
||||
from twisted.python import log, failure
|
||||
from twisted.internet._dumbwin32proc import Process
|
||||
from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
|
||||
|
||||
from twisted.internet.iocpreactor import iocpsupport as _iocp
|
||||
from twisted.internet.iocpreactor.const import WAIT_TIMEOUT
|
||||
from twisted.internet.iocpreactor import tcp, udp
|
||||
|
||||
try:
|
||||
from twisted.protocols.tls import TLSMemoryBIOFactory
|
||||
except ImportError:
|
||||
# Either pyOpenSSL isn't installed, or it is too old for this code to work.
|
||||
# The reactor won't provide IReactorSSL.
|
||||
TLSMemoryBIOFactory = None
|
||||
_extraInterfaces = ()
|
||||
warnings.warn(
|
||||
"pyOpenSSL 0.10 or newer is required for SSL support in iocpreactor. "
|
||||
"It is missing, so the reactor will not support SSL APIs.")
|
||||
else:
|
||||
_extraInterfaces = (interfaces.IReactorSSL,)
|
||||
|
||||
# Upper bound, in milliseconds, on a single blocking wait for completion
# events -- keeps ctrl-break latency reasonable.
MAX_TIMEOUT = 2000 # 2 seconds, see doIteration for explanation

# Cap on events handled per doIteration pass so scheduled calls still run
# under sustained I/O load.
EVENTS_PER_LOOP = 1000 # XXX: what's a good value here?

# keys to associate with normal and waker events
KEY_NORMAL, KEY_WAKEUP = range(2)

# Shared failure reasons used by _callEventCallback when the event's owner
# can no longer be inspected after its callback ran.
_NO_GETHANDLE = error.ConnectionFdescWentAway(
    'Handler has no getFileHandle method')
_NO_FILEDESC = error.ConnectionFdescWentAway('Filedescriptor went away')
||||
|
||||
|
||||
|
||||
class IOCPReactor(base._SignalReactorMixin, base.ReactorBase,
                  _ThreadedWin32EventsMixin):
    """
    A reactor driven by a Windows I/O completion port.

    Handles (TCP/UDP transports and ports) register their sockets with the
    completion port; finished overlapped operations are dequeued in
    L{doIteration} and dispatched back to the L{_iocp.Event} that started
    them.
    """
    implements(interfaces.IReactorTCP, interfaces.IReactorUDP,
               interfaces.IReactorMulticast, interfaces.IReactorProcess,
               *_extraInterfaces)

    # The CompletionPort wrapping the IOCP handle; created in __init__.
    port = None

    def __init__(self):
        base.ReactorBase.__init__(self)
        self.port = _iocp.CompletionPort()
        # Active transports/ports; consulted only by removeAll at shutdown.
        self.handles = set()


    def addActiveHandle(self, handle):
        # Record a handle so removeAll can return it during shutdown.
        self.handles.add(handle)


    def removeActiveHandle(self, handle):
        # discard() tolerates handles that were already removed.
        self.handles.discard(handle)


    def doIteration(self, timeout):
        """
        Poll the IO completion port for new events.
        """
        # This function sits and waits for an IO completion event.
        #
        # There are two requirements: process IO events as soon as they arrive
        # and process ctrl-break from the user in a reasonable amount of time.
        #
        # There are three kinds of waiting.
        # 1) GetQueuedCompletionStatus (self.port.getEvent) to wait for IO
        # events only.
        # 2) Msg* family of wait functions that can stop waiting when
        # ctrl-break is detected (then, I think, Python converts it into a
        # KeyboardInterrupt)
        # 3) *Ex family of wait functions that put the thread into an
        # "alertable" wait state which is supposedly triggered by IO completion
        #
        # 2) and 3) can be combined. Trouble is, my IO completion is not
        # causing 3) to trigger, possibly because I do not use an IO completion
        # callback. Windows is weird.
        # There are two ways to handle this. I could use MsgWaitForSingleObject
        # here and GetQueuedCompletionStatus in a thread. Or I could poll with
        # a reasonable interval. Guess what! Threads are hard.

        processed_events = 0
        if timeout is None:
            timeout = MAX_TIMEOUT
        else:
            # Never block longer than MAX_TIMEOUT so ctrl-break stays
            # responsive; getEvent takes milliseconds.
            timeout = min(MAX_TIMEOUT, int(1000*timeout))
        rc, bytes, key, evt = self.port.getEvent(timeout)
        while 1:
            if rc == WAIT_TIMEOUT:
                break
            if key != KEY_WAKEUP:
                assert key == KEY_NORMAL
                # Run the event callback with the owner's log context.
                log.callWithLogger(evt.owner, self._callEventCallback,
                                   rc, bytes, evt)
                processed_events += 1
            if processed_events >= EVENTS_PER_LOOP:
                break
            # Subsequent polls use a zero timeout: drain without blocking.
            rc, bytes, key, evt = self.port.getEvent(0)


    def _callEventCallback(self, rc, bytes, evt):
        """
        Invoke one completed event's callback, then drop the connection if
        the owner's file handle went away or the callback raised.
        """
        owner = evt.owner
        why = None
        try:
            evt.callback(rc, bytes, evt)
            handfn = getattr(owner, 'getFileHandle', None)
            if not handfn:
                why = _NO_GETHANDLE
            elif handfn() == -1:
                why = _NO_FILEDESC
            if why:
                return # ignore handles that were closed
        except:
            why = sys.exc_info()[1]
            log.err()
        if why:
            owner.loseConnection(failure.Failure(why))


    def installWaker(self):
        # No waker object needed: wakeUp posts directly to the port.
        pass


    def wakeUp(self):
        # Post a dummy completion keyed KEY_WAKEUP so doIteration returns.
        self.port.postEvent(0, KEY_WAKEUP, None)


    def registerHandle(self, handle):
        # Associate a raw file handle with the completion port.
        self.port.addHandle(handle, KEY_NORMAL)


    def createSocket(self, af, stype):
        """
        Create a socket already registered with the completion port.
        """
        skt = socket.socket(af, stype)
        self.registerHandle(skt.fileno())
        return skt


    def listenTCP(self, port, factory, backlog=50, interface=''):
        """
        @see: twisted.internet.interfaces.IReactorTCP.listenTCP
        """
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p


    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        @see: twisted.internet.interfaces.IReactorTCP.connectTCP
        """
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c


    if TLSMemoryBIOFactory is not None:
        # Real SSL implementations, available only when the TLS wrapper
        # imported at module level.
        def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
            """
            @see: twisted.internet.interfaces.IReactorSSL.listenSSL
            """
            port = self.listenTCP(
                port,
                TLSMemoryBIOFactory(contextFactory, False, factory),
                backlog, interface)
            port._type = 'TLS'
            return port


        def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
            """
            @see: twisted.internet.interfaces.IReactorSSL.connectSSL
            """
            return self.connectTCP(
                host, port,
                TLSMemoryBIOFactory(contextFactory, True, factory),
                timeout, bindAddress)
    else:
        def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
            """
            Non-implementation of L{IReactorSSL.listenSSL}. Some dependency
            is not satisfied. This implementation always raises
            L{NotImplementedError}.
            """
            raise NotImplementedError(
                "pyOpenSSL 0.10 or newer is required for SSL support in "
                "iocpreactor. It is missing, so the reactor does not support "
                "SSL APIs.")


        def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
            """
            Non-implementation of L{IReactorSSL.connectSSL}. Some dependency
            is not satisfied. This implementation always raises
            L{NotImplementedError}.
            """
            raise NotImplementedError(
                "pyOpenSSL 0.10 or newer is required for SSL support in "
                "iocpreactor. It is missing, so the reactor does not support "
                "SSL APIs.")


    def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """
        Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p


    def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192,
                        listenMultiple=False):
        """
        Connects a given DatagramProtocol to the given numeric UDP port.

        EXPERIMENTAL.

        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self,
                              listenMultiple)
        p.startListening()
        return p


    def spawnProcess(self, processProtocol, executable, args=(), env={},
                     path=None, uid=None, gid=None, usePTY=0, childFDs=None):
        """
        Spawn a process.

        Windows has no PTYs, per-process UIDs/GIDs, or arbitrary child FD
        mappings, so those options are rejected up front.
        """
        if uid is not None:
            raise ValueError("Setting UID is unsupported on this platform.")
        if gid is not None:
            raise ValueError("Setting GID is unsupported on this platform.")
        if usePTY:
            raise ValueError("PTYs are unsupported on this platform.")
        if childFDs is not None:
            raise ValueError(
                "Custom child file descriptor mappings are unsupported on "
                "this platform.")
        args, env = self._checkProcessArgs(args, env)
        return Process(self, processProtocol, executable, args, env, path)


    def removeAll(self):
        """
        Return all active handles and forget them (used at shutdown).
        """
        res = list(self.handles)
        self.handles.clear()
        return res
||||
|
||||
|
||||
|
||||
def install():
    """
    Configure the twisted mainloop to be run using the IOCP reactor.
    """
    main.installReactor(IOCPReactor())
||||
|
||||
|
||||
# Public API of this module: the reactor class and its installer.
__all__ = ['IOCPReactor', 'install']
||||
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
Distutils file for building low-level IOCP bindings from their Pyrex source
|
||||
"""
|
||||
|
||||
|
||||
from distutils.core import setup
|
||||
from distutils.extension import Extension
|
||||
from Cython.Distutils import build_ext
|
||||
|
||||
# Build the iocpsupport extension from its Pyrex source plus the C helper
# that resolves the Winsock extension-function pointers at runtime.
_iocpsupport_ext = Extension(
    'iocpsupport',
    ['iocpsupport/iocpsupport.pyx',
     'iocpsupport/winsock_pointers.c'],
    libraries=['ws2_32'],
)

setup(
    name='iocpsupport',
    ext_modules=[_iocpsupport_ext],
    cmdclass={'build_ext': build_ext},
)
||||
|
||||
|
|
@ -0,0 +1,578 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
TCP support for IOCP reactor
|
||||
"""
|
||||
|
||||
import socket, operator, errno, struct
|
||||
|
||||
from zope.interface import implements, classImplements
|
||||
|
||||
from twisted.internet import interfaces, error, address, main, defer
|
||||
from twisted.internet.abstract import _LogOwner, isIPAddress, isIPv6Address
|
||||
from twisted.internet.tcp import _SocketCloser, Connector as TCPConnector
|
||||
from twisted.internet.tcp import _AbortingMixin, _BaseBaseClient, _BaseTCPClient
|
||||
from twisted.python import log, failure, reflect, util
|
||||
|
||||
from twisted.internet.iocpreactor import iocpsupport as _iocp, abstract
|
||||
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
|
||||
from twisted.internet.iocpreactor.const import ERROR_IO_PENDING
|
||||
from twisted.internet.iocpreactor.const import SO_UPDATE_CONNECT_CONTEXT
|
||||
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
|
||||
from twisted.internet.iocpreactor.const import ERROR_CONNECTION_REFUSED
|
||||
from twisted.internet.iocpreactor.const import ERROR_NETWORK_UNREACHABLE
|
||||
|
||||
try:
|
||||
from twisted.internet._newtls import startTLS as _startTLS
|
||||
except ImportError:
|
||||
_startTLS = None
|
||||
|
||||
# ConnectEx returns these. XXX: find out what it does for timeout
# Map ConnectEx-specific error codes onto the standard WSA errno values so
# cbConnect can feed them to error.getConnectError like any other failure.
connectExErrors = {
        ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED,
        ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH,
        }
||||
|
||||
class Connection(abstract.FileHandle, _SocketCloser, _AbortingMixin):
    """
    Common implementation of an IOCP-backed TCP connection, shared by the
    client and server transports.

    @ivar TLS: C{False} to indicate the connection is in normal TCP mode,
        C{True} to indicate that TLS has been started and that operations must
        be routed through the L{TLSMemoryBIOProtocol} instance.
    """
    implements(IReadWriteHandle, interfaces.ITCPTransport,
               interfaces.ISystemHandle)

    TLS = False


    def __init__(self, sock, proto, reactor=None):
        abstract.FileHandle.__init__(self, reactor)
        self.socket = sock
        # Bound method, so the reactor can fetch the fd without holding a
        # reference to the socket itself; deleted in connectionLost.
        self.getFileHandle = sock.fileno
        self.protocol = proto


    def getHandle(self):
        """
        Return the underlying socket object (ISystemHandle).
        """
        return self.socket


    def dataReceived(self, rbuffer):
        # XXX: some day, we'll have protocols that can handle raw buffers
        self.protocol.dataReceived(str(rbuffer))


    def readFromHandle(self, bufflist, evt):
        # Kick off an overlapped recv into the supplied buffers.
        return _iocp.recv(self.getFileHandle(), bufflist, evt)


    def writeToHandle(self, buff, evt):
        """
        Send C{buff} to current file handle using C{_iocp.send}. The buffer
        sent is limited to a size of C{self.SEND_LIMIT}.
        """
        return _iocp.send(self.getFileHandle(),
            buffer(buff, 0, self.SEND_LIMIT), evt)


    def _closeWriteConnection(self):
        """
        Half-close the write side and notify a half-close-aware protocol.
        """
        try:
            self.socket.shutdown(1)
        except socket.error:
            # Socket may already be gone; half-close is best-effort.
            pass
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except:
                # A broken protocol callback tears down the whole connection.
                f = failure.Failure()
                log.err()
                self.connectionLost(f)


    def readConnectionLost(self, reason):
        """
        The read side closed: give a half-close-aware protocol the chance to
        keep the connection; otherwise drop it entirely.
        """
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)


    def connectionLost(self, reason):
        """
        Tear down the connection exactly once: close the socket, break
        reference cycles, then notify the protocol last.
        """
        if self.disconnected:
            return
        abstract.FileHandle.connectionLost(self, reason)
        # An aborted connection gets a hard close (RST-style) rather than a
        # clean shutdown.
        isClean = (reason is None or
                   not reason.check(error.ConnectionAborted))
        self._closeSocket(isClean)
        protocol = self.protocol
        del self.protocol
        del self.socket
        del self.getFileHandle
        protocol.connectionLost(reason)


    def logPrefix(self):
        """
        Return the prefix to log with when I own the logging thread.
        """
        return self.logstr


    def getTcpNoDelay(self):
        # operator.truth normalizes the getsockopt integer to a bool-like 0/1.
        return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP,
                                                     socket.TCP_NODELAY))


    def setTcpNoDelay(self, enabled):
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)


    def getTcpKeepAlive(self):
        return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_KEEPALIVE))


    def setTcpKeepAlive(self, enabled):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)


    if _startTLS is not None:
        # Only available when twisted.internet._newtls imported successfully.
        def startTLS(self, contextFactory, normal=True):
            """
            @see: L{ITLSTransport.startTLS}
            """
            _startTLS(self, contextFactory, normal, abstract.FileHandle)


    def write(self, data):
        """
        Write some data, either directly to the underlying handle or, if TLS
        has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
        send.

        @see: L{ITCPTransport.write}
        """
        if self.disconnected:
            return
        if self.TLS:
            self.protocol.write(data)
        else:
            abstract.FileHandle.write(self, data)


    def writeSequence(self, iovec):
        """
        Write some data, either directly to the underlying handle or, if TLS
        has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and
        send.

        @see: L{ITCPTransport.writeSequence}
        """
        if self.disconnected:
            return
        if self.TLS:
            self.protocol.writeSequence(iovec)
        else:
            abstract.FileHandle.writeSequence(self, iovec)


    def loseConnection(self, reason=None):
        """
        Close the underlying handle or, if TLS has been started, first shut it
        down.

        @see: L{ITCPTransport.loseConnection}
        """
        if self.TLS:
            if self.connected and not self.disconnecting:
                self.protocol.loseConnection()
        else:
            abstract.FileHandle.loseConnection(self, reason)


    def registerProducer(self, producer, streaming):
        """
        Register a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if self.TLS:
            # Registering a producer before we're connected shouldn't be a
            # problem. If we end up with a write(), that's already handled in
            # the write() code above, and there are no other potential
            # side-effects.
            self.protocol.registerProducer(producer, streaming)
        else:
            abstract.FileHandle.registerProducer(self, producer, streaming)


    def unregisterProducer(self):
        """
        Unregister a producer.

        If TLS is enabled, the TLS connection handles this.
        """
        if self.TLS:
            self.protocol.unregisterProducer()
        else:
            abstract.FileHandle.unregisterProducer(self)

if _startTLS is not None:
    # Declare ITLSTransport only when TLS support actually exists.
    classImplements(Connection, interfaces.ITLSTransport)
||||
|
||||
|
||||
|
||||
class Client(_BaseBaseClient, _BaseTCPClient, Connection):
    """
    Outgoing IOCP TCP connection, connected via the ConnectEx extension.

    @ivar _tlsClientDefault: Always C{True}, indicating that this is a client
        connection, and by default when TLS is negotiated this class will act as
        a TLS client.
    """
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    _tlsClientDefault = True
    _commonConnection = Connection

    def __init__(self, host, port, bindAddress, connector, reactor):
        # ConnectEx documentation says socket _has_ to be bound
        if bindAddress is None:
            bindAddress = ('', 0)
        self.reactor = reactor # createInternetSocket needs this
        _BaseTCPClient.__init__(self, host, port, bindAddress, connector,
                                reactor)


    def createInternetSocket(self):
        """
        Create a socket registered with the IOCP reactor.

        @see: L{_BaseTCPClient}
        """
        return self.reactor.createSocket(self.addressFamily, self.socketType)


    def _collectSocketDetails(self):
        """
        Clean up potentially circular references to the socket and to its
        C{getFileHandle} method.

        @see: L{_BaseBaseClient}
        """
        del self.socket, self.getFileHandle


    def _stopReadingAndWriting(self):
        """
        Remove the active handle from the reactor.

        @see: L{_BaseBaseClient}
        """
        self.reactor.removeActiveHandle(self)


    def cbConnect(self, rc, bytes, evt):
        """
        Completion callback for the overlapped connect: on failure translate
        the error code and abort; on success finish socket setup and hand the
        transport to a freshly built protocol.
        """
        if rc:
            # Translate ConnectEx-specific codes to standard WSA errnos.
            rc = connectExErrors.get(rc, rc)
            self.failIfNotConnected(error.getConnectError((rc,
                                    errno.errorcode.get(rc, 'Unknown error'))))
        else:
            # Required after ConnectEx so getpeername/shutdown work on the
            # socket (SO_UPDATE_CONNECT_CONTEXT).
            self.socket.setsockopt(
                socket.SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT,
                struct.pack('P', self.socket.fileno()))
            self.protocol = self.connector.buildProtocol(self.getPeer())
            self.connected = True
            logPrefix = self._getLogPrefix(self.protocol)
            self.logstr = logPrefix + ",client"
            self.protocol.makeConnection(self)
            self.startReading()


    def doConnect(self):
        """
        Start the overlapped ConnectEx call; if it fails synchronously,
        dispatch the error through cbConnect immediately.
        """
        if not hasattr(self, "connector"):
            # this happens if we connector.stopConnecting in
            # factory.startedConnecting
            return
        assert _iocp.have_connectex
        self.reactor.addActiveHandle(self)
        evt = _iocp.Event(self.cbConnect, self)

        rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt)
        if rc and rc != ERROR_IO_PENDING:
            self.cbConnect(rc, 0, evt)
||||
|
||||
|
||||
|
||||
class Server(Connection):
    """
    Serverside socket-stream connection class.

    I am a serverside network connection transport; a socket which came from an
    accept() on a server.

    @ivar _tlsClientDefault: Always C{False}, indicating that this is a server
        connection, and by default when TLS is negotiated this class will act as
        a TLS server.
    """

    _tlsClientDefault = False


    def __init__(self, sock, protocol, clientAddr, serverAddr, sessionno, reactor):
        """
        Server(sock, protocol, client, server, sessionno)

        Initialize me with a socket, a protocol, a descriptor for my peer (a
        tuple of host, port describing the other end of the connection), an
        instance of Port, and a session number.
        """
        Connection.__init__(self, sock, protocol, reactor)
        self.serverAddr = serverAddr
        self.clientAddr = clientAddr
        self.sessionno = sessionno
        logPrefix = self._getLogPrefix(self.protocol)
        # Log/repr strings are precomputed once; the connection is live as
        # soon as construction finishes.
        self.logstr = "%s,%s,%s" % (logPrefix, sessionno, self.clientAddr.host)
        self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__,
                                          self.sessionno, self.serverAddr.port)
        self.connected = True
        self.startReading()


    def __repr__(self):
        """
        A string representation of this connection.
        """
        return self.repstr


    def getHost(self):
        """
        Returns an IPv4Address.

        This indicates the server's address.
        """
        return self.serverAddr


    def getPeer(self):
        """
        Returns an IPv4Address.

        This indicates the client's address.
        """
        return self.clientAddr
||||
|
||||
|
||||
|
||||
class Connector(TCPConnector):
    """
    IOCP flavour of the TCP connector: reuses all of the base class's
    connection-management logic and only swaps in the IOCP Client transport.
    """
    def _makeTransport(self):
        # Build an IOCP-backed Client instead of the default transport.
        return Client(self.host, self.port, self.bindAddress, self,
                      self.reactor)
||||
|
||||
|
||||
|
||||
class Port(_SocketCloser, _LogOwner):
    """
    Listening TCP port driven by the AcceptEx extension: one overlapped
    accept is kept outstanding at a time, re-armed from its own completion
    callback.
    """
    implements(interfaces.IListeningPort)

    connected = False
    disconnected = False
    disconnecting = False
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM
    _addressType = address.IPv4Address
    # Counter used to number accepted connections for logging.
    sessionno = 0

    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber = None

    # A string describing the connections which will be created by this port.
    # Normally this is C{"TCP"}, since this is a TCP port, but when the TLS
    # implementation re-uses this class it overrides the value with C{"TLS"}.
    # Only used for logging.
    _type = 'TCP'

    def __init__(self, port, factory, backlog=50, interface='', reactor=None):
        self.port = port
        self.factory = factory
        self.backlog = backlog
        self.interface = interface
        self.reactor = reactor
        # An IPv6 listen interface switches both the socket family and the
        # address type reported by getHost/buildProtocol.
        if isIPv6Address(interface):
            self.addressFamily = socket.AF_INET6
            self._addressType = address.IPv6Address


    def __repr__(self):
        if self._realPortNumber is not None:
            return "<%s of %s on %s>" % (self.__class__,
                                         self.factory.__class__,
                                         self._realPortNumber)
        else:
            return "<%s of %s (not listening)>" % (self.__class__,
                                                   self.factory.__class__)


    def startListening(self):
        """
        Bind and listen, then arm the first overlapped accept.

        Raises L{error.CannotListenError} (wrapping the socket error) if the
        bind fails.
        """
        try:
            skt = self.reactor.createSocket(self.addressFamily,
                                            self.socketType)
            # TODO: resolve self.interface if necessary
            if self.addressFamily == socket.AF_INET6:
                addr = socket.getaddrinfo(self.interface, self.port)[0][4]
            else:
                addr = (self.interface, self.port)
            skt.bind(addr)
        except socket.error, le:
            raise error.CannotListenError, (self.interface, self.port, le)

        # Size of a sockaddr for this socket's family; AcceptEx needs room
        # for two of these (+16 bytes each, see doAccept).
        self.addrLen = _iocp.maxAddrLen(skt.fileno())

        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]

        log.msg("%s starting on %s" % (self._getLogPrefix(self.factory),
                                       self._realPortNumber))

        self.factory.doStart()
        skt.listen(self.backlog)
        self.connected = True
        self.disconnected = False
        self.reactor.addActiveHandle(self)
        self.socket = skt
        self.getFileHandle = self.socket.fileno
        self.doAccept()


    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Stop accepting connections on this port.

        This will shut down my socket and call self.connectionLost().
        It returns a deferred which will fire successfully when the
        port is actually closed.
        """
        self.disconnecting = True
        if self.connected:
            self.deferred = defer.Deferred()
            # Defer the actual teardown to the next reactor turn so callers
            # of stopListening aren't reentered.
            self.reactor.callLater(0, self.connectionLost, connDone)
            return self.deferred

    stopListening = loseConnection


    def _logConnectionLostMsg(self):
        """
        Log message for closing port
        """
        log.msg('(%s Port %s Closed)' % (self._type, self._realPortNumber))


    def connectionLost(self, reason):
        """
        Cleans up the socket.
        """
        self._logConnectionLostMsg()
        self._realPortNumber = None
        d = None
        if hasattr(self, "deferred"):
            d = self.deferred
            del self.deferred

        self.disconnected = True
        self.reactor.removeActiveHandle(self)
        self.connected = False
        self._closeSocket(True)
        del self.socket
        del self.getFileHandle

        try:
            self.factory.doStop()
        except:
            # If the deferred from loseConnection exists, route the failure
            # there; otherwise let it propagate.
            self.disconnecting = False
            if d is not None:
                d.errback(failure.Failure())
            else:
                raise
        else:
            self.disconnecting = False
            if d is not None:
                d.callback(None)


    def logPrefix(self):
        """
        Returns the name of my class, to prefix log entries with.
        """
        return reflect.qual(self.factory.__class__)


    def getHost(self):
        """
        Returns an IPv4Address.

        This indicates the server's address.
        """
        host, port = self.socket.getsockname()[:2]
        return self._addressType('TCP', host, port)


    def cbAccept(self, rc, bytes, evt):
        """
        Completion callback for AcceptEx: handle the finished accept and, if
        still listening, immediately arm the next one.
        """
        self.handleAccept(rc, evt)
        if not (self.disconnecting or self.disconnected):
            self.doAccept()


    def handleAccept(self, rc, evt):
        """
        Process one completed accept; returns True if a transport was (or
        could have been) created, False if the port is closing or the accept
        failed.
        """
        if self.disconnecting or self.disconnected:
            return False

        # possible errors:
        # (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED)
        if rc:
            log.msg("Could not accept new connection -- %s (%s)" %
                    (errno.errorcode.get(rc, 'unknown error'), rc))
            return False
        else:
            # Required after AcceptEx so the new socket inherits the listen
            # socket's properties (SO_UPDATE_ACCEPT_CONTEXT).
            evt.newskt.setsockopt(
                socket.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
                struct.pack('P', self.socket.fileno()))
            family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(),
                                                          evt.buff)
            assert family == self.addressFamily

            protocol = self.factory.buildProtocol(
                self._addressType('TCP', rAddr[0], rAddr[1]))
            if protocol is None:
                # Factory refused the connection.
                evt.newskt.close()
            else:
                s = self.sessionno
                self.sessionno = s+1
                transport = Server(evt.newskt, protocol,
                        self._addressType('TCP', rAddr[0], rAddr[1]),
                        self._addressType('TCP', lAddr[0], lAddr[1]),
                        s, self.reactor)
                protocol.makeConnection(transport)
            return True


    def doAccept(self):
        """
        Create a fresh socket and start an overlapped AcceptEx on it; on a
        synchronous failure dispatch the error via handleAccept directly.
        """
        evt = _iocp.Event(self.cbAccept, self)

        # see AcceptEx documentation
        # Buffer holds two sockaddrs (local + remote), each padded by 16.
        evt.buff = buff = _iocp.AllocateReadBuffer(2 * (self.addrLen + 16))

        evt.newskt = newskt = self.reactor.createSocket(self.addressFamily,
                                                        self.socketType)
        rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt)

        if rc and rc != ERROR_IO_PENDING:
            self.handleAccept(rc, evt)
||||
|
||||
|
||||
|
|
@ -0,0 +1,433 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
UDP support for IOCP reactor
|
||||
"""
|
||||
|
||||
import socket, operator, struct, warnings, errno
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from twisted.internet import defer, address, error, interfaces
|
||||
from twisted.internet.abstract import isIPAddress, isIPv6Address
|
||||
from twisted.python import log, failure
|
||||
|
||||
from twisted.internet.iocpreactor.const import ERROR_IO_PENDING
|
||||
from twisted.internet.iocpreactor.const import ERROR_CONNECTION_REFUSED
|
||||
from twisted.internet.iocpreactor.const import ERROR_PORT_UNREACHABLE
|
||||
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
|
||||
from twisted.internet.iocpreactor import iocpsupport as _iocp, abstract
|
||||
|
||||
|
||||
|
||||
class Port(abstract.FileHandle):
|
||||
"""
|
||||
UDP port, listening for packets.
|
||||
|
||||
@ivar addressFamily: L{socket.AF_INET} or L{socket.AF_INET6}, depending on
|
||||
whether this port is listening on an IPv4 address or an IPv6 address.
|
||||
"""
|
||||
implements(
|
||||
IReadWriteHandle, interfaces.IListeningPort, interfaces.IUDPTransport,
|
||||
interfaces.ISystemHandle)
|
||||
|
||||
addressFamily = socket.AF_INET
|
||||
socketType = socket.SOCK_DGRAM
|
||||
dynamicReadBuffers = False
|
||||
|
||||
# Actual port number being listened on, only set to a non-None
|
||||
# value when we are actually listening.
|
||||
_realPortNumber = None
|
||||
|
||||
|
||||
def __init__(self, port, proto, interface='', maxPacketSize=8192,
|
||||
reactor=None):
|
||||
"""
|
||||
Initialize with a numeric port to listen on.
|
||||
"""
|
||||
self.port = port
|
||||
self.protocol = proto
|
||||
self.readBufferSize = maxPacketSize
|
||||
self.interface = interface
|
||||
self.setLogStr()
|
||||
self._connectedAddr = None
|
||||
self._setAddressFamily()
|
||||
|
||||
abstract.FileHandle.__init__(self, reactor)
|
||||
|
||||
skt = socket.socket(self.addressFamily, self.socketType)
|
||||
addrLen = _iocp.maxAddrLen(skt.fileno())
|
||||
self.addressBuffer = _iocp.AllocateReadBuffer(addrLen)
|
||||
# WSARecvFrom takes an int
|
||||
self.addressLengthBuffer = _iocp.AllocateReadBuffer(
|
||||
struct.calcsize('i'))
|
||||
|
||||
|
||||
def _setAddressFamily(self):
|
||||
"""
|
||||
Resolve address family for the socket.
|
||||
"""
|
||||
if isIPv6Address(self.interface):
|
||||
self.addressFamily = socket.AF_INET6
|
||||
elif isIPAddress(self.interface):
|
||||
self.addressFamily = socket.AF_INET
|
||||
elif self.interface:
|
||||
raise error.InvalidAddressError(
|
||||
self.interface, 'not an IPv4 or IPv6 address')
|
||||
|
||||
|
||||
def __repr__(self):
|
||||
if self._realPortNumber is not None:
|
||||
return ("<%s on %s>" %
|
||||
(self.protocol.__class__, self._realPortNumber))
|
||||
else:
|
||||
return "<%s not connected>" % (self.protocol.__class__,)
|
||||
|
||||
|
||||
def getHandle(self):
|
||||
"""
|
||||
Return a socket object.
|
||||
"""
|
||||
return self.socket
|
||||
|
||||
|
||||
def startListening(self):
|
||||
"""
|
||||
Create and bind my socket, and begin listening on it.
|
||||
|
||||
This is called on unserialization, and must be called after creating a
|
||||
server to begin listening on the specified port.
|
||||
"""
|
||||
self._bindSocket()
|
||||
self._connectToProtocol()
|
||||
|
||||
|
||||
def createSocket(self):
|
||||
return self.reactor.createSocket(self.addressFamily, self.socketType)
|
||||
|
||||
|
||||
def _bindSocket(self):
|
||||
try:
|
||||
skt = self.createSocket()
|
||||
skt.bind((self.interface, self.port))
|
||||
except socket.error, le:
|
||||
raise error.CannotListenError(self.interface, self.port, le)
|
||||
|
||||
# Make sure that if we listened on port 0, we update that to
|
||||
# reflect what the OS actually assigned us.
|
||||
self._realPortNumber = skt.getsockname()[1]
|
||||
|
||||
log.msg("%s starting on %s" % (
|
||||
self._getLogPrefix(self.protocol), self._realPortNumber))
|
||||
|
||||
self.connected = True
|
||||
self.socket = skt
|
||||
self.getFileHandle = self.socket.fileno
|
||||
|
||||
|
||||
def _connectToProtocol(self):
|
||||
self.protocol.makeConnection(self)
|
||||
self.startReading()
|
||||
self.reactor.addActiveHandle(self)
|
||||
|
||||
|
||||
def cbRead(self, rc, bytes, evt):
|
||||
if self.reading:
|
||||
self.handleRead(rc, bytes, evt)
|
||||
self.doRead()
|
||||
|
||||
|
||||
def handleRead(self, rc, bytes, evt):
|
||||
if rc in (errno.WSAECONNREFUSED, errno.WSAECONNRESET,
|
||||
ERROR_CONNECTION_REFUSED, ERROR_PORT_UNREACHABLE):
|
||||
if self._connectedAddr:
|
||||
self.protocol.connectionRefused()
|
||||
elif rc:
|
||||
log.msg("error in recvfrom -- %s (%s)" %
|
||||
(errno.errorcode.get(rc, 'unknown error'), rc))
|
||||
else:
|
||||
try:
|
||||
self.protocol.datagramReceived(str(evt.buff[:bytes]),
|
||||
_iocp.makesockaddr(evt.addr_buff))
|
||||
except:
|
||||
log.err()
|
||||
|
||||
|
||||
def doRead(self):
|
||||
evt = _iocp.Event(self.cbRead, self)
|
||||
|
||||
evt.buff = buff = self._readBuffers[0]
|
||||
evt.addr_buff = addr_buff = self.addressBuffer
|
||||
evt.addr_len_buff = addr_len_buff = self.addressLengthBuffer
|
||||
rc, bytes = _iocp.recvfrom(self.getFileHandle(), buff,
|
||||
addr_buff, addr_len_buff, evt)
|
||||
|
||||
if rc and rc != ERROR_IO_PENDING:
|
||||
self.handleRead(rc, bytes, evt)
|
||||
|
||||
|
||||
def write(self, datagram, addr=None):
|
||||
"""
|
||||
Write a datagram.
|
||||
|
||||
@param addr: should be a tuple (ip, port), can be None in connected
|
||||
mode.
|
||||
"""
|
||||
if self._connectedAddr:
|
||||
assert addr in (None, self._connectedAddr)
|
||||
try:
|
||||
return self.socket.send(datagram)
|
||||
except socket.error, se:
|
||||
no = se.args[0]
|
||||
if no == errno.WSAEINTR:
|
||||
return self.write(datagram)
|
||||
elif no == errno.WSAEMSGSIZE:
|
||||
raise error.MessageLengthError("message too long")
|
||||
elif no in (errno.WSAECONNREFUSED, errno.WSAECONNRESET,
|
||||
ERROR_CONNECTION_REFUSED, ERROR_PORT_UNREACHABLE):
|
||||
self.protocol.connectionRefused()
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
assert addr != None
|
||||
if (not isIPAddress(addr[0]) and not isIPv6Address(addr[0])
|
||||
and addr[0] != "<broadcast>"):
|
||||
raise error.InvalidAddressError(
|
||||
addr[0],
|
||||
"write() only accepts IP addresses, not hostnames")
|
||||
if isIPAddress(addr[0]) and self.addressFamily == socket.AF_INET6:
|
||||
raise error.InvalidAddressError(
|
||||
addr[0], "IPv6 port write() called with IPv4 address")
|
||||
if isIPv6Address(addr[0]) and self.addressFamily == socket.AF_INET:
|
||||
raise error.InvalidAddressError(
|
||||
addr[0], "IPv4 port write() called with IPv6 address")
|
||||
try:
|
||||
return self.socket.sendto(datagram, addr)
|
||||
except socket.error, se:
|
||||
no = se.args[0]
|
||||
if no == errno.WSAEINTR:
|
||||
return self.write(datagram, addr)
|
||||
elif no == errno.WSAEMSGSIZE:
|
||||
raise error.MessageLengthError("message too long")
|
||||
elif no in (errno.WSAECONNREFUSED, errno.WSAECONNRESET,
|
||||
ERROR_CONNECTION_REFUSED, ERROR_PORT_UNREACHABLE):
|
||||
# in non-connected UDP ECONNREFUSED is platform dependent,
|
||||
# I think and the info is not necessarily useful.
|
||||
# Nevertheless maybe we should call connectionRefused? XXX
|
||||
return
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
def writeSequence(self, seq, addr):
|
||||
self.write("".join(seq), addr)
|
||||
|
||||
|
||||
def connect(self, host, port):
|
||||
"""
|
||||
'Connect' to remote server.
|
||||
"""
|
||||
if self._connectedAddr:
|
||||
raise RuntimeError(
|
||||
"already connected, reconnecting is not currently supported "
|
||||
"(talk to itamar if you want this)")
|
||||
if not isIPAddress(host) and not isIPv6Address(host):
|
||||
raise error.InvalidAddressError(
|
||||
host, 'not an IPv4 or IPv6 address.')
|
||||
self._connectedAddr = (host, port)
|
||||
self.socket.connect((host, port))
|
||||
|
||||
|
||||
def _loseConnection(self):
|
||||
self.stopReading()
|
||||
self.reactor.removeActiveHandle(self)
|
||||
if self.connected: # actually means if we are *listening*
|
||||
self.reactor.callLater(0, self.connectionLost)
|
||||
|
||||
|
||||
def stopListening(self):
|
||||
if self.connected:
|
||||
result = self.d = defer.Deferred()
|
||||
else:
|
||||
result = None
|
||||
self._loseConnection()
|
||||
return result
|
||||
|
||||
|
||||
def loseConnection(self):
|
||||
warnings.warn("Please use stopListening() to disconnect port",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
self.stopListening()
|
||||
|
||||
|
||||
def connectionLost(self, reason=None):
|
||||
"""
|
||||
Cleans up my socket.
|
||||
"""
|
||||
log.msg('(UDP Port %s Closed)' % self._realPortNumber)
|
||||
self._realPortNumber = None
|
||||
abstract.FileHandle.connectionLost(self, reason)
|
||||
self.protocol.doStop()
|
||||
self.socket.close()
|
||||
del self.socket
|
||||
del self.getFileHandle
|
||||
if hasattr(self, "d"):
|
||||
self.d.callback(None)
|
||||
del self.d
|
||||
|
||||
|
||||
def setLogStr(self):
|
||||
"""
|
||||
Initialize the C{logstr} attribute to be used by C{logPrefix}.
|
||||
"""
|
||||
logPrefix = self._getLogPrefix(self.protocol)
|
||||
self.logstr = "%s (UDP)" % logPrefix
|
||||
|
||||
|
||||
def logPrefix(self):
|
||||
"""
|
||||
Returns the name of my class, to prefix log entries with.
|
||||
"""
|
||||
return self.logstr
|
||||
|
||||
|
||||
def getHost(self):
|
||||
"""
|
||||
Return the local address of the UDP connection
|
||||
|
||||
@returns: the local address of the UDP connection
|
||||
@rtype: L{IPv4Address} or L{IPv6Address}
|
||||
"""
|
||||
addr = self.socket.getsockname()
|
||||
if self.addressFamily == socket.AF_INET:
|
||||
return address.IPv4Address('UDP', *addr)
|
||||
elif self.addressFamily == socket.AF_INET6:
|
||||
return address.IPv6Address('UDP', *(addr[:2]))
|
||||
|
||||
|
||||
def setBroadcastAllowed(self, enabled):
|
||||
"""
|
||||
Set whether this port may broadcast. This is disabled by default.
|
||||
|
||||
@param enabled: Whether the port may broadcast.
|
||||
@type enabled: L{bool}
|
||||
"""
|
||||
self.socket.setsockopt(
|
||||
socket.SOL_SOCKET, socket.SO_BROADCAST, enabled)
|
||||
|
||||
|
||||
def getBroadcastAllowed(self):
|
||||
"""
|
||||
Checks if broadcast is currently allowed on this port.
|
||||
|
||||
@return: Whether this port may broadcast.
|
||||
@rtype: L{bool}
|
||||
"""
|
||||
return operator.truth(
|
||||
self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST))
|
||||
|
||||
|
||||
|
||||
class MulticastMixin:
|
||||
"""
|
||||
Implement multicast functionality.
|
||||
"""
|
||||
|
||||
|
||||
def getOutgoingInterface(self):
|
||||
i = self.socket.getsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF)
|
||||
return socket.inet_ntoa(struct.pack("@i", i))
|
||||
|
||||
|
||||
def setOutgoingInterface(self, addr):
|
||||
"""
|
||||
Returns Deferred of success.
|
||||
"""
|
||||
return self.reactor.resolve(addr).addCallback(self._setInterface)
|
||||
|
||||
|
||||
def _setInterface(self, addr):
|
||||
i = socket.inet_aton(addr)
|
||||
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, i)
|
||||
return 1
|
||||
|
||||
|
||||
def getLoopbackMode(self):
|
||||
return self.socket.getsockopt(socket.IPPROTO_IP,
|
||||
socket.IP_MULTICAST_LOOP)
|
||||
|
||||
|
||||
def setLoopbackMode(self, mode):
|
||||
mode = struct.pack("b", operator.truth(mode))
|
||||
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP,
|
||||
mode)
|
||||
|
||||
|
||||
def getTTL(self):
|
||||
return self.socket.getsockopt(socket.IPPROTO_IP,
|
||||
socket.IP_MULTICAST_TTL)
|
||||
|
||||
|
||||
def setTTL(self, ttl):
|
||||
ttl = struct.pack("B", ttl)
|
||||
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
|
||||
|
||||
|
||||
def joinGroup(self, addr, interface=""):
|
||||
"""
|
||||
Join a multicast group. Returns Deferred of success.
|
||||
"""
|
||||
return self.reactor.resolve(addr).addCallback(self._joinAddr1,
|
||||
interface, 1)
|
||||
|
||||
|
||||
def _joinAddr1(self, addr, interface, join):
|
||||
return self.reactor.resolve(interface).addCallback(self._joinAddr2,
|
||||
addr, join)
|
||||
|
||||
|
||||
def _joinAddr2(self, interface, addr, join):
|
||||
addr = socket.inet_aton(addr)
|
||||
interface = socket.inet_aton(interface)
|
||||
if join:
|
||||
cmd = socket.IP_ADD_MEMBERSHIP
|
||||
else:
|
||||
cmd = socket.IP_DROP_MEMBERSHIP
|
||||
try:
|
||||
self.socket.setsockopt(socket.IPPROTO_IP, cmd, addr + interface)
|
||||
except socket.error, e:
|
||||
return failure.Failure(error.MulticastJoinError(addr, interface,
|
||||
*e.args))
|
||||
|
||||
|
||||
def leaveGroup(self, addr, interface=""):
|
||||
"""
|
||||
Leave multicast group, return Deferred of success.
|
||||
"""
|
||||
return self.reactor.resolve(addr).addCallback(self._joinAddr1,
|
||||
interface, 0)
|
||||
|
||||
|
||||
|
||||
class MulticastPort(MulticastMixin, Port):
|
||||
"""
|
||||
UDP Port that supports multicasting.
|
||||
"""
|
||||
|
||||
implements(interfaces.IMulticastTransport)
|
||||
|
||||
|
||||
def __init__(self, port, proto, interface='', maxPacketSize=8192,
|
||||
reactor=None, listenMultiple=False):
|
||||
Port.__init__(self, port, proto, interface, maxPacketSize, reactor)
|
||||
self.listenMultiple = listenMultiple
|
||||
|
||||
|
||||
def createSocket(self):
|
||||
skt = Port.createSocket(self)
|
||||
if self.listenMultiple:
|
||||
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
if hasattr(socket, "SO_REUSEPORT"):
|
||||
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
||||
return skt
|
||||
|
|
@ -0,0 +1,303 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
A kqueue()/kevent() based implementation of the Twisted main loop.
|
||||
|
||||
To use this reactor, start your application specifying the kqueue reactor::
|
||||
|
||||
twistd --reactor kqueue ...
|
||||
|
||||
To install the event loop from code (and you should do this before any
|
||||
connections, listeners or connectors are added)::
|
||||
|
||||
from twisted.internet import kqreactor
|
||||
kqreactor.install()
|
||||
|
||||
This implementation depends on Python 2.6 or higher which has kqueue support
|
||||
built in the select module.
|
||||
|
||||
Note, that you should use Python 2.6.5 or higher, since previous implementations
|
||||
of select.kqueue had U{http://bugs.python.org/issue5910} not yet fixed.
|
||||
"""
|
||||
|
||||
import errno
|
||||
|
||||
from zope.interface import implements
|
||||
|
||||
from select import kqueue, kevent
|
||||
from select import KQ_FILTER_READ, KQ_FILTER_WRITE
|
||||
from select import KQ_EV_DELETE, KQ_EV_ADD, KQ_EV_EOF
|
||||
|
||||
from twisted.internet.interfaces import IReactorFDSet, IReactorDaemonize
|
||||
|
||||
from twisted.python import log, failure
|
||||
from twisted.internet import main, posixbase
|
||||
|
||||
|
||||
class KQueueReactor(posixbase.PosixReactorBase):
|
||||
"""
|
||||
A reactor that uses kqueue(2)/kevent(2) and relies on Python 2.6 or higher
|
||||
which has built in support for kqueue in the select module.
|
||||
|
||||
@ivar _kq: A L{kqueue} which will be used to check for I/O readiness.
|
||||
|
||||
@ivar _selectables: A dictionary mapping integer file descriptors to
|
||||
instances of L{FileDescriptor} which have been registered with the
|
||||
reactor. All L{FileDescriptors} which are currently receiving read or
|
||||
write readiness notifications will be present as values in this
|
||||
dictionary.
|
||||
|
||||
@ivar _reads: A set containing integer file descriptors. Values in this
|
||||
set will be registered with C{_kq} for read readiness notifications
|
||||
which will be dispatched to the corresponding L{FileDescriptor}
|
||||
instances in C{_selectables}.
|
||||
|
||||
@ivar _writes: A set containing integer file descriptors. Values in this
|
||||
set will be registered with C{_kq} for write readiness notifications
|
||||
which will be dispatched to the corresponding L{FileDescriptor}
|
||||
instances in C{_selectables}.
|
||||
"""
|
||||
implements(IReactorFDSet, IReactorDaemonize)
|
||||
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Initialize kqueue object, file descriptor tracking dictionaries, and the
|
||||
base class.
|
||||
|
||||
See:
|
||||
- http://docs.python.org/library/select.html
|
||||
- www.freebsd.org/cgi/man.cgi?query=kqueue
|
||||
- people.freebsd.org/~jlemon/papers/kqueue.pdf
|
||||
"""
|
||||
self._kq = kqueue()
|
||||
self._reads = set()
|
||||
self._writes = set()
|
||||
self._selectables = {}
|
||||
posixbase.PosixReactorBase.__init__(self)
|
||||
|
||||
|
||||
def _updateRegistration(self, fd, filter, op):
|
||||
"""
|
||||
Private method for changing kqueue registration on a given FD
|
||||
filtering for events given filter/op. This will never block and
|
||||
returns nothing.
|
||||
"""
|
||||
self._kq.control([kevent(fd, filter, op)], 0, 0)
|
||||
|
||||
|
||||
def beforeDaemonize(self):
|
||||
"""
|
||||
Implement L{IReactorDaemonize.beforeDaemonize}.
|
||||
"""
|
||||
# Twisted-internal method called during daemonization (when application
|
||||
# is started via twistd). This is called right before the magic double
|
||||
# forking done for daemonization. We cleanly close the kqueue() and later
|
||||
# recreate it. This is needed since a) kqueue() are not inherited across
|
||||
# forks and b) twistd will create the reactor already before daemonization
|
||||
# (and will also add at least 1 reader to the reactor, an instance of
|
||||
# twisted.internet.posixbase._UnixWaker).
|
||||
#
|
||||
# See: twisted.scripts._twistd_unix.daemonize()
|
||||
self._kq.close()
|
||||
self._kq = None
|
||||
|
||||
|
||||
def afterDaemonize(self):
|
||||
"""
|
||||
Implement L{IReactorDaemonize.afterDaemonize}.
|
||||
"""
|
||||
# Twisted-internal method called during daemonization. This is called right
|
||||
# after daemonization and recreates the kqueue() and any readers/writers
|
||||
# that were added before. Note that you MUST NOT call any reactor methods
|
||||
# in between beforeDaemonize() and afterDaemonize()!
|
||||
self._kq = kqueue()
|
||||
for fd in self._reads:
|
||||
self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD)
|
||||
for fd in self._writes:
|
||||
self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD)
|
||||
|
||||
|
||||
def addReader(self, reader):
|
||||
"""
|
||||
Implement L{IReactorFDSet.addReader}.
|
||||
"""
|
||||
fd = reader.fileno()
|
||||
if fd not in self._reads:
|
||||
try:
|
||||
self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_ADD)
|
||||
except OSError:
|
||||
pass
|
||||
finally:
|
||||
self._selectables[fd] = reader
|
||||
self._reads.add(fd)
|
||||
|
||||
|
||||
def addWriter(self, writer):
|
||||
"""
|
||||
Implement L{IReactorFDSet.addWriter}.
|
||||
"""
|
||||
fd = writer.fileno()
|
||||
if fd not in self._writes:
|
||||
try:
|
||||
self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_ADD)
|
||||
except OSError:
|
||||
pass
|
||||
finally:
|
||||
self._selectables[fd] = writer
|
||||
self._writes.add(fd)
|
||||
|
||||
|
||||
def removeReader(self, reader):
|
||||
"""
|
||||
Implement L{IReactorFDSet.removeReader}.
|
||||
"""
|
||||
wasLost = False
|
||||
try:
|
||||
fd = reader.fileno()
|
||||
except:
|
||||
fd = -1
|
||||
if fd == -1:
|
||||
for fd, fdes in self._selectables.items():
|
||||
if reader is fdes:
|
||||
wasLost = True
|
||||
break
|
||||
else:
|
||||
return
|
||||
if fd in self._reads:
|
||||
self._reads.remove(fd)
|
||||
if fd not in self._writes:
|
||||
del self._selectables[fd]
|
||||
if not wasLost:
|
||||
try:
|
||||
self._updateRegistration(fd, KQ_FILTER_READ, KQ_EV_DELETE)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
def removeWriter(self, writer):
|
||||
"""
|
||||
Implement L{IReactorFDSet.removeWriter}.
|
||||
"""
|
||||
wasLost = False
|
||||
try:
|
||||
fd = writer.fileno()
|
||||
except:
|
||||
fd = -1
|
||||
if fd == -1:
|
||||
for fd, fdes in self._selectables.items():
|
||||
if writer is fdes:
|
||||
wasLost = True
|
||||
break
|
||||
else:
|
||||
return
|
||||
if fd in self._writes:
|
||||
self._writes.remove(fd)
|
||||
if fd not in self._reads:
|
||||
del self._selectables[fd]
|
||||
if not wasLost:
|
||||
try:
|
||||
self._updateRegistration(fd, KQ_FILTER_WRITE, KQ_EV_DELETE)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
def removeAll(self):
|
||||
"""
|
||||
Implement L{IReactorFDSet.removeAll}.
|
||||
"""
|
||||
return self._removeAll(
|
||||
[self._selectables[fd] for fd in self._reads],
|
||||
[self._selectables[fd] for fd in self._writes])
|
||||
|
||||
|
||||
def getReaders(self):
|
||||
"""
|
||||
Implement L{IReactorFDSet.getReaders}.
|
||||
"""
|
||||
return [self._selectables[fd] for fd in self._reads]
|
||||
|
||||
|
||||
def getWriters(self):
|
||||
"""
|
||||
Implement L{IReactorFDSet.getWriters}.
|
||||
"""
|
||||
return [self._selectables[fd] for fd in self._writes]
|
||||
|
||||
|
||||
def doKEvent(self, timeout):
|
||||
"""
|
||||
Poll the kqueue for new events.
|
||||
"""
|
||||
if timeout is None:
|
||||
timeout = 1
|
||||
|
||||
try:
|
||||
l = self._kq.control([], len(self._selectables), timeout)
|
||||
except OSError, e:
|
||||
if e[0] == errno.EINTR:
|
||||
return
|
||||
else:
|
||||
raise
|
||||
|
||||
_drdw = self._doWriteOrRead
|
||||
for event in l:
|
||||
fd = event.ident
|
||||
try:
|
||||
selectable = self._selectables[fd]
|
||||
except KeyError:
|
||||
# Handles the infrequent case where one selectable's
|
||||
# handler disconnects another.
|
||||
continue
|
||||
else:
|
||||
log.callWithLogger(selectable, _drdw, selectable, fd, event)
|
||||
|
||||
|
||||
def _doWriteOrRead(self, selectable, fd, event):
|
||||
"""
|
||||
Private method called when a FD is ready for reading, writing or was
|
||||
lost. Do the work and raise errors where necessary.
|
||||
"""
|
||||
why = None
|
||||
inRead = False
|
||||
(filter, flags, data, fflags) = (
|
||||
event.filter, event.flags, event.data, event.fflags)
|
||||
|
||||
if flags & KQ_EV_EOF and data and fflags:
|
||||
why = main.CONNECTION_LOST
|
||||
else:
|
||||
try:
|
||||
if selectable.fileno() == -1:
|
||||
inRead = False
|
||||
why = posixbase._NO_FILEDESC
|
||||
else:
|
||||
if filter == KQ_FILTER_READ:
|
||||
inRead = True
|
||||
why = selectable.doRead()
|
||||
if filter == KQ_FILTER_WRITE:
|
||||
inRead = False
|
||||
why = selectable.doWrite()
|
||||
except:
|
||||
# Any exception from application code gets logged and will
|
||||
# cause us to disconnect the selectable.
|
||||
why = failure.Failure()
|
||||
log.err(why, "An exception was raised from application code" \
|
||||
" while processing a reactor selectable")
|
||||
|
||||
if why:
|
||||
self._disconnectSelectable(selectable, why, inRead)
|
||||
|
||||
doIteration = doKEvent
|
||||
|
||||
|
||||
def install():
|
||||
"""
|
||||
Install the kqueue() reactor.
|
||||
"""
|
||||
p = KQueueReactor()
|
||||
from twisted.internet.main import installReactor
|
||||
installReactor(p)
|
||||
|
||||
|
||||
__all__ = ["KQueueReactor", "install"]
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_main -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
Backwards compatibility, and utility functions.
|
||||
|
||||
In general, this module should not be used, other than by reactor authors
|
||||
who need to use the 'installReactor' method.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.internet import error
|
||||
|
||||
CONNECTION_DONE = error.ConnectionDone('Connection done')
|
||||
CONNECTION_LOST = error.ConnectionLost('Connection lost')
|
||||
|
||||
|
||||
|
||||
def installReactor(reactor):
|
||||
"""
|
||||
Install reactor C{reactor}.
|
||||
|
||||
@param reactor: An object that provides one or more IReactor* interfaces.
|
||||
"""
|
||||
# this stuff should be common to all reactors.
|
||||
import twisted.internet
|
||||
import sys
|
||||
if 'twisted.internet.reactor' in sys.modules:
|
||||
raise error.ReactorAlreadyInstalledError("reactor already installed")
|
||||
twisted.internet.reactor = reactor
|
||||
sys.modules['twisted.internet.reactor'] = reactor
|
||||
|
||||
|
||||
__all__ = ["CONNECTION_LOST", "CONNECTION_DONE", "installReactor"]
|
||||
|
|
@ -0,0 +1,189 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
A poll() based implementation of the twisted main loop.
|
||||
|
||||
To install the event loop (and you should do this before any connections,
|
||||
listeners or connectors are added)::
|
||||
|
||||
from twisted.internet import pollreactor
|
||||
pollreactor.install()
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
# System imports
|
||||
import errno
|
||||
from select import error as SelectError, poll
|
||||
from select import POLLIN, POLLOUT, POLLHUP, POLLERR, POLLNVAL
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
# Twisted imports
|
||||
from twisted.python import log
|
||||
from twisted.internet import posixbase
|
||||
from twisted.internet.interfaces import IReactorFDSet
|
||||
|
||||
|
||||
|
||||
@implementer(IReactorFDSet)
|
||||
class PollReactor(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
|
||||
"""
|
||||
A reactor that uses poll(2).
|
||||
|
||||
@ivar _poller: A L{poll} which will be used to check for I/O
|
||||
readiness.
|
||||
|
||||
@ivar _selectables: A dictionary mapping integer file descriptors to
|
||||
instances of L{FileDescriptor} which have been registered with the
|
||||
reactor. All L{FileDescriptors} which are currently receiving read or
|
||||
write readiness notifications will be present as values in this
|
||||
dictionary.
|
||||
|
||||
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
|
||||
values (this is essentially a set). Keys in this dictionary will be
|
||||
registered with C{_poller} for read readiness notifications which will
|
||||
be dispatched to the corresponding L{FileDescriptor} instances in
|
||||
C{_selectables}.
|
||||
|
||||
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
|
||||
values (this is essentially a set). Keys in this dictionary will be
|
||||
registered with C{_poller} for write readiness notifications which will
|
||||
be dispatched to the corresponding L{FileDescriptor} instances in
|
||||
C{_selectables}.
|
||||
"""
|
||||
|
||||
_POLL_DISCONNECTED = (POLLHUP | POLLERR | POLLNVAL)
|
||||
_POLL_IN = POLLIN
|
||||
_POLL_OUT = POLLOUT
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Initialize polling object, file descriptor tracking dictionaries, and
|
||||
the base class.
|
||||
"""
|
||||
self._poller = poll()
|
||||
self._selectables = {}
|
||||
self._reads = {}
|
||||
self._writes = {}
|
||||
posixbase.PosixReactorBase.__init__(self)
|
||||
|
||||
|
||||
def _updateRegistration(self, fd):
|
||||
"""Register/unregister an fd with the poller."""
|
||||
try:
|
||||
self._poller.unregister(fd)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
mask = 0
|
||||
if fd in self._reads:
|
||||
mask = mask | POLLIN
|
||||
if fd in self._writes:
|
||||
mask = mask | POLLOUT
|
||||
if mask != 0:
|
||||
self._poller.register(fd, mask)
|
||||
else:
|
||||
if fd in self._selectables:
|
||||
del self._selectables[fd]
|
||||
|
||||
def _dictRemove(self, selectable, mdict):
|
||||
try:
|
||||
# the easy way
|
||||
fd = selectable.fileno()
|
||||
# make sure the fd is actually real. In some situations we can get
|
||||
# -1 here.
|
||||
mdict[fd]
|
||||
except:
|
||||
# the hard way: necessary because fileno() may disappear at any
|
||||
# moment, thanks to python's underlying sockets impl
|
||||
for fd, fdes in self._selectables.items():
|
||||
if selectable is fdes:
|
||||
break
|
||||
else:
|
||||
# Hmm, maybe not the right course of action? This method can't
|
||||
# fail, because it happens inside error detection...
|
||||
return
|
||||
if fd in mdict:
|
||||
del mdict[fd]
|
||||
self._updateRegistration(fd)
|
||||
|
||||
def addReader(self, reader):
|
||||
"""Add a FileDescriptor for notification of data available to read.
|
||||
"""
|
||||
fd = reader.fileno()
|
||||
if fd not in self._reads:
|
||||
self._selectables[fd] = reader
|
||||
self._reads[fd] = 1
|
||||
self._updateRegistration(fd)
|
||||
|
||||
def addWriter(self, writer):
|
||||
"""Add a FileDescriptor for notification of data available to write.
|
||||
"""
|
||||
fd = writer.fileno()
|
||||
if fd not in self._writes:
|
||||
self._selectables[fd] = writer
|
||||
self._writes[fd] = 1
|
||||
self._updateRegistration(fd)
|
||||
|
||||
def removeReader(self, reader):
|
||||
"""Remove a Selectable for notification of data available to read.
|
||||
"""
|
||||
return self._dictRemove(reader, self._reads)
|
||||
|
||||
def removeWriter(self, writer):
|
||||
"""Remove a Selectable for notification of data available to write.
|
||||
"""
|
||||
return self._dictRemove(writer, self._writes)
|
||||
|
||||
def removeAll(self):
|
||||
"""
|
||||
Remove all selectables, and return a list of them.
|
||||
"""
|
||||
return self._removeAll(
|
||||
[self._selectables[fd] for fd in self._reads],
|
||||
[self._selectables[fd] for fd in self._writes])
|
||||
|
||||
|
||||
def doPoll(self, timeout):
|
||||
"""Poll the poller for new events."""
|
||||
if timeout is not None:
|
||||
timeout = int(timeout * 1000) # convert seconds to milliseconds
|
||||
|
||||
try:
|
||||
l = self._poller.poll(timeout)
|
||||
except SelectError as e:
|
||||
if e.args[0] == errno.EINTR:
|
||||
return
|
||||
else:
|
||||
raise
|
||||
_drdw = self._doReadOrWrite
|
||||
for fd, event in l:
|
||||
try:
|
||||
selectable = self._selectables[fd]
|
||||
except KeyError:
|
||||
# Handles the infrequent case where one selectable's
|
||||
# handler disconnects another.
|
||||
continue
|
||||
log.callWithLogger(selectable, _drdw, selectable, fd, event)
|
||||
|
||||
doIteration = doPoll
|
||||
|
||||
def getReaders(self):
|
||||
return [self._selectables[fd] for fd in self._reads]
|
||||
|
||||
|
||||
def getWriters(self):
|
||||
return [self._selectables[fd] for fd in self._writes]
|
||||
|
||||
|
||||
|
||||
def install():
|
||||
"""Install the poll() reactor."""
|
||||
p = PollReactor()
|
||||
from twisted.internet.main import installReactor
|
||||
installReactor(p)
|
||||
|
||||
|
||||
__all__ = ["PollReactor", "install"]
|
||||
|
|
@ -0,0 +1,640 @@
|
|||
# -*- test-case-name: twisted.test.test_internet,twisted.internet.test.test_posixbase -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Posix reactor base class
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import socket
|
||||
import errno
|
||||
import os
|
||||
import sys
|
||||
|
||||
from zope.interface import implementer, classImplements
|
||||
|
||||
from twisted.python.compat import _PY3
|
||||
from twisted.internet.interfaces import IReactorUNIX, IReactorUNIXDatagram
|
||||
from twisted.internet.interfaces import (
|
||||
IReactorTCP, IReactorUDP, IReactorSSL, IReactorSocket)
|
||||
from twisted.internet.interfaces import IReactorProcess, IReactorMulticast
|
||||
from twisted.internet.interfaces import IHalfCloseableDescriptor
|
||||
from twisted.internet import error, udp, tcp
|
||||
|
||||
from twisted.python import log, failure, util
|
||||
from twisted.python.runtime import platformType, platform
|
||||
|
||||
from twisted.internet.base import ReactorBase, _SignalReactorMixin
|
||||
from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
|
||||
|
||||
# Exceptions that doSelect might return frequently
|
||||
_NO_FILENO = error.ConnectionFdescWentAway('Handler has no fileno method')
|
||||
_NO_FILEDESC = error.ConnectionFdescWentAway('File descriptor lost')
|
||||
|
||||
|
||||
try:
|
||||
from twisted.protocols import tls
|
||||
except ImportError:
|
||||
tls = None
|
||||
try:
|
||||
from twisted.internet import ssl
|
||||
except ImportError:
|
||||
ssl = None
|
||||
|
||||
unixEnabled = (platformType == 'posix')
|
||||
|
||||
processEnabled = False
|
||||
if unixEnabled:
|
||||
from twisted.internet import fdesc
|
||||
# Enable on Python 3 in ticket #5987:
|
||||
if not _PY3:
|
||||
from twisted.internet import process, _signals
|
||||
processEnabled = True
|
||||
|
||||
|
||||
if platform.isWindows():
|
||||
try:
|
||||
import win32process
|
||||
processEnabled = True
|
||||
except ImportError:
|
||||
win32process = None
|
||||
|
||||
|
||||
class _SocketWaker(log.Logger):
    """
    The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, implemented
    using a pair of sockets rather than pipes (due to the lack of support in
    select() on Windows for pipes), used to wake up the main loop from
    another thread.
    """
    # Flag consulted by the reactor's descriptor bookkeeping; this waker
    # stays connected for the reactor's entire lifetime.
    disconnected = 0

    def __init__(self, reactor):
        """
        Create a connected pair of loopback TCP sockets; writing to one end
        wakes up whatever is select()ing on the other.
        """
        self.reactor = reactor
        # Following select_trigger (from asyncore)'s example;
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # TCP_NODELAY so the single wakeup byte is sent immediately instead
        # of being delayed by Nagle's algorithm.
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        server.bind(('127.0.0.1', 0))
        server.listen(1)
        client.connect(server.getsockname())
        reader, clientaddr = server.accept()
        # Both ends non-blocking so neither wakeUp nor doRead can ever
        # stall the calling thread.
        client.setblocking(0)
        reader.setblocking(0)
        self.r = reader
        self.w = client
        self.fileno = self.r.fileno

    def wakeUp(self):
        """
        Send a byte to my connection.
        """
        try:
            util.untilConcludes(self.w.send, b'x')
        except socket.error as e:
            # WSAEWOULDBLOCK means the socket buffer is full, which implies
            # a wakeup is already pending; anything else is a real error.
            if e.args[0] != errno.WSAEWOULDBLOCK:
                raise

    def doRead(self):
        """
        Read (and discard) pending wakeup bytes from my connection.
        """
        try:
            self.r.recv(8192)
        except socket.error:
            # Spurious readability is harmless; ignore it.
            pass

    def connectionLost(self, reason):
        """
        Close both ends of the socket pair.
        """
        self.r.close()
        self.w.close()
|
||||
|
||||
|
||||
|
||||
class _FDWaker(log.Logger, object):
    """
    The I{self-pipe trick<http://cr.yp.to/docs/selfpipe.html>}, used to wake
    up the main loop from another thread or a signal handler.

    L{_FDWaker} is a base class for waker implementations based on
    writing to a pipe being monitored by the reactor.

    @ivar o: The file descriptor for the end of the pipe which can be
        written to to wake up a reactor monitoring this waker.

    @ivar i: The file descriptor which should be monitored in order to
        be awoken by this waker.
    """
    # Flag consulted by the reactor's descriptor bookkeeping.
    disconnected = 0

    i = None
    o = None

    def __init__(self, reactor):
        """
        Create the pipe and make both ends non-blocking and close-on-exec.
        """
        self.reactor = reactor
        self.i, self.o = os.pipe()
        # Non-blocking so that neither a full pipe (on write) nor an empty
        # one (on read) can stall the reactor or a signal handler.
        fdesc.setNonBlocking(self.i)
        fdesc._setCloseOnExec(self.i)
        fdesc.setNonBlocking(self.o)
        fdesc._setCloseOnExec(self.o)
        self.fileno = lambda: self.i

    def doRead(self):
        """
        Read some bytes from the pipe and discard them.
        """
        fdesc.readFromFD(self.fileno(), lambda data: None)

    def connectionLost(self, reason):
        """
        Close both ends of my pipe.
        """
        if not hasattr(self, "o"):
            # Already closed by an earlier call.
            return
        for fd in self.i, self.o:
            try:
                os.close(fd)
            except IOError:
                # NOTE(review): os.close raises OSError; on Python 2 that is
                # a distinct type from IOError, so close errors may still
                # propagate here -- confirm whether that is intended.
                pass
        del self.i, self.o
|
||||
|
||||
|
||||
|
||||
class _UnixWaker(_FDWaker):
    """
    This class provides a simple interface to wake up the event loop.

    This is used by threads or signals to wake up the event loop.
    """

    def wakeUp(self):
        """
        Write one byte to the pipe, and flush it.
        """
        # We don't use fdesc.writeToFD since we need to distinguish
        # between EINTR (try again) and EAGAIN (do nothing).
        if self.o is not None:
            try:
                util.untilConcludes(os.write, self.o, b'x')
            except OSError as e:
                # EAGAIN means the pipe is already full, so a wakeup is
                # already pending and nothing more need be done.
                # XXX There is no unit test for raising the exception
                # for other errnos. See #4285.
                if e.errno != errno.EAGAIN:
                    raise
|
||||
|
||||
|
||||
|
||||
# Pick the waker implementation appropriate for this platform: POSIX gets
# the pipe-based waker; everything else (primarily Windows and Jython)
# gets the socket-based one.
_Waker = _UnixWaker if platformType == 'posix' else _SocketWaker
|
||||
|
||||
|
||||
class _SIGCHLDWaker(_FDWaker):
    """
    L{_SIGCHLDWaker} can wake up a reactor whenever C{SIGCHLD} is
    received.

    The writable end of the inherited wakeup pipe is handed to the signal
    handling machinery; when the reactor notices the readable end, exited
    child processes are reaped.

    @see: L{twisted.internet._signals}
    """
    # The overridden __init__ which merely delegated to _FDWaker.__init__
    # with identical arguments was redundant and has been removed; the
    # inherited initializer is used directly.

    def install(self):
        """
        Install the handler necessary to make this waker active.
        """
        _signals.installHandler(self.o)

    def uninstall(self):
        """
        Remove the handler which makes this waker active.
        """
        _signals.installHandler(-1)

    def doRead(self):
        """
        Having woken up the reactor in response to receipt of
        C{SIGCHLD}, reap the process which exited.

        This is called whenever the reactor notices the waker pipe is
        writeable, which happens soon after any call to the C{wakeUp}
        method.
        """
        _FDWaker.doRead(self)
        process.reapAllProcesses()
|
||||
|
||||
|
||||
|
||||
|
||||
class _DisconnectSelectableMixin(object):
    """
    Mixin providing the C{_disconnectSelectable} method.
    """

    def _disconnectSelectable(self, selectable, why, isRead, faildict={
        error.ConnectionDone: failure.Failure(error.ConnectionDone()),
        error.ConnectionLost: failure.Failure(error.ConnectionLost())
        }):
        """
        Utility function for disconnecting a selectable.

        Supports half-close notification, isRead should be boolean indicating
        whether error resulted from doRead().

        @param selectable: the descriptor being disconnected.
        @param why: the exception instance describing the disconnection.
        @param isRead: C{True} if the error came from a read operation.
        @param faildict: mapping of exception class to pre-built
            L{failure.Failure}.  The mutable default is deliberate: it is a
            shared cache so the common disconnect reasons do not allocate a
            fresh Failure on every disconnection.
        """
        self.removeReader(selectable)
        f = faildict.get(why.__class__)
        if f:
            if (isRead and why.__class__ == error.ConnectionDone
                and IHalfCloseableDescriptor.providedBy(selectable)):
                # Clean shutdown of the read side of a half-closeable
                # descriptor: report only the read direction as lost and
                # leave the write side registered.
                selectable.readConnectionLost(f)
            else:
                self.removeWriter(selectable)
                selectable.connectionLost(f)
        else:
            # Uncached reason: wrap it in a fresh Failure.
            self.removeWriter(selectable)
            selectable.connectionLost(failure.Failure(why))
|
||||
|
||||
|
||||
|
||||
@implementer(IReactorTCP, IReactorUDP, IReactorMulticast)
class PosixReactorBase(_SignalReactorMixin, _DisconnectSelectableMixin,
                       ReactorBase):
    """
    A basis for reactors that use file descriptors.

    @ivar _childWaker: C{None} or a reference to the L{_SIGCHLDWaker}
        which is used to properly notice child process termination.
    """

    # Callable that creates a waker, overrideable so that subclasses can
    # substitute their own implementation:
    _wakerFactory = _Waker

    def installWaker(self):
        """
        Install a `waker' to allow threads and signals to wake up the IO thread.

        We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
        the reactor. On Windows we use a pair of sockets.
        """
        if not self.waker:
            self.waker = self._wakerFactory(self)
            # Track it as an internal reader so removeAll() won't hand it
            # back to application code.
            self._internalReaders.add(self.waker)
            self.addReader(self.waker)

    _childWaker = None

    def _handleSignals(self):
        """
        Extend the basic signal handling logic to also support
        handling SIGCHLD to know when to try to reap child processes.
        """
        _SignalReactorMixin._handleSignals(self)
        if platformType == 'posix' and processEnabled:
            if not self._childWaker:
                self._childWaker = _SIGCHLDWaker(self)
                self._internalReaders.add(self._childWaker)
                self.addReader(self._childWaker)
            self._childWaker.install()
            # Also reap all processes right now, in case we missed any
            # signals before we installed the SIGCHLD waker/handler.
            # This should only happen if someone used spawnProcess
            # before calling reactor.run (and the process also exited
            # already).
            process.reapAllProcesses()

    def _uninstallHandler(self):
        """
        If a child waker was created and installed, uninstall it now.

        Since this disables reactor functionality and is only called
        when the reactor is stopping, it doesn't provide any directly
        useful functionality, but the cleanup of reactor-related
        process-global state that it does helps in unit tests
        involving multiple reactors and is generally just a nice
        thing.
        """
        # XXX This would probably be an alright place to put all of
        # the cleanup code for all internal readers (here and in the
        # base class, anyway). See #3063 for that cleanup task.
        if self._childWaker:
            self._childWaker.uninstall()

    # IReactorProcess

    def spawnProcess(self, processProtocol, executable, args=(),
                     env={}, path=None,
                     uid=None, gid=None, usePTY=0, childFDs=None):
        """
        Spawn a child process, dispatching to the platform-appropriate
        implementation.

        NOTE: the mutable C{env={}} default is part of the long-standing
        public signature and is never mutated here.

        @raise ValueError: for option combinations the current platform
            does not support.
        @raise NotImplementedError: on platforms with no process support.
        """
        args, env = self._checkProcessArgs(args, env)
        if platformType == 'posix':
            if usePTY:
                if childFDs is not None:
                    raise ValueError("Using childFDs is not supported with usePTY=True.")
                return process.PTYProcess(self, executable, args, env, path,
                                          processProtocol, uid, gid, usePTY)
            else:
                return process.Process(self, executable, args, env, path,
                                       processProtocol, uid, gid, childFDs)
        elif platformType == "win32":
            # Windows supports none of the POSIX-only options.
            if uid is not None:
                raise ValueError("Setting UID is unsupported on this platform.")
            if gid is not None:
                raise ValueError("Setting GID is unsupported on this platform.")
            if usePTY:
                raise ValueError("The usePTY parameter is not supported on Windows.")
            if childFDs:
                raise ValueError("Customizing childFDs is not supported on Windows.")

            if win32process:
                from twisted.internet._dumbwin32proc import Process
                return Process(self, processProtocol, executable, args, env, path)
            else:
                raise NotImplementedError(
                    "spawnProcess not available since pywin32 is not installed.")
        else:
            raise NotImplementedError(
                "spawnProcess only available on Windows or POSIX.")

    # IReactorUDP

    def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    # IReactorMulticast

    def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192, listenMultiple=False):
        """Connects a given DatagramProtocol to the given numeric UDP port.

        EXPERIMENTAL.

        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self, listenMultiple)
        p.startListening()
        return p

    # IReactorUNIX

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        """
        Connect a client factory to the UNIX socket at C{address}.

        @returns: the started L{unix.Connector}.
        """
        assert unixEnabled, "UNIX support is not present"
        # Move this import back up to main level when twisted.internet.unix is
        # ported to Python 3:
        from twisted.internet import unix
        c = unix.Connector(address, factory, timeout, self, checkPID)
        c.connect()
        return c

    def listenUNIX(self, address, factory, backlog=50, mode=0o666, wantPID=0):
        """
        Listen on the UNIX socket at C{address} with the given factory.

        @returns: object conforming to L{IListeningPort}.
        """
        assert unixEnabled, "UNIX support is not present"
        # Move this import back up to main level when twisted.internet.unix is
        # ported to Python 3:
        from twisted.internet import unix
        p = unix.Port(address, factory, backlog, mode, self, wantPID)
        p.startListening()
        return p

    # IReactorUNIXDatagram

    def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192,
                           mode=0o666):
        """
        Connects a given L{DatagramProtocol} to the given path.

        EXPERIMENTAL.

        @returns: object conforming to L{IListeningPort}.
        """
        assert unixEnabled, "UNIX support is not present"
        # Move this import back up to main level when twisted.internet.unix is
        # ported to Python 3:
        from twisted.internet import unix
        p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
        p.startListening()
        return p

    def connectUNIXDatagram(self, address, protocol, maxPacketSize=8192,
                            mode=0o666, bindAddress=None):
        """
        Connects a L{ConnectedDatagramProtocol} instance to a path.

        EXPERIMENTAL.
        """
        assert unixEnabled, "UNIX support is not present"
        # Move this import back up to main level when twisted.internet.unix is
        # ported to Python 3:
        from twisted.internet import unix
        p = unix.ConnectedDatagramPort(address, protocol, maxPacketSize, mode, bindAddress, self)
        p.startListening()
        return p

    # IReactorSocket (but not on Windows)

    def adoptStreamPort(self, fileDescriptor, addressFamily, factory):
        """
        Create a new L{IListeningPort} from an already-initialized socket.

        This just dispatches to a suitable port implementation (eg from
        L{IReactorTCP}, etc) based on the specified C{addressFamily}.

        @see: L{twisted.internet.interfaces.IReactorSocket.adoptStreamPort}
        """
        if addressFamily not in (socket.AF_INET, socket.AF_INET6):
            raise error.UnsupportedAddressFamily(addressFamily)

        p = tcp.Port._fromListeningDescriptor(
            self, fileDescriptor, addressFamily, factory)
        p.startListening()
        return p

    def adoptStreamConnection(self, fileDescriptor, addressFamily, factory):
        """
        @see:
            L{twisted.internet.interfaces.IReactorSocket.adoptStreamConnection}
        """
        if addressFamily not in (socket.AF_INET, socket.AF_INET6):
            raise error.UnsupportedAddressFamily(addressFamily)

        return tcp.Server._fromConnectedSocket(
            fileDescriptor, addressFamily, factory, self)

    def adoptDatagramPort(self, fileDescriptor, addressFamily, protocol,
                          maxPacketSize=8192):
        """
        Adopt an already-bound datagram socket as a listening UDP port.

        @see: L{twisted.internet.interfaces.IReactorSocket.adoptDatagramPort}
        """
        if addressFamily not in (socket.AF_INET, socket.AF_INET6):
            raise error.UnsupportedAddressFamily(addressFamily)

        p = udp.Port._fromListeningDescriptor(
            self, fileDescriptor, addressFamily, protocol,
            maxPacketSize=maxPacketSize)
        p.startListening()
        return p

    # IReactorTCP

    def listenTCP(self, port, factory, backlog=50, interface=''):
        """
        Listen for TCP connections; @returns: an L{IListeningPort}.
        """
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        Connect a TCP client; @returns: the started L{tcp.Connector}.
        """
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c

    # IReactorSSL (sometimes, not implemented)

    def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
        """
        Connect an SSL client, preferring the memory-BIO TLS implementation
        and falling back to the legacy ssl module.
        """
        if tls is not None:
            # Preferred path: wrap the application factory in TLS and ride
            # on top of an ordinary TCP connection.
            tlsFactory = tls.TLSMemoryBIOFactory(contextFactory, True, factory)
            return self.connectTCP(host, port, tlsFactory, timeout, bindAddress)
        elif ssl is not None:
            c = ssl.Connector(
                host, port, factory, contextFactory, timeout, bindAddress, self)
            c.connect()
            return c
        else:
            assert False, "SSL support is not present"

    def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
        """
        Listen for SSL connections, preferring the memory-BIO TLS
        implementation and falling back to the legacy ssl module.
        """
        if tls is not None:
            tlsFactory = tls.TLSMemoryBIOFactory(contextFactory, False, factory)
            port = self.listenTCP(port, tlsFactory, backlog, interface)
            # Re-label the port so it reports itself as TLS, not TCP.
            port._type = 'TLS'
            return port
        elif ssl is not None:
            p = ssl.Port(
                port, factory, contextFactory, backlog, interface, self)
            p.startListening()
            return p
        else:
            assert False, "SSL support is not present"

    def _removeAll(self, readers, writers):
        """
        Remove all readers and writers, and list of removed L{IReadDescriptor}s
        and L{IWriteDescriptor}s.

        Meant for calling from subclasses, to implement removeAll, like::

          def removeAll(self):
              return self._removeAll(self._reads, self._writes)

        where C{self._reads} and C{self._writes} are iterables.
        """
        # Internal readers (wakers) are excluded: they belong to the
        # reactor, not to application code.
        removedReaders = set(readers) - self._internalReaders
        for reader in removedReaders:
            self.removeReader(reader)

        removedWriters = set(writers)
        for writer in removedWriters:
            self.removeWriter(writer)

        return list(removedReaders | removedWriters)
|
||||
|
||||
|
||||
class _PollLikeMixin(object):
    """
    Mixin for poll-like reactors.

    Subclasses must define the following attributes::

      - _POLL_DISCONNECTED - Bitmask for events indicating a connection was
        lost.
      - _POLL_IN - Bitmask for events indicating there is input to read.
      - _POLL_OUT - Bitmask for events indicating output can be written.

    Must be mixed in to a subclass of PosixReactorBase (for
    _disconnectSelectable).
    """

    def _doReadOrWrite(self, selectable, fd, event):
        """
        fd is available for read or write, do the work and raise errors if
        necessary.

        @param selectable: the descriptor object registered for C{fd}.
        @param fd: the numeric file descriptor the event arrived on.
        @param event: the poll-style event bitmask.
        """
        why = None
        inRead = False
        if event & self._POLL_DISCONNECTED and not (event & self._POLL_IN):
            # Handle disconnection.  But only if we finished processing all
            # the pending input.
            if fd in self._reads:
                # If we were reading from the descriptor then this is a
                # clean shutdown.  We know there are no read events pending
                # because we just checked above.  It also might be a
                # half-close (which is why we have to keep track of inRead).
                inRead = True
                why = CONNECTION_DONE
            else:
                # If we weren't reading, this is an error shutdown of some
                # sort.
                why = CONNECTION_LOST
        else:
            # Any non-disconnect event turns into a doRead or a doWrite.
            try:
                # First check to see if the descriptor is still valid.  This
                # gives fileno() a chance to raise an exception, too.
                # Ideally, disconnection would always be indicated by the
                # return value of doRead or doWrite (or an exception from
                # one of those methods), but calling fileno here helps make
                # buggy applications more transparent.
                if selectable.fileno() == -1:
                    # -1 is sort of a historical Python artifact.  Python
                    # files and sockets used to change their file descriptor
                    # to -1 when they closed.  For the time being, we'll
                    # continue to support this anyway in case applications
                    # replicated it, plus abstract.FileDescriptor.fileno
                    # returns -1.  Eventually it'd be good to deprecate this
                    # case.
                    why = _NO_FILEDESC
                else:
                    if event & self._POLL_IN:
                        # Handle a read event.
                        why = selectable.doRead()
                        inRead = True
                    if not why and event & self._POLL_OUT:
                        # Handle a write event, as long as doRead didn't
                        # disconnect us.
                        why = selectable.doWrite()
                        inRead = False
            except:
                # Any exception from application code gets logged and will
                # cause us to disconnect the selectable.
                why = sys.exc_info()[1]
                log.err()
        if why:
            self._disconnectSelectable(selectable, why, inRead)
|
||||
|
||||
|
||||
|
||||
# Conditionally declare the optional interfaces PosixReactorBase provides,
# based on which features were detected at import time above.
if tls is not None or ssl is not None:
    classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
    classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
    classImplements(PosixReactorBase, IReactorProcess)
if getattr(socket, 'fromfd', None) is not None:
    classImplements(PosixReactorBase, IReactorSocket)

__all__ = ["PosixReactorBase"]
|
||||
1072
Linux_i686/lib/python2.7/site-packages/twisted/internet/process.py
Normal file
1072
Linux_i686/lib/python2.7/site-packages/twisted/internet/process.py
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,848 @@
|
|||
# -*- test-case-name: twisted.test.test_factories,twisted.internet.test.test_protocol -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Standard implementations of Twisted protocol-related interfaces.
|
||||
|
||||
Start here if you are looking to write a new protocol implementation for
|
||||
Twisted. The Protocol class contains some introductory material.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import random
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.python import log, failure, components
|
||||
from twisted.internet import interfaces, error, defer
|
||||
|
||||
|
||||
@implementer(interfaces.IProtocolFactory, interfaces.ILoggingContext)
class Factory:
    """
    This is a factory which produces protocols.

    By default, buildProtocol will create a protocol of the class given in
    self.protocol.
    """

    # put a subclass of Protocol here:
    protocol = None

    # Count of ports/connectors currently using this factory; start/stop
    # hooks fire only on the 0 <-> 1 transitions.
    numPorts = 0
    # When true, starting and stopping are logged.
    noisy = True

    @classmethod
    def forProtocol(cls, protocol, *args, **kwargs):
        """
        Create a factory for the given protocol.

        It sets the C{protocol} attribute and returns the constructed factory
        instance.

        @param protocol: A L{Protocol} subclass

        @param args: Positional arguments for the factory.

        @param kwargs: Keyword arguments for the factory.

        @return: A L{Factory} instance wired up to C{protocol}.
        """
        factory = cls(*args, **kwargs)
        factory.protocol = protocol
        return factory

    def logPrefix(self):
        """
        Describe this factory for log messages.
        """
        return self.__class__.__name__

    def doStart(self):
        """Make sure startFactory is called.

        Users should not call this function themselves!
        """
        if not self.numPorts:
            if self.noisy:
                log.msg("Starting factory %r" % self)
            self.startFactory()
        self.numPorts = self.numPorts + 1

    def doStop(self):
        """Make sure stopFactory is called.

        Users should not call this function themselves!
        """
        if self.numPorts == 0:
            # this shouldn't happen, but does sometimes and this is better
            # than blowing up in assert as we did previously.
            return
        self.numPorts = self.numPorts - 1
        if not self.numPorts:
            if self.noisy:
                log.msg("Stopping factory %r" % self)
            self.stopFactory()

    def startFactory(self):
        """This will be called before I begin listening on a Port or Connector.

        It will only be called once, even if the factory is connected
        to multiple ports.

        This can be used to perform 'unserialization' tasks that
        are best put off until things are actually running, such
        as connecting to a database, opening files, etcetera.
        """

    def stopFactory(self):
        """This will be called before I stop listening on all Ports/Connectors.

        This can be overridden to perform 'shutdown' tasks such as disconnecting
        database connections, closing files, etc.

        It will be called, for example, before an application shuts down,
        if it was connected to a port. User code should not call this function
        directly.
        """

    def buildProtocol(self, addr):
        """Create an instance of a subclass of Protocol.

        The returned instance will handle input on an incoming server
        connection, and an attribute \"factory\" pointing to the creating
        factory.

        Override this method to alter how Protocol instances get created.

        @param addr: an object implementing L{twisted.internet.interfaces.IAddress}
        """
        p = self.protocol()
        p.factory = self
        return p
|
||||
|
||||
|
||||
class ClientFactory(Factory):
    """
    A Protocol factory for clients.

    This can be used together with the various connectXXX methods in
    reactors.
    """

    def startedConnecting(self, connector):
        """
        Called when a connection attempt has been initiated.

        Implementations may call C{connector.stopConnecting()} to abandon
        the attempt.

        @param connector: a Connector object.
        """

    def clientConnectionFailed(self, connector, reason):
        """
        Called when a connection attempt could not be established.

        Calling C{connector.connect()} from here will retry.

        @type reason: L{twisted.python.failure.Failure}
        """

    def clientConnectionLost(self, connector, reason):
        """
        Called when a previously established connection has been lost.

        Calling C{connector.connect()} from here will reconnect.

        @type reason: L{twisted.python.failure.Failure}
        """
|
||||
|
||||
|
||||
class _InstanceFactory(ClientFactory):
    """
    Factory used by ClientCreator.

    @ivar deferred: The L{Deferred} which represents this connection attempt and
        which will be fired when it succeeds or fails.

    @ivar pending: After a connection attempt succeeds or fails, a delayed call
        which will fire the L{Deferred} representing this connection attempt.
    """

    # A single-shot internal factory; never log its start/stop.
    noisy = False
    pending = None

    def __init__(self, reactor, instance, deferred):
        self.reactor = reactor
        # The pre-constructed protocol instance to hand out.
        self.instance = instance
        self.deferred = deferred

    def __repr__(self):
        return "<ClientCreator factory: %r>" % (self.instance, )

    def buildProtocol(self, addr):
        """
        Return the pre-constructed protocol instance and arrange to fire the
        waiting L{Deferred} to indicate success establishing the connection.
        """
        # Fire via callLater(0) so the Deferred fires from the reactor, not
        # reentrantly from inside the connection machinery; clearing
        # self.deferred marks the attempt as resolved.
        self.pending = self.reactor.callLater(
            0, self.fire, self.deferred.callback, self.instance)
        self.deferred = None
        return self.instance

    def clientConnectionFailed(self, connector, reason):
        """
        Arrange to fire the waiting L{Deferred} with the given failure to
        indicate the connection could not be established.
        """
        self.pending = self.reactor.callLater(
            0, self.fire, self.deferred.errback, reason)
        self.deferred = None

    def fire(self, func, value):
        """
        Clear C{self.pending} to avoid a reference cycle and then invoke func
        with the value.
        """
        self.pending = None
        func(value)
|
||||
|
||||
|
||||
|
||||
class ClientCreator:
    """
    Client connections that do not require a factory.

    The various connect* methods create a protocol instance using the given
    protocol class and arguments, and connect it, returning a Deferred of the
    resulting protocol instance.

    Useful for cases when we don't really need a factory.  Mainly this
    is when there is no shared state between protocol instances, and no need
    to reconnect.

    The C{connectTCP}, C{connectUNIX}, and C{connectSSL} methods each return a
    L{Deferred} which will fire with an instance of the protocol class passed to
    L{ClientCreator.__init__}.  These Deferred can be cancelled to abort the
    connection attempt (in a very unlikely case, cancelling the Deferred may not
    prevent the protocol from being instantiated and connected to a transport;
    if this happens, it will be disconnected immediately afterwards and the
    Deferred will still errback with L{CancelledError}).
    """

    def __init__(self, reactor, protocolClass, *args, **kwargs):
        # Stored construction arguments; a fresh protocol instance is built
        # from these for every connection attempt.
        self.reactor = reactor
        self.protocolClass = protocolClass
        self.args = args
        self.kwargs = kwargs

    def _connect(self, method, *args, **kwargs):
        """
        Initiate a connection attempt.

        @param method: A callable which will actually start the connection
            attempt.  For example, C{reactor.connectTCP}.

        @param *args: Positional arguments to pass to C{method}, excluding the
            factory.

        @param **kwargs: Keyword arguments to pass to C{method}.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        def cancelConnect(deferred):
            # Abort the in-progress attempt; if the instance factory has
            # already scheduled its result, cancel that delayed call too.
            connector.disconnect()
            if f.pending is not None:
                f.pending.cancel()
        d = defer.Deferred(cancelConnect)
        f = _InstanceFactory(
            self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
        connector = method(factory=f, *args, **kwargs)
        return d

    def connectTCP(self, host, port, timeout=30, bindAddress=None):
        """
        Connect to a TCP server.

        The parameters are all the same as to L{IReactorTCP.connectTCP} except
        that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectTCP, host, port, timeout=timeout,
            bindAddress=bindAddress)

    def connectUNIX(self, address, timeout=30, checkPID=False):
        """
        Connect to a Unix socket.

        The parameters are all the same as to L{IReactorUNIX.connectUNIX} except
        that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectUNIX, address, timeout=timeout,
            checkPID=checkPID)

    def connectSSL(self, host, port, contextFactory, timeout=30, bindAddress=None):
        """
        Connect to an SSL server.

        The parameters are all the same as to L{IReactorSSL.connectSSL} except
        that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectSSL, host, port,
            contextFactory=contextFactory, timeout=timeout,
            bindAddress=bindAddress)
|
||||
|
||||
|
||||
|
||||
class ReconnectingClientFactory(ClientFactory):
    """
    Factory which auto-reconnects clients with an exponential back-off.

    Note that clients should call my resetDelay method after they have
    connected successfully.

    @ivar maxDelay: Maximum number of seconds between connection attempts.
    @ivar initialDelay: Delay for the first reconnection attempt.
    @ivar factor: A multiplicitive factor by which the delay grows
    @ivar jitter: Percentage of randomness to introduce into the delay length
        to prevent stampeding.
    @ivar clock: The clock used to schedule reconnection. It's mainly useful to
        be parametrized in tests. If the factory is serialized, this attribute
        will not be serialized, and the default value (the reactor) will be
        restored when deserialized.
    @type clock: L{IReactorTime}
    @ivar maxRetries: Maximum number of consecutive unsuccessful connection
        attempts, after which no further connection attempts will be made. If
        this is not explicitly set, no maximum is applied.
    """
    maxDelay = 3600
    initialDelay = 1.0
    # Note: These highly sensitive factors have been precisely measured by
    # the National Institute of Science and Technology. Take extreme care
    # in altering them, or you may damage your Internet!
    # (Seriously: <http://physics.nist.gov/cuu/Constants/index.html>)
    factor = 2.7182818284590451 # (math.e)
    # Phi = 1.6180339887498948 # (Phi is acceptable for use as a
    # factor if e is too large for your application.)
    jitter = 0.11962656472 # molar Planck constant times c, joule meter/mole

    # Per-connection mutable state; deliberately excluded from pickling by
    # __getstate__ so a deserialized factory starts fresh.
    delay = initialDelay
    retries = 0
    maxRetries = None
    _callID = None          # pending IDelayedCall for the next retry, if any
    connector = None        # connector remembered for implicit retry()
    clock = None            # scheduling clock; defaults lazily to the reactor

    continueTrying = 1      # cleared by stopTrying() to abandon reconnection


    def clientConnectionFailed(self, connector, reason):
        # A connection attempt failed outright; remember the connector and
        # schedule another attempt (unless stopTrying() was called).
        if self.continueTrying:
            self.connector = connector
            self.retry()


    def clientConnectionLost(self, connector, unused_reason):
        # An established connection was lost; reconnect with back-off.
        if self.continueTrying:
            self.connector = connector
            self.retry()


    def retry(self, connector=None):
        """
        Have this connector connect again, after a suitable delay.

        @param connector: the connector to reconnect; defaults to the one
            remembered from the last failure/loss.  Raises L{ValueError} if
            neither is available.
        """
        if not self.continueTrying:
            if self.noisy:
                log.msg("Abandoning %s on explicit request" % (connector,))
            return

        if connector is None:
            if self.connector is None:
                raise ValueError("no connector to retry")
            else:
                connector = self.connector

        self.retries += 1
        if self.maxRetries is not None and (self.retries > self.maxRetries):
            if self.noisy:
                log.msg("Abandoning %s after %d retries." %
                        (connector, self.retries))
            return

        # Exponential back-off, capped at maxDelay...
        self.delay = min(self.delay * self.factor, self.maxDelay)
        if self.jitter:
            # ...with gaussian jitter so a fleet of clients does not
            # reconnect in lock-step (the "thundering herd" problem).
            self.delay = random.normalvariate(self.delay,
                                              self.delay * self.jitter)

        if self.noisy:
            log.msg("%s will retry in %d seconds" % (connector, self.delay,))

        def reconnector():
            # Clear _callID first: once we are running, there is no longer a
            # pending call for stopTrying() to cancel.
            self._callID = None
            connector.connect()
        if self.clock is None:
            from twisted.internet import reactor
            self.clock = reactor
        self._callID = self.clock.callLater(self.delay, reconnector)


    def stopTrying(self):
        """
        Put a stop to any attempt to reconnect in progress.
        """
        # ??? Is this function really stopFactory?
        if self._callID:
            self._callID.cancel()
            self._callID = None
        self.continueTrying = 0
        if self.connector:
            try:
                self.connector.stopConnecting()
            except error.NotConnectingError:
                # Nothing in flight; that is fine.
                pass


    def resetDelay(self):
        """
        Call this method after a successful connection: it resets the delay and
        the retry counter.
        """
        self.delay = self.initialDelay
        self.retries = 0
        self._callID = None
        self.continueTrying = 1


    def __getstate__(self):
        """
        Remove all of the state which is mutated by connection attempts and
        failures, returning just the state which describes how reconnections
        should be attempted. This will make the unserialized instance
        behave just as this one did when it was first instantiated.
        """
        state = self.__dict__.copy()
        for key in ['connector', 'retries', 'delay',
                    'continueTrying', '_callID', 'clock']:
            if key in state:
                del state[key]
        return state
|
||||
|
||||
|
||||
|
||||
class ServerFactory(Factory):
    """Subclass this to indicate that your protocol.Factory is only usable for servers.

    This is a pure marker class: it adds no behavior beyond L{Factory}, but
    lets code and documentation distinguish server-side factories from
    client-side ones.
    """
|
||||
|
||||
|
||||
|
||||
class BaseProtocol:
    """
    Abstract superclass shared by all protocol implementations.

    Provides the minimal connection bookkeeping (the C{connected} flag and
    the C{transport} attribute) plus the C{makeConnection} /
    C{connectionMade} hand-off.  The direct subclasses, L{Protocol} and
    L{ProcessProtocol}, are the interesting starting points.
    """
    # Becomes 1 once makeConnection() has been called.
    connected = 0
    # The transport this protocol writes to; None until connected.
    transport = None

    def makeConnection(self, transport):
        """
        Associate this protocol with C{transport} and fire the
        C{connectionMade} callback.

        @param transport: the transport this protocol will use for output.
        """
        self.transport = transport
        self.connected = 1
        self.connectionMade()

    def connectionMade(self):
        """
        Callback invoked once the connection is established.

        Acts as the protocol's initializer: for clients it runs after the
        connection to the server succeeds; for servers it runs after
        accept() returns a socket.  Send any greeting or initial message
        here.
        """
|
||||
|
||||
# Shared singleton Failure used as the default "clean close" reason passed
# to Protocol.connectionLost.  cleanFailure() strips the captured traceback
# so this module-level instance does not keep stack frames alive.
connectionDone=failure.Failure(error.ConnectionDone())
connectionDone.cleanFailure()
|
||||
|
||||
|
||||
@implementer(interfaces.IProtocol, interfaces.ILoggingContext)
class Protocol(BaseProtocol):
    """
    This is the base class for streaming connection-oriented protocols.

    If you are going to write a new connection-oriented protocol for Twisted,
    start here. Any protocol implementation, either client or server, should
    be a subclass of this class.

    The API is quite simple. Implement L{dataReceived} to handle both
    event-based and synchronous input; output can be sent through the
    'transport' attribute, which is to be an instance that implements
    L{twisted.internet.interfaces.ITransport}. Override C{connectionLost} to be
    notified when the connection ends.

    Some subclasses exist already to help you write common types of protocols:
    see the L{twisted.protocols.basic} module for a few of them.
    """

    def logPrefix(self):
        """
        Return a prefix matching the class name, to identify log messages
        related to this protocol instance.
        """
        return self.__class__.__name__


    def dataReceived(self, data):
        """Called whenever data is received.

        Use this method to translate to a higher-level message. Usually, some
        callback will be made upon the receipt of each complete protocol
        message.

        @param data: a string of indeterminate length. Please keep in mind
            that you will probably need to buffer some data, as partial
            (or multiple) protocol messages may be received! I recommend
            that unit tests for protocols call through to this method with
            differing chunk sizes, down to one byte at a time.
        """

    def connectionLost(self, reason=connectionDone):
        """Called when the connection is shut down.

        Clear any circular references here, and any external references
        to this Protocol. The connection has been closed.

        @type reason: L{twisted.python.failure.Failure}
        """
        # NOTE: the default ``reason`` is the module-level ``connectionDone``
        # Failure, i.e. a clean close.
|
||||
|
||||
|
||||
@implementer(interfaces.IConsumer)
class ProtocolToConsumerAdapter(components.Adapter):
    """
    Adapt an L{IProtocol} to L{IConsumer}: data written to this consumer is
    delivered to the wrapped protocol's C{dataReceived}.
    """

    def write(self, data):
        # Forward consumer writes straight into the protocol.
        self.original.dataReceived(data)

    def registerProducer(self, producer, streaming):
        # Producers are ignored: this adapter applies no flow control.
        pass

    def unregisterProducer(self):
        pass

# Register so that IConsumer(someProtocol) returns this adapter.
components.registerAdapter(ProtocolToConsumerAdapter, interfaces.IProtocol,
                           interfaces.IConsumer)
|
||||
|
||||
@implementer(interfaces.IProtocol)
class ConsumerToProtocolAdapter(components.Adapter):
    """
    Adapt an L{IConsumer} to L{IProtocol}: received data is written to the
    wrapped consumer; connection lifecycle events are ignored.
    """

    def dataReceived(self, data):
        # Forward received bytes into the consumer.
        self.original.write(data)

    def connectionLost(self, reason):
        pass

    def makeConnection(self, transport):
        pass

    def connectionMade(self):
        pass

# Register so that IProtocol(someConsumer) returns this adapter.
components.registerAdapter(ConsumerToProtocolAdapter, interfaces.IConsumer,
                           interfaces.IProtocol)
|
||||
|
||||
@implementer(interfaces.IProcessProtocol)
class ProcessProtocol(BaseProtocol):
    """
    Base process protocol implementation which does simple dispatching for
    stdin, stdout, and stderr file descriptors.
    """

    def childDataReceived(self, childFD, data):
        """
        Dispatch data arriving on a child file descriptor: fd 1 is routed to
        C{outReceived}, fd 2 to C{errReceived}; any other fd is ignored.
        """
        handler = {1: self.outReceived, 2: self.errReceived}.get(childFD)
        if handler is not None:
            handler(data)


    def outReceived(self, data):
        """
        Some data was received from stdout.
        """


    def errReceived(self, data):
        """
        Some data was received from stderr.
        """


    def childConnectionLost(self, childFD):
        """
        Dispatch the closing of a child file descriptor to the matching
        C{inConnectionLost} / C{outConnectionLost} / C{errConnectionLost}
        hook; any other fd is ignored.
        """
        handler = {
            0: self.inConnectionLost,
            1: self.outConnectionLost,
            2: self.errConnectionLost,
        }.get(childFD)
        if handler is not None:
            handler()


    def inConnectionLost(self):
        """
        This will be called when stdin is closed.
        """


    def outConnectionLost(self):
        """
        This will be called when stdout is closed.
        """


    def errConnectionLost(self):
        """
        This will be called when stderr is closed.
        """


    def processExited(self, reason):
        """
        This will be called when the subprocess exits.

        @type reason: L{twisted.python.failure.Failure}
        """


    def processEnded(self, reason):
        """
        Called when the child process exits and all file descriptors
        associated with it have been closed.

        @type reason: L{twisted.python.failure.Failure}
        """
|
||||
|
||||
|
||||
|
||||
class AbstractDatagramProtocol:
    """
    Abstract protocol for datagram-oriented transports, e.g. IP, ICMP, ARP, UDP.
    """

    # The transport this protocol is attached to; None when not connected.
    transport = None
    # How many ports are currently connected to this protocol instance.
    numPorts = 0
    # Whether to log start/stop events.
    noisy = True

    def __getstate__(self):
        """
        Serialization hook: never pickle the live transport object.
        """
        d = self.__dict__.copy()
        d['transport'] = None
        return d

    def doStart(self):
        """Make sure startProtocol is called.

        This will be called by makeConnection(), users should not call it.
        """
        # startProtocol fires only for the first port; further ports just
        # bump the reference count.
        if not self.numPorts:
            if self.noisy:
                log.msg("Starting protocol %s" % self)
            self.startProtocol()
        self.numPorts = self.numPorts + 1

    def doStop(self):
        """Make sure stopProtocol is called.

        This will be called by the port, users should not call it.
        """
        assert self.numPorts > 0
        self.numPorts = self.numPorts - 1
        self.transport = None
        # stopProtocol fires only when the last port disconnects.
        if not self.numPorts:
            if self.noisy:
                log.msg("Stopping protocol %s" % self)
            self.stopProtocol()

    def startProtocol(self):
        """Called when a transport is connected to this protocol.

        Will only be called once, even if multiple ports are connected.
        """

    def stopProtocol(self):
        """Called when the transport is disconnected.

        Will only be called once, after all ports are disconnected.
        """

    def makeConnection(self, transport):
        """Make a connection to a transport and a server.

        This sets the 'transport' attribute of this DatagramProtocol, and calls the
        doStart() callback.
        """
        # Fixed: identity comparison (``is None``) instead of the original
        # equality comparison (``== None``).
        assert self.transport is None
        self.transport = transport
        self.doStart()

    def datagramReceived(self, datagram, addr):
        """Called when a datagram is received.

        @param datagram: the string received from the transport.
        @param addr: tuple of source of datagram.
        """
|
||||
|
||||
|
||||
@implementer(interfaces.ILoggingContext)
class DatagramProtocol(AbstractDatagramProtocol):
    """
    Protocol for datagram-oriented transport, e.g. UDP.

    @type transport: C{NoneType} or
        L{IUDPTransport<twisted.internet.interfaces.IUDPTransport>} provider
    @ivar transport: The transport with which this protocol is associated,
        if it is associated with one.
    """

    def logPrefix(self):
        """
        Return a prefix matching the class name, to identify log messages
        related to this protocol instance.
        """
        return self.__class__.__name__


    def connectionRefused(self):
        """Called due to error from write in connected mode.

        Note this is a result of ICMP message generated by *previous*
        write.
        """
|
||||
|
||||
|
||||
class ConnectedDatagramProtocol(DatagramProtocol):
    """Protocol for connected datagram-oriented transport.

    No longer necessary for UDP.
    """

    def datagramReceived(self, datagram):
        """Called when a datagram is received.

        Unlike the parent class, no source address is passed: a connected
        transport has a fixed peer.

        @param datagram: the string received from the transport.
        """

    def connectionFailed(self, failure):
        """Called if connecting failed.

        Usually this will be due to a DNS lookup failure.
        """
|
||||
|
||||
|
||||
|
||||
@implementer(interfaces.ITransport)
class FileWrapper:
    """A wrapper around a file-like object to make it behave as a Transport.

    This doesn't actually stream the file to the attached protocol,
    and is thus useful mainly as a utility for debugging protocols.
    """

    # Becomes 1 once loseConnection() has run.
    closed = 0
    disconnecting = 0
    # The currently registered producer, if any.
    producer = None
    streamingProducer = 0

    def __init__(self, file):
        """
        @param file: a file-like object (anything with write()/close()).
        """
        self.file = file

    def write(self, data):
        """
        Write C{data} to the wrapped file; errors go to C{handleException}.
        """
        try:
            self.file.write(data)
        except Exception:
            # Fixed: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            self.handleException()
        # self._checkProducer()

    def _checkProducer(self):
        # Cheating; this is called at "idle" times to allow producers to be
        # found and dealt with
        if self.producer:
            self.producer.resumeProducing()

    def registerProducer(self, producer, streaming):
        """From abstract.FileDescriptor
        """
        self.producer = producer
        self.streamingProducer = streaming
        if not streaming:
            # A pull producer must be prodded to produce its first chunk.
            producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def stopConsuming(self):
        self.unregisterProducer()
        self.loseConnection()

    def writeSequence(self, iovec):
        # Join and route through write() so error handling is uniform.
        self.write("".join(iovec))

    def loseConnection(self):
        self.closed = 1
        try:
            self.file.close()
        except (IOError, OSError):
            self.handleException()

    def getPeer(self):
        # XXX: According to ITransport, this should return an IAddress!
        return 'file', 'file'

    def getHost(self):
        # XXX: According to ITransport, this should return an IAddress!
        return 'file'

    def handleException(self):
        # Errors are deliberately ignored in this debugging helper.
        pass

    def resumeProducing(self):
        # Never sends data anyways
        pass

    def pauseProducing(self):
        # Never sends data anyways
        pass

    def stopProducing(self):
        self.loseConnection()
|
||||
|
||||
|
||||
# Public API of this module.  BaseProtocol and the two adapters are
# deliberately not exported.
__all__ = ["Factory", "ClientFactory", "ReconnectingClientFactory", "connectionDone",
           "Protocol", "ProcessProtocol", "FileWrapper", "ServerFactory",
           "AbstractDatagramProtocol", "DatagramProtocol", "ConnectedDatagramProtocol",
           "ClientCreator"]
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
This module integrates PyUI with twisted.internet's mainloop.
|
||||
|
||||
Maintainer: Jp Calderone
|
||||
|
||||
See doc/examples/pyuidemo.py for example usage.
|
||||
"""
|
||||
|
||||
# System imports
|
||||
import pyui
|
||||
|
||||
def _guiUpdate(reactor, delay):
    # Render one PyUI frame.  pyui.update() returning 0 means the GUI has
    # been closed, so shut down PyUI and stop the reactor; otherwise
    # re-schedule ourselves to keep the display refreshing.
    pyui.draw()
    if pyui.update() == 0:
        pyui.quit()
        reactor.stop()
    else:
        reactor.callLater(delay, _guiUpdate, reactor, delay)
|
||||
|
||||
|
||||
def install(ms=10, reactor=None, args=(), kw=None):
    """
    Schedule PyUI's display to be updated approximately every C{ms}
    milliseconds, and initialize PyUI with the specified arguments.

    @param ms: update period in milliseconds.
    @param reactor: the reactor to schedule updates with; defaults to the
        global reactor.
    @param args: positional arguments passed through to C{pyui.init}.
    @param kw: keyword arguments passed through to C{pyui.init}.
    @return: whatever C{pyui.init} returns.
    """
    # Fixed: the mutable default argument ``kw={}`` was shared between
    # calls; use the None sentinel instead.
    if kw is None:
        kw = {}
    d = pyui.init(*args, **kw)

    if reactor is None:
        from twisted.internet import reactor
    _guiUpdate(reactor, ms / 1000.0)
    return d
|
||||
|
||||
__all__ = ["install"]
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_qtreactor -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
try:
    # 'import qtreactor' would have imported this file instead of the
    # top-level qtreactor. __import__ does the right thing
    # (kids, don't repeat this at home)
    install = __import__('qtreactor').install
except ImportError:
    # The external qtreactor package is required; without it, re-raise with
    # a message explaining where to get it.
    from twisted.plugins.twisted_qtstub import errorMessage
    raise ImportError(errorMessage)
else:
    # The package was found: importing via this legacy path still works but
    # is deprecated.
    import warnings
    warnings.warn("Please use qtreactor instead of twisted.internet.qtreactor",
                  category=DeprecationWarning)

__all__ = ['install']
|
||||
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
The reactor is the Twisted event loop within Twisted, the loop which drives
|
||||
applications using Twisted. The reactor provides APIs for networking,
|
||||
threading, dispatching events, and more.
|
||||
|
||||
The default reactor depends on the platform and will be installed if this
|
||||
module is imported without another reactor being explicitly installed
|
||||
beforehand. Regardless of which reactor is installed, importing this module is
|
||||
the correct way to get a reference to it.
|
||||
|
||||
New application code should prefer to pass and accept the reactor as a
|
||||
parameter where it is needed, rather than relying on being able to import this
|
||||
module to get a reference. This simplifies unit testing and may make it easier
|
||||
to one day support multiple reactors (as a performance enhancement), though
|
||||
this is not currently possible.
|
||||
|
||||
@see: L{IReactorCore<twisted.internet.interfaces.IReactorCore>}
|
||||
@see: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}
|
||||
@see: L{IReactorProcess<twisted.internet.interfaces.IReactorProcess>}
|
||||
@see: L{IReactorTCP<twisted.internet.interfaces.IReactorTCP>}
|
||||
@see: L{IReactorSSL<twisted.internet.interfaces.IReactorSSL>}
|
||||
@see: L{IReactorUDP<twisted.internet.interfaces.IReactorUDP>}
|
||||
@see: L{IReactorMulticast<twisted.internet.interfaces.IReactorMulticast>}
|
||||
@see: L{IReactorUNIX<twisted.internet.interfaces.IReactorUNIX>}
|
||||
@see: L{IReactorUNIXDatagram<twisted.internet.interfaces.IReactorUNIXDatagram>}
|
||||
@see: L{IReactorFDSet<twisted.internet.interfaces.IReactorFDSet>}
|
||||
@see: L{IReactorThreads<twisted.internet.interfaces.IReactorThreads>}
|
||||
@see: L{IReactorPluggableResolver<twisted.internet.interfaces.IReactorPluggableResolver>}
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import sys
# Remove this module from sys.modules before installing: installing the
# default reactor re-binds the 'twisted.internet.reactor' name to the
# concrete reactor object, so this placeholder module must not linger.
del sys.modules['twisted.internet.reactor']
from twisted.internet import default
default.install()
|
||||
|
|
@ -0,0 +1,200 @@
|
|||
# -*- test-case-name: twisted.test.test_internet -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Select reactor
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from time import sleep
|
||||
import sys, select, socket
|
||||
from errno import EINTR, EBADF
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.internet.interfaces import IReactorFDSet
|
||||
from twisted.internet import posixbase
|
||||
from twisted.python import log
|
||||
from twisted.python.runtime import platformType
|
||||
|
||||
|
||||
def win32select(r, w, e, timeout=None):
    """
    select() wrapper working around Windows limitations.

    Windows select() returns immediately when given empty fd lists and does
    not deliver signals while blocked, so this emulates an empty-list wait
    with a short sleep and caps the timeout at 0.5s to keep Ctrl-C
    responsive.  Failed connection attempts are reported via the
    "exceptional" set, which is why the write list is passed there as well
    and the exceptional results are folded back into the writers.
    """
    if not (r or w):
        # Nothing to select on: emulate the wait with a brief sleep.
        pause = 0.01 if timeout is None else min(timeout, 0.001)
        sleep(pause)
        return [], [], []
    capped = 0.5 if (timeout is None or timeout > 0.5) else timeout
    readable, writable, excepted = select.select(r, w, w, capped)
    return readable, writable + excepted, []
|
||||
|
||||
# Pick the platform-appropriate select implementation.
if platformType == "win32":
    _select = win32select
else:
    _select = select.select


# Optional mixin adding threaded win32 event support; fall back to a plain
# base class when win32eventreactor is unavailable.
try:
    from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
except ImportError:
    _extraBase = object
else:
    _extraBase = _ThreadedWin32EventsMixin
|
||||
|
||||
|
||||
@implementer(IReactorFDSet)
class SelectReactor(posixbase.PosixReactorBase, _extraBase):
    """
    A select() based reactor - runs on all POSIX platforms and on Win32.

    @ivar _reads: A set containing L{FileDescriptor} instances which will be
        checked for read events.

    @ivar _writes: A set containing L{FileDescriptor} instances which will be
        checked for writability.
    """

    def __init__(self):
        """
        Initialize file descriptor tracking dictionaries and the base class.
        """
        self._reads = set()
        self._writes = set()
        posixbase.PosixReactorBase.__init__(self)


    def _preenDescriptors(self):
        """
        Remove broken selectables after select() failed: probe each tracked
        descriptor individually and disconnect the ones that error out.
        """
        log.msg("Malformed file descriptor found. Preening lists.")
        readers = list(self._reads)
        writers = list(self._writes)
        self._reads.clear()
        self._writes.clear()
        for selSet, selList in ((self._reads, readers),
                                (self._writes, writers)):
            for selectable in selList:
                try:
                    # Zero-timeout probe: raises if this descriptor is bad.
                    select.select([selectable], [selectable], [selectable], 0)
                except Exception as e:
                    log.msg("bad descriptor %s" % selectable)
                    self._disconnectSelectable(selectable, e, False)
                else:
                    selSet.add(selectable)


    def doSelect(self, timeout):
        """
        Run one iteration of the I/O monitor loop.

        This will run all selectables who had input or output readiness
        waiting for them.
        """
        try:
            r, w, ignored = _select(self._reads,
                                    self._writes,
                                    [], timeout)
        except ValueError:
            # Possibly a file descriptor has gone negative?
            self._preenDescriptors()
            return
        except TypeError:
            # Something *totally* invalid (object w/o fileno, non-integral
            # result) was passed
            log.err()
            self._preenDescriptors()
            return
        except (select.error, socket.error, IOError) as se:
            # select(2) encountered an error, perhaps while calling the fileno()
            # method of a socket. (Python 2.6 socket.error is an IOError
            # subclass, but on Python 2.5 and earlier it is not.)
            if se.args[0] in (0, 2):
                # windows does this if it got an empty list
                if (not self._reads) and (not self._writes):
                    return
                else:
                    raise
            elif se.args[0] == EINTR:
                # Interrupted by a signal; just return to the main loop.
                return
            elif se.args[0] == EBADF:
                # Some tracked descriptor is stale; weed it out.
                self._preenDescriptors()
                return
            else:
                # OK, I really don't know what's going on. Blow up.
                raise

        # Bind hot-loop lookups to locals before dispatching events.
        _drdw = self._doReadOrWrite
        _logrun = log.callWithLogger
        for selectables, method, fdset in ((r, "doRead", self._reads),
                                           (w,"doWrite", self._writes)):
            for selectable in selectables:
                # if this was disconnected in another thread, kill it.
                # ^^^^ --- what the !@#*? serious! -exarkun
                if selectable not in fdset:
                    continue
                # This for pausing input when we're not ready for more.
                _logrun(selectable, _drdw, selectable, method)

    # IReactorCore: one reactor iteration is one select() pass.
    doIteration = doSelect

    def _doReadOrWrite(self, selectable, method):
        """
        Invoke C{selectable.doRead()} or C{.doWrite()}; a truthy return or a
        raised exception disconnects the selectable.
        """
        try:
            why = getattr(selectable, method)()
        except:
            why = sys.exc_info()[1]
            log.err()
        if why:
            self._disconnectSelectable(selectable, why, method=="doRead")

    def addReader(self, reader):
        """
        Add a FileDescriptor for notification of data available to read.
        """
        self._reads.add(reader)

    def addWriter(self, writer):
        """
        Add a FileDescriptor for notification of data available to write.
        """
        self._writes.add(writer)

    def removeReader(self, reader):
        """
        Remove a Selectable for notification of data available to read.
        """
        self._reads.discard(reader)

    def removeWriter(self, writer):
        """
        Remove a Selectable for notification of data available to write.
        """
        self._writes.discard(writer)

    def removeAll(self):
        """
        Remove and return every tracked reader and writer (internal ones
        excluded by the base-class helper).
        """
        return self._removeAll(self._reads, self._writes)


    def getReaders(self):
        """
        Return a list snapshot of the current readers.
        """
        return list(self._reads)


    def getWriters(self):
        """
        Return a list snapshot of the current writers.
        """
        return list(self._writes)
|
||||
|
||||
|
||||
|
||||
def install():
    """
    Configure the twisted mainloop to be run using the select() reactor.
    """
    selectReactor = SelectReactor()
    from twisted.internet.main import installReactor
    installReactor(selectReactor)
|
||||
|
||||
__all__ = ['install']
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
|
||||
"""
|
||||
Serial Port Protocol
|
||||
"""
|
||||
|
||||
# http://twistedmatrix.com/trac/ticket/3725#comment:24
|
||||
# Apparently applications use these names even though they should
|
||||
# be imported from pyserial
|
||||
__all__ = ["serial", "PARITY_ODD", "PARITY_EVEN", "PARITY_NONE",
|
||||
"STOPBITS_TWO", "STOPBITS_ONE", "FIVEBITS",
|
||||
"EIGHTBITS", "SEVENBITS", "SIXBITS",
|
||||
# Name this module is actually trying to export
|
||||
"SerialPort"]
|
||||
|
||||
# system imports
|
||||
import os, sys
|
||||
|
||||
# all of them require pyserial at the moment, so check that first
|
||||
import serial
|
||||
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
|
||||
from serial import STOPBITS_ONE, STOPBITS_TWO
|
||||
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
|
||||
|
||||
|
||||
|
||||
class BaseSerialPort:
    """
    Base class for Windows and POSIX serial ports.

    @ivar _serialFactory: a pyserial C{serial.Serial} factory, used to create
        the instance stored in C{self._serial}. Overrideable to enable easier
        testing.

    @ivar _serial: a pyserial C{serial.Serial} instance used to manage the
        options on the serial port.
    """

    _serialFactory = serial.Serial


    def setBaudRate(self, baudrate):
        # Some pyserial versions spell this setBaudrate; try that first and
        # fall back to setBaudRate.
        if hasattr(self._serial, "setBaudrate"):
            self._serial.setBaudrate(baudrate)
        else:
            self._serial.setBaudRate(baudrate)

    def inWaiting(self):
        """Return the number of bytes waiting in the input buffer."""
        return self._serial.inWaiting()

    def flushInput(self):
        """Discard the contents of the input buffer."""
        self._serial.flushInput()

    def flushOutput(self):
        """Discard the contents of the output buffer."""
        self._serial.flushOutput()

    def sendBreak(self):
        """Transmit a serial break condition."""
        self._serial.sendBreak()

    def getDSR(self):
        """Return the state of the DSR (data set ready) line."""
        return self._serial.getDSR()

    def getCD(self):
        """Return the state of the CD (carrier detect) line."""
        return self._serial.getCD()

    def getRI(self):
        """Return the state of the RI (ring indicator) line."""
        return self._serial.getRI()

    def getCTS(self):
        """Return the state of the CTS (clear to send) line."""
        return self._serial.getCTS()

    def setDTR(self, on = 1):
        """Set (or clear) the DTR (data terminal ready) line."""
        self._serial.setDTR(on)

    def setRTS(self, on = 1):
        """Set (or clear) the RTS (request to send) line."""
        self._serial.setRTS(on)
|
||||
|
||||
class SerialPort(BaseSerialPort):
    # Placeholder definition; replaced below with the platform-specific
    # implementation when one is available.
    pass

# replace SerialPort with appropriate serial port
if os.name == 'posix':
    from twisted.internet._posixserialport import SerialPort
elif sys.platform == 'win32':
    from twisted.internet._win32serialport import SerialPort
|
||||
243
Linux_i686/lib/python2.7/site-packages/twisted/internet/ssl.py
Normal file
243
Linux_i686/lib/python2.7/site-packages/twisted/internet/ssl.py
Normal file
|
|
@ -0,0 +1,243 @@
|
|||
# -*- test-case-name: twisted.test.test_ssl -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
This module implements Transport Layer Security (TLS) support for Twisted. It
|
||||
requires U{PyOpenSSL <https://pypi.python.org/pypi/pyOpenSSL>}.
|
||||
|
||||
If you wish to establish a TLS connection, please use one of the following
|
||||
APIs:
|
||||
|
||||
- SSL endpoints for L{servers
|
||||
<twisted.internet.endpoints.SSL4ServerEndpoint>} and L{clients
|
||||
<twisted.internet.endpoints.SSL4ClientEndpoint>}
|
||||
|
||||
- L{startTLS <twisted.internet.interfaces.ITLSTransport.startTLS>}
|
||||
|
||||
- L{connectSSL <twisted.internet.interfaces.IReactorSSL.connectSSL>}
|
||||
|
||||
- L{listenSSL <twisted.internet.interfaces.IReactorSSL.listenSSL>}
|
||||
|
||||
These APIs all require a C{contextFactory} argument that specifies their
|
||||
security properties, such as certificate, private key, certificate authorities
|
||||
to verify the peer, allowed TLS protocol versions, cipher suites, and so on.
|
||||
The recommended value for this argument is a L{CertificateOptions} instance;
|
||||
see its documentation for an explanation of the available options.
|
||||
|
||||
The C{contextFactory} name is a bit of an anachronism now, as context factories
|
||||
have been replaced with "connection creators", but these objects serve the same
|
||||
role.
|
||||
|
||||
Be warned that implementing your own connection creator (i.e.: value for the
|
||||
C{contextFactory}) is both difficult and dangerous; the Twisted team has worked
|
||||
hard to make L{CertificateOptions}' API comprehensible and unsurprising, and
|
||||
the Twisted team is actively maintaining it to ensure that it becomes more
|
||||
secure over time.
|
||||
|
||||
If you are really absolutely sure that you want to take on the risk of
|
||||
implementing your own connection creator based on the pyOpenSSL API, see the
|
||||
L{server connection creator
|
||||
<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} and L{client
|
||||
connection creator
|
||||
<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} interfaces.
|
||||
|
||||
Developers using Twisted, please ignore the L{Port}, L{Connector}, and
|
||||
L{Client} classes defined here, as these are details of certain reactors' TLS
|
||||
implementations, exposed by accident (and remaining here only for compatibility
|
||||
reasons). If you wish to establish a TLS connection, please use one of the
|
||||
APIs listed above.
|
||||
|
||||
@note: "SSL" (Secure Sockets Layer) is an antiquated synonym for "TLS"
|
||||
(Transport Layer Security). You may see these terms used interchangeably
|
||||
throughout the documentation.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
# System imports
|
||||
from OpenSSL import SSL
|
||||
supported = True
|
||||
|
||||
from zope.interface import implementer, implementer_only, implementedBy
|
||||
|
||||
# Twisted imports
|
||||
from twisted.internet import tcp, interfaces
|
||||
|
||||
|
||||
class ContextFactory:
    """
    A factory for SSL context objects, for server SSL connections.
    """

    # Server-side factory; client-side factories set this to 1.
    isClient = 0

    def getContext(self):
        """
        Return an C{SSL.Context} object.

        Subclasses must override this method to supply a real context.
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class DefaultOpenSSLContextFactory(ContextFactory):
    """
    L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
    objects.  These objects define certain parameters related to SSL
    handshakes and the subsequent connection.

    @ivar _contextFactory: A callable which will be used to create new
        context objects.  This is typically L{SSL.Context}.
    """
    # Lazily-built SSL.Context; class-level default until cacheContext() runs.
    _context = None

    def __init__(self, privateKeyFileName, certificateFileName,
                 sslmethod=SSL.SSLv23_METHOD, _contextFactory=SSL.Context):
        """
        @param privateKeyFileName: Name of a file containing a private key
        @param certificateFileName: Name of a file containing a certificate
        @param sslmethod: The SSL method to use
        """
        self.privateKeyFileName = privateKeyFileName
        self.certificateFileName = certificateFileName
        self.sslmethod = sslmethod
        self._contextFactory = _contextFactory

        # Create a context object right now.  This is to force validation of
        # the given parameters so that errors are detected earlier rather
        # than later.
        self.cacheContext()

    def cacheContext(self):
        # Build and memoize the context on first call; subsequent calls are
        # no-ops, so key/certificate files are read only once.
        if self._context is None:
            ctx = self._contextFactory(self.sslmethod)
            # Disallow SSLv2!  It's insecure!  SSLv3 has been around since
            # 1996.  It's time to move on.
            ctx.set_options(SSL.OP_NO_SSLv2)
            ctx.use_certificate_file(self.certificateFileName)
            ctx.use_privatekey_file(self.privateKeyFileName)
            self._context = ctx

    def __getstate__(self):
        # SSL.Context objects are not picklable; drop the cached context and
        # keep only the parameters needed to rebuild it.
        d = self.__dict__.copy()
        del d['_context']
        return d

    def __setstate__(self, state):
        # NOTE(review): after unpickling, _context falls back to the class
        # default None, so getContext() returns None until cacheContext() is
        # called again — presumably callers re-cache; confirm.
        self.__dict__ = state

    def getContext(self):
        """
        Return an SSL context.
        """
        return self._context
|
||||
|
||||
|
||||
class ClientContextFactory:
    """A context factory for SSL clients."""

    # Marks this factory as client-side (see ContextFactory.isClient).
    isClient = 1

    # SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1.  We disable SSLv2 below,
    # though.
    method = SSL.SSLv23_METHOD

    # Callable used to create context objects; overridable for testing.
    _contextFactory = SSL.Context

    def getContext(self):
        """
        Return a fresh L{SSL.Context} configured with this factory's method,
        with SSLv2 disabled.  A new context is created on every call.
        """
        ctx = self._contextFactory(self.method)
        # See comment in DefaultOpenSSLContextFactory about SSLv2.
        ctx.set_options(SSL.OP_NO_SSLv2)
        return ctx
|
||||
|
||||
|
||||
|
||||
# Declare ISSLTransport plus everything tcp.Client implements, except
# ITLSTransport: once the connection is TLS from the start, offering
# startTLS again would be misleading.
@implementer_only(interfaces.ISSLTransport,
                  *[i for i in implementedBy(tcp.Client)
                    if i != interfaces.ITLSTransport])
class Client(tcp.Client):
    """
    I am an SSL client.
    """

    def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
        # tcp.Client.__init__ depends on self.ctxFactory being set
        self.ctxFactory = ctxFactory
        tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)

    def _connectDone(self):
        # Negotiate TLS immediately once the TCP connection is established,
        # then resume writing and let the base class finish connection setup.
        self.startTLS(self.ctxFactory)
        self.startWriting()
        tcp.Client._connectDone(self)
|
||||
|
||||
|
||||
|
||||
@implementer(interfaces.ISSLTransport)
class Server(tcp.Server):
    """
    I am an SSL server.
    """

    def __init__(self, *args, **kwargs):
        tcp.Server.__init__(self, *args, **kwargs)
        # Start TLS right away using the listening port's context factory.
        # self.server is set by tcp.Server.__init__ — presumably the Port
        # that accepted this connection (which stores ctxFactory); confirm.
        self.startTLS(self.server.ctxFactory)
|
||||
|
||||
|
||||
|
||||
class Port(tcp.Port):
    """
    I am an SSL port.
    """
    # Accepted connections are wrapped in the TLS-enabled Server transport.
    transport = Server

    # Used by tcp.Port for log/repr annotations.
    _type = 'TLS'

    def __init__(self, port, factory, ctxFactory, backlog=50, interface='', reactor=None):
        """
        @param ctxFactory: A server context factory (e.g.
            L{DefaultOpenSSLContextFactory}) used by each accepted
            connection's C{startTLS}.
        """
        tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
        self.ctxFactory = ctxFactory

    def _getLogPrefix(self, factory):
        """
        Override the normal prefix to include an annotation indicating this is a
        port for TLS connections.
        """
        return tcp.Port._getLogPrefix(self, factory) + ' (TLS)'
|
||||
|
||||
|
||||
|
||||
class Connector(tcp.Connector):
    """
    A connector for outgoing SSL connections.

    Extends L{tcp.Connector} to carry the C{contextFactory} used when the
    transport negotiates TLS.
    """
    def __init__(self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None):
        self.contextFactory = contextFactory
        tcp.Connector.__init__(self, host, port, factory, timeout, bindAddress, reactor)

        # Force some parameter checking in pyOpenSSL. It's better to fail now
        # than after we've set up the transport.
        contextFactory.getContext()

    def _makeTransport(self):
        # Build the SSL-enabled Client transport for one connection attempt.
        return Client(self.host, self.port, self.bindAddress, self.contextFactory, self, self.reactor)
|
||||
|
||||
|
||||
|
||||
from twisted.internet._sslverify import (
|
||||
KeyPair, DistinguishedName, DN, Certificate,
|
||||
CertificateRequest, PrivateCertificate,
|
||||
OpenSSLAcceptableCiphers as AcceptableCiphers,
|
||||
OpenSSLCertificateOptions as CertificateOptions,
|
||||
OpenSSLDiffieHellmanParameters as DiffieHellmanParameters,
|
||||
platformTrust, OpenSSLDefaultPaths, VerificationError,
|
||||
optionsForClientTLS,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"ContextFactory", "DefaultOpenSSLContextFactory", "ClientContextFactory",
|
||||
|
||||
'DistinguishedName', 'DN',
|
||||
'Certificate', 'CertificateRequest', 'PrivateCertificate',
|
||||
'KeyPair',
|
||||
'AcceptableCiphers', 'CertificateOptions', 'DiffieHellmanParameters',
|
||||
'platformTrust', 'OpenSSLDefaultPaths',
|
||||
|
||||
'VerificationError', 'optionsForClientTLS',
|
||||
]
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
# -*- test-case-name: twisted.test.test_stdio -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Standard input/out/err support.
|
||||
|
||||
This module exposes two names: StandardIO, a factory that takes an IProtocol
provider as an argument and connects that protocol to standard input and
output on the current process; and PipeAddress, the address type used by such
connections.
|
||||
|
||||
It should work on any UNIX and also on Win32 (with some caveats: due to
|
||||
platform limitations, it will perform very poorly on Win32).
|
||||
|
||||
Future Plans::
|
||||
|
||||
support for stderr, perhaps
|
||||
Rewrite to use the reactor instead of an ad-hoc mechanism for connecting
|
||||
protocols to transport.
|
||||
|
||||
|
||||
Maintainer: James Y Knight
|
||||
"""
|
||||
|
||||
from twisted.python.runtime import platform
|
||||
|
||||
if platform.isWindows():
|
||||
from twisted.internet import _win32stdio
|
||||
StandardIO = _win32stdio.StandardIO
|
||||
PipeAddress = _win32stdio.Win32PipeAddress
|
||||
|
||||
else:
|
||||
from twisted.internet._posixstdio import StandardIO, PipeAddress
|
||||
|
||||
__all__ = ['StandardIO', 'PipeAddress']
|
||||
914
Linux_i686/lib/python2.7/site-packages/twisted/internet/task.py
Normal file
914
Linux_i686/lib/python2.7/site-packages/twisted/internet/task.py
Normal file
|
|
@ -0,0 +1,914 @@
|
|||
# -*- test-case-name: twisted.test.test_task,twisted.test.test_cooperator -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Scheduling utility methods and classes.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.python import log
|
||||
from twisted.python import reflect
|
||||
from twisted.python.failure import Failure
|
||||
|
||||
from twisted.internet import base, defer
|
||||
from twisted.internet.interfaces import IReactorTime
|
||||
from twisted.internet.error import ReactorNotRunning
|
||||
|
||||
|
||||
class LoopingCall:
    """Call a function repeatedly.

    If C{f} returns a deferred, rescheduling will not take place until the
    deferred has fired. The result value is ignored.

    @ivar f: The function to call.
    @ivar a: A tuple of arguments to pass the function.
    @ivar kw: A dictionary of keyword arguments to pass to the function.
    @ivar clock: A provider of
        L{twisted.internet.interfaces.IReactorTime}.  The default is
        L{twisted.internet.reactor}. Feel free to set this to
        something else, but it probably ought to be set *before*
        calling L{start}.

    @type running: C{bool}
    @ivar running: A flag which is C{True} while C{f} is scheduled to be called
        (or is currently being called). It is set to C{True} when L{start} is
        called and set to C{False} when L{stop} is called or if C{f} raises an
        exception. In either case, it will be C{False} by the time the
        C{Deferred} returned by L{start} fires its callback or errback.

    @type _expectNextCallAt: C{float}
    @ivar _expectNextCallAt: The time at which this instance most recently
        scheduled itself to run.

    @type _realLastTime: C{float}
    @ivar _realLastTime: When counting skips, the time at which the skip
        counter was last invoked.

    @type _runAtStart: C{bool}
    @ivar _runAtStart: A flag indicating whether the 'now' argument was passed
        to L{LoopingCall.start}.
    """

    # Class-level defaults; instances shadow these once start() is called.
    call = None               # pending IDelayedCall, or None while f is running
    running = False
    deferred = None           # Deferred returned by start(); fired on stop/error
    interval = None
    _expectNextCallAt = 0.0
    _runAtStart = False
    starttime = None

    def __init__(self, f, *a, **kw):
        self.f = f
        self.a = a
        self.kw = kw
        # Imported here so that merely importing this module does not
        # install the global reactor.
        from twisted.internet import reactor
        self.clock = reactor


    def withCount(cls, countCallable):
        """
        An alternate constructor for L{LoopingCall} that makes available the
        number of calls which should have occurred since it was last invoked.

        Note that this number is an C{int} value; It represents the discrete
        number of calls that should have been made.  For example, if you are
        using a looping call to display an animation with discrete frames, this
        number would be the number of frames to advance.

        The count is normally 1, but can be higher. For example, if the reactor
        is blocked and takes too long to invoke the L{LoopingCall}, a Deferred
        returned from a previous call is not fired before an interval has
        elapsed, or if the callable itself blocks for longer than an interval,
        preventing I{itself} from being called.

        @param countCallable: A callable that will be invoked each time the
            resulting LoopingCall is run, with an integer specifying the number
            of calls that should have been invoked.

        @type countCallable: 1-argument callable which takes an C{int}

        @return: An instance of L{LoopingCall} with call counting enabled,
            which provides the count as the first positional argument.

        @rtype: L{LoopingCall}

        @since: 9.0
        """

        def counter():
            now = self.clock.seconds()
            lastTime = self._realLastTime
            if lastTime is None:
                # First run: measure from the start time, pushed back one
                # interval if the initial call ran immediately.
                lastTime = self.starttime
                if self._runAtStart:
                    lastTime -= self.interval
            self._realLastTime = now
            lastInterval = self._intervalOf(lastTime)
            thisInterval = self._intervalOf(now)
            count = thisInterval - lastInterval
            return countCallable(count)

        # `counter` closes over `self`, which is bound just below.
        self = cls(counter)

        self._realLastTime = None

        return self

    withCount = classmethod(withCount)


    def _intervalOf(self, t):
        """
        Determine the number of intervals passed as of the given point in
        time.

        @param t: The specified time (from the start of the L{LoopingCall}) to
            be measured in intervals

        @return: The C{int} number of intervals which have passed as of the
            given point in time.
        """
        elapsedTime = t - self.starttime
        intervalNum = int(elapsedTime / self.interval)
        return intervalNum


    def start(self, interval, now=True):
        """
        Start running function every interval seconds.

        @param interval: The number of seconds between calls.  May be
        less than one.  Precision will depend on the underlying
        platform, the available hardware, and the load on the system.

        @param now: If True, run this call right now.  Otherwise, wait
        until the interval has elapsed before beginning.

        @return: A Deferred whose callback will be invoked with
        C{self} when C{self.stop} is called, or whose errback will be
        invoked when the function raises an exception or returned a
        deferred that has its errback invoked.
        """
        assert not self.running, ("Tried to start an already running "
                                  "LoopingCall.")
        if interval < 0:
            raise ValueError("interval must be >= 0")
        self.running = True
        d = self.deferred = defer.Deferred()
        self.starttime = self.clock.seconds()
        self._expectNextCallAt = self.starttime
        self.interval = interval
        self._runAtStart = now
        if now:
            self()
        else:
            self._reschedule()
        return d

    def stop(self):
        """Stop running function.
        """
        assert self.running, ("Tried to stop a LoopingCall that was "
                              "not running.")
        self.running = False
        # If no call is pending, f is currently executing; its callback (cb
        # in __call__) will see running == False and fire the Deferred.
        if self.call is not None:
            self.call.cancel()
            self.call = None
            d, self.deferred = self.deferred, None
            d.callback(self)

    def reset(self):
        """
        Skip the next iteration and reset the timer.

        @since: 11.1
        """
        assert self.running, ("Tried to reset a LoopingCall that was "
                              "not running.")
        if self.call is not None:
            self.call.cancel()
            self.call = None
            self._expectNextCallAt = self.clock.seconds()
            self._reschedule()

    def __call__(self):
        """
        Run C{f} once, then either reschedule (still running) or fire the
        completion Deferred (stopped, or f failed).
        """
        def cb(result):
            if self.running:
                self._reschedule()
            else:
                # stop() was called while f was executing; fire now.
                d, self.deferred = self.deferred, None
                d.callback(self)

        def eb(failure):
            # An error terminates the loop and errbacks the Deferred.
            self.running = False
            d, self.deferred = self.deferred, None
            d.errback(failure)

        # Clear the pending-call marker while f runs.
        self.call = None
        d = defer.maybeDeferred(self.f, *self.a, **self.kw)
        d.addCallback(cb)
        d.addErrback(eb)


    def _reschedule(self):
        """
        Schedule the next iteration of this looping call.
        """
        if self.interval == 0:
            # Zero interval: run again on the next reactor iteration.
            self.call = self.clock.callLater(0, self)
            return

        currentTime = self.clock.seconds()
        # Find how long is left until the interval comes around again.
        untilNextTime = (self._expectNextCallAt - currentTime) % self.interval
        # Make sure it is in the future, in case more than one interval worth
        # of time passed since the previous call was made.
        nextTime = max(
            self._expectNextCallAt + self.interval, currentTime + untilNextTime)
        # If the interval falls on the current time exactly, skip it and
        # schedule the call for the next interval.
        if nextTime == currentTime:
            nextTime += self.interval
        self._expectNextCallAt = nextTime
        self.call = self.clock.callLater(nextTime - currentTime, self)


    def __repr__(self):
        # Prefer the most descriptive available name for f: __qualname__
        # (Py3), then __name__ with a bound-method class prefix (Py2's
        # im_class), falling back to a safe repr for arbitrary callables.
        if hasattr(self.f, '__qualname__'):
            func = self.f.__qualname__
        elif hasattr(self.f, '__name__'):
            func = self.f.__name__
            if hasattr(self.f, 'im_class'):
                func = self.f.im_class.__name__ + '.' + func
        else:
            func = reflect.safe_repr(self.f)

        return 'LoopingCall<%r>(%s, *%s, **%s)' % (
            self.interval, func, reflect.safe_repr(self.a),
            reflect.safe_repr(self.kw))
|
||||
|
||||
|
||||
|
||||
class SchedulerError(Exception):
    """
    Root of the scheduler-state exception hierarchy.

    Raised (via its subclasses) when an operation cannot complete because
    the scheduler, or one of its tasks, is in an invalid state.  Do not
    raise this class directly; raise one of its more specific subclasses.
    """
|
||||
|
||||
|
||||
|
||||
class SchedulerStopped(SchedulerError):
    """
    The operation could not complete because the scheduler was already
    stopped, or was stopped while the operation was in progress.
    """
|
||||
|
||||
|
||||
|
||||
class TaskFinished(SchedulerError):
    """
    The operation could not complete because the task had permanently
    stopped running — it completed, was stopped, or encountered an error.
    """
|
||||
|
||||
|
||||
|
||||
class TaskDone(TaskFinished):
    """
    The operation could not complete because the task had already run to
    completion.
    """
|
||||
|
||||
|
||||
|
||||
class TaskStopped(TaskFinished):
    """
    The operation could not complete because the task had been stopped.
    """
|
||||
|
||||
|
||||
|
||||
class TaskFailed(TaskFinished):
    """
    The operation could not complete because the task terminated with an
    unhandled error.
    """
|
||||
|
||||
|
||||
|
||||
class NotPaused(SchedulerError):
    """
    Raised when resuming a task that was not previously paused.
    """
|
||||
|
||||
|
||||
|
||||
class _Timer(object):
|
||||
MAX_SLICE = 0.01
|
||||
def __init__(self):
|
||||
self.end = time.time() + self.MAX_SLICE
|
||||
|
||||
|
||||
def __call__(self):
|
||||
return time.time() >= self.end
|
||||
|
||||
|
||||
|
||||
# Smallest nonzero delay used when scheduling the next Cooperator tick.
_EPSILON = 0.00000001
def _defaultScheduler(x):
    """
    Default scheduler for L{Cooperator}: run C{x} on the global reactor
    almost immediately (after C{_EPSILON} seconds).
    """
    # Imported here so that merely importing this module does not install
    # the global reactor.
    from twisted.internet import reactor
    return reactor.callLater(_EPSILON, x)
|
||||
|
||||
|
||||
class CooperativeTask(object):
    """
    A L{CooperativeTask} is a task object inside a L{Cooperator}, which can be
    paused, resumed, and stopped.  It can also have its completion (or
    termination) monitored.

    @see: L{Cooperator.cooperate}

    @ivar _iterator: the iterator to iterate when this L{CooperativeTask} is
        asked to do work.

    @ivar _cooperator: the L{Cooperator} that this L{CooperativeTask}
        participates in, which is used to re-insert it upon resume.

    @ivar _deferreds: the list of L{defer.Deferred}s to fire when this task
        completes, fails, or finishes.

    @type _deferreds: C{list}

    @type _cooperator: L{Cooperator}

    @ivar _pauseCount: the number of times that this L{CooperativeTask} has
        been paused; if 0, it is running.

    @type _pauseCount: C{int}

    @ivar _completionState: The completion-state of this L{CooperativeTask}.
        C{None} if the task is not yet completed, an instance of L{TaskStopped}
        if C{stop} was called to stop this task early, of L{TaskFailed} if the
        application code in the iterator raised an exception which caused it to
        terminate, and of L{TaskDone} if it terminated normally via raising
        C{StopIteration}.

    @type _completionState: L{TaskFinished}
    """

    def __init__(self, iterator, cooperator):
        """
        A private constructor: to create a new L{CooperativeTask}, see
        L{Cooperator.cooperate}.
        """
        self._iterator = iterator
        self._cooperator = cooperator
        self._deferreds = []
        self._pauseCount = 0
        self._completionState = None
        self._completionResult = None
        # Registering with the cooperator may complete this task at once if
        # the cooperator has already been stopped (see Cooperator._addTask).
        cooperator._addTask(self)


    def whenDone(self):
        """
        Get a L{defer.Deferred} notification of when this task is complete.

        @return: a L{defer.Deferred} that fires with the C{iterator} that this
            L{CooperativeTask} was created with when the iterator has been
            exhausted (i.e. its C{next} method has raised C{StopIteration}), or
            fails with the exception raised by C{next} if it raises some other
            exception.

        @rtype: L{defer.Deferred}
        """
        d = defer.Deferred()
        if self._completionState is None:
            self._deferreds.append(d)
        else:
            # Already finished: fire immediately with the recorded result.
            d.callback(self._completionResult)
        return d


    def pause(self):
        """
        Pause this L{CooperativeTask}.  Stop doing work until
        L{CooperativeTask.resume} is called.  If C{pause} is called more than
        once, C{resume} must be called an equal number of times to resume this
        task.

        @raise TaskFinished: if this task has already finished or completed.
        """
        self._checkFinish()
        self._pauseCount += 1
        if self._pauseCount == 1:
            # First pause actually removes the task from the scheduler.
            self._cooperator._removeTask(self)


    def resume(self):
        """
        Resume processing of a paused L{CooperativeTask}.

        @raise NotPaused: if this L{CooperativeTask} is not paused.
        """
        if self._pauseCount == 0:
            raise NotPaused()
        self._pauseCount -= 1
        # Only the final resume (and only if still incomplete) re-adds the
        # task to the scheduler.
        if self._pauseCount == 0 and self._completionState is None:
            self._cooperator._addTask(self)


    def _completeWith(self, completionState, deferredResult):
        """
        @param completionState: a L{TaskFinished} exception or a subclass
            thereof, indicating what exception should be raised when subsequent
            operations are performed.

        @param deferredResult: the result to fire all the deferreds with.
        """
        self._completionState = completionState
        self._completionResult = deferredResult
        if not self._pauseCount:
            self._cooperator._removeTask(self)

        # The Deferreds need to be invoked after all this is completed, because
        # a Deferred may want to manipulate other tasks in a Cooperator.  For
        # example, if you call "stop()" on a cooperator in a callback on a
        # Deferred returned from whenDone(), this CooperativeTask must be gone
        # from the Cooperator by that point so that _completeWith is not
        # invoked reentrantly; that would cause these Deferreds to blow up with
        # an AlreadyCalledError, or the _removeTask to fail with a ValueError.
        for d in self._deferreds:
            d.callback(deferredResult)


    def stop(self):
        """
        Stop further processing of this task.

        @raise TaskFinished: if this L{CooperativeTask} has previously
            completed, via C{stop}, completion, or failure.
        """
        self._checkFinish()
        self._completeWith(TaskStopped(), Failure(TaskStopped()))


    def _checkFinish(self):
        """
        If this task has been stopped, raise the appropriate subclass of
        L{TaskFinished}.
        """
        if self._completionState is not None:
            raise self._completionState


    def _oneWorkUnit(self):
        """
        Perform one unit of work for this task, retrieving one item from its
        iterator, stopping if there are no further items in the iterator, and
        pausing if the result was a L{defer.Deferred}.
        """
        try:
            result = next(self._iterator)
        except StopIteration:
            # Normal exhaustion: complete with the iterator as the result.
            self._completeWith(TaskDone(), self._iterator)
        except:
            # Any other exception from the iterator fails the task; the bare
            # except is deliberate so the active exception becomes a Failure.
            self._completeWith(TaskFailed(), Failure())
        else:
            if isinstance(result, defer.Deferred):
                # Yielded Deferred: pause until it fires, then resume (or
                # fail the task if it errbacks).
                self.pause()
                def failLater(f):
                    self._completeWith(TaskFailed(), f)
                result.addCallbacks(lambda result: self.resume(),
                                    failLater)
|
||||
|
||||
|
||||
|
||||
class Cooperator(object):
    """
    Cooperative task scheduler.

    A cooperative task is an iterator where each iteration represents an
    atomic unit of work.  When the iterator yields, it allows the
    L{Cooperator} to decide which of its tasks to execute next.  If the
    iterator yields a L{defer.Deferred} then work will pause until the
    L{defer.Deferred} fires and completes its callback chain.

    When a L{Cooperator} has more than one task, it distributes work between
    all tasks.

    There are two ways to add tasks to a L{Cooperator}, L{cooperate} and
    L{coiterate}.  L{cooperate} is the more useful of the two, as it returns a
    L{CooperativeTask}, which can be L{paused<CooperativeTask.pause>},
    L{resumed<CooperativeTask.resume>} and L{waited
    on<CooperativeTask.whenDone>}.  L{coiterate} has the same effect, but
    returns only a L{defer.Deferred} that fires when the task is done.

    L{Cooperator} can be used for many things, including but not limited to:

      - running one or more computationally intensive tasks without blocking
      - limiting parallelism by running a subset of the total tasks
        simultaneously
      - doing one thing, waiting for a L{Deferred<defer.Deferred>} to fire,
        doing the next thing, repeat (i.e. serializing a sequence of
        asynchronous tasks)

    Multiple L{Cooperator}s do not cooperate with each other, so for most
    cases you should use the L{global cooperator<task.cooperate>}.
    """

    def __init__(self,
                 terminationPredicateFactory=_Timer,
                 scheduler=_defaultScheduler,
                 started=True):
        """
        Create a scheduler-like object to which iterators may be added.

        @param terminationPredicateFactory: A no-argument callable which will
            be invoked at the beginning of each step and should return a
            no-argument callable which will return True when the step should be
            terminated.  The default factory is time-based and allows iterators
            to run for 1/100th of a second at a time.

        @param scheduler: A one-argument callable which takes a no-argument
            callable and should invoke it at some future point.  This will be
            used to schedule each step of this Cooperator.

        @param started: A boolean which indicates whether iterators should be
            stepped as soon as they are added, or if they will be queued up
            until L{Cooperator.start} is called.
        """
        self._tasks = []
        # _metarator round-robins over _tasks across ticks; it starts
        # exhausted and is refreshed in _tasksWhileNotStopped.
        self._metarator = iter(())
        self._terminationPredicateFactory = terminationPredicateFactory
        self._scheduler = scheduler
        self._delayedCall = None
        self._stopped = False
        self._started = started


    def coiterate(self, iterator, doneDeferred=None):
        """
        Add an iterator to the list of iterators this L{Cooperator} is
        currently running.

        Equivalent to L{cooperate}, but returns a L{defer.Deferred} that will
        be fired when the task is done.

        @param doneDeferred: If specified, this will be the Deferred used as
            the completion deferred.  It is suggested that you use the default,
            which creates a new Deferred for you.

        @return: a Deferred that will fire when the iterator finishes.
        """
        if doneDeferred is None:
            doneDeferred = defer.Deferred()
        CooperativeTask(iterator, self).whenDone().chainDeferred(doneDeferred)
        return doneDeferred


    def cooperate(self, iterator):
        """
        Start running the given iterator as a long-running cooperative task, by
        calling next() on it as a periodic timed event.

        @param iterator: the iterator to invoke.

        @return: a L{CooperativeTask} object representing this task.
        """
        return CooperativeTask(iterator, self)


    def _addTask(self, task):
        """
        Add a L{CooperativeTask} object to this L{Cooperator}.
        """
        if self._stopped:
            self._tasks.append(task) # XXX silly, I know, but _completeWith
                                     # does the inverse
            task._completeWith(SchedulerStopped(), Failure(SchedulerStopped()))
        else:
            self._tasks.append(task)
            self._reschedule()


    def _removeTask(self, task):
        """
        Remove a L{CooperativeTask} from this L{Cooperator}.
        """
        self._tasks.remove(task)
        # If no work left to do, cancel the delayed call:
        if not self._tasks and self._delayedCall:
            self._delayedCall.cancel()
            self._delayedCall = None


    def _tasksWhileNotStopped(self):
        """
        Yield all L{CooperativeTask} objects in a loop as long as this
        L{Cooperator}'s termination condition has not been met.
        """
        terminator = self._terminationPredicateFactory()
        while self._tasks:
            # Resume the round-robin where the previous tick left off, then
            # start a fresh pass over the (possibly changed) task list.
            for t in self._metarator:
                yield t
                if terminator():
                    return
            self._metarator = iter(self._tasks)


    def _tick(self):
        """
        Run one scheduler tick.
        """
        self._delayedCall = None
        for taskObj in self._tasksWhileNotStopped():
            taskObj._oneWorkUnit()
        self._reschedule()


    # Set when _reschedule is called before start(); consumed by start().
    _mustScheduleOnStart = False
    def _reschedule(self):
        """
        Schedule the next tick, unless not yet started or one is pending.
        """
        if not self._started:
            self._mustScheduleOnStart = True
            return
        if self._delayedCall is None and self._tasks:
            self._delayedCall = self._scheduler(self._tick)


    def start(self):
        """
        Begin scheduling steps.
        """
        self._stopped = False
        self._started = True
        if self._mustScheduleOnStart:
            # del restores the class-level False default.
            del self._mustScheduleOnStart
            self._reschedule()


    def stop(self):
        """
        Stop scheduling steps.  Errback the completion Deferreds of all
        iterators which have been added and forget about them.
        """
        self._stopped = True
        # NOTE(review): _completeWith removes each task from self._tasks
        # while this loop iterates it — looks skip-prone; confirm against
        # the test suite before changing.
        for taskObj in self._tasks:
            taskObj._completeWith(SchedulerStopped(),
                                  Failure(SchedulerStopped()))
        self._tasks = []
        if self._delayedCall is not None:
            self._delayedCall.cancel()
            self._delayedCall = None


    @property
    def running(self):
        """
        Is this L{Cooperator} is currently running?

        @return: C{True} if the L{Cooperator} is running, C{False} otherwise.
        @rtype: C{bool}
        """
        return (self._started and not self._stopped)
|
||||
|
||||
|
||||
|
||||
# The process-global Cooperator shared by the module-level coiterate() and
# cooperate() functions.
_theCooperator = Cooperator()

def coiterate(iterator):
    """
    Cooperatively iterate over the given iterator, dividing runtime between it
    and all other iterators which have been passed to this function and not yet
    exhausted.

    @param iterator: the iterator to invoke.

    @return: a Deferred that will fire when the iterator finishes.
    """
    return _theCooperator.coiterate(iterator)
|
||||
|
||||
|
||||
|
||||
def cooperate(iterator):
    """
    Start running the given iterator as a long-running cooperative task, by
    calling next() on it as a periodic timed event.

    This is very useful if you have computationally expensive tasks that you
    want to run without blocking the reactor.  Just break each task up so that
    it yields frequently, pass it in here and the global L{Cooperator} will
    make sure work is distributed between them without blocking longer than a
    single iteration of a single task.

    @param iterator: the iterator to invoke.

    @return: a L{CooperativeTask} object representing this task.
    """
    return _theCooperator.cooperate(iterator)
|
||||
|
||||
|
||||
|
||||
@implementer(IReactorTime)
class Clock:
    """
    A deterministic, easily-controlled implementation of
    L{IReactorTime.callLater} whose notion of "now" only moves when
    L{advance} is called.  This is commonly useful for writing deterministic
    unit tests for code which schedules events using this API.
    """

    # The current simulated time, in seconds.
    rightNow = 0.0

    def __init__(self):
        # Pending base.DelayedCall instances, kept sorted by scheduled time.
        self.calls = []


    def seconds(self):
        """
        Stand-in for time.time(): used internally when an operation such as
        L{IDelayedCall.reset} needs a value relative to the current time.

        @rtype: C{float}
        @return: The time which should be considered the current time.
        """
        return self.rightNow


    def _sortCalls(self):
        """
        Order the pending calls by the time they are scheduled for.
        """
        self.calls.sort(key=lambda call: call.getTime())


    def callLater(self, when, what, *a, **kw):
        """
        See L{twisted.internet.interfaces.IReactorTime.callLater}.
        """
        scheduled = base.DelayedCall(self.seconds() + when,
                                     what, a, kw,
                                     self.calls.remove,
                                     lambda c: None,
                                     self.seconds)
        self.calls.append(scheduled)
        self._sortCalls()
        return scheduled


    def getDelayedCalls(self):
        """
        See L{twisted.internet.interfaces.IReactorTime.getDelayedCalls}
        """
        return self.calls


    def advance(self, amount):
        """
        Move time on this clock forward by the given amount and run whatever
        pending calls should be run.

        @type amount: C{float}
        @param amount: The number of seconds which to advance this clock's
        time.
        """
        self.rightNow += amount
        self._sortCalls()
        while self.calls:
            soonest = self.calls[0]
            if soonest.getTime() > self.seconds():
                break
            del self.calls[0]
            soonest.called = 1
            soonest.func(*soonest.args, **soonest.kw)
            # A call may have scheduled new calls; keep the queue ordered.
            self._sortCalls()


    def pump(self, timings):
        """
        Advance incrementally by the given set of times.

        @type timings: iterable of C{float}
        """
        for amount in timings:
            self.advance(amount)
|
||||
|
||||
|
||||
|
||||
def deferLater(clock, delay, callable, *args, **kw):
    """
    Call the given function after a certain period of time has passed.

    @type clock: L{IReactorTime} provider
    @param clock: The object which will be used to schedule the delayed
        call.

    @type delay: C{float} or C{int}
    @param delay: The number of seconds to wait before calling the function.

    @param callable: The object to call after the delay.

    @param *args: The positional arguments to pass to C{callable}.

    @param **kw: The keyword arguments to pass to C{callable}.

    @rtype: L{defer.Deferred}

    @return: A deferred that fires with the result of the callable when the
        specified time has elapsed.
    """
    def cancelPending(deferred):
        # Cancelling the Deferred cancels the underlying delayed call.
        pending.cancel()
    d = defer.Deferred(cancelPending)
    d.addCallback(lambda _: callable(*args, **kw))
    pending = clock.callLater(delay, d.callback, None)
    return d
|
||||
|
||||
|
||||
|
||||
def react(main, argv=(), _reactor=None):
    """
    Call C{main} and run the reactor until the L{Deferred} it returns fires.

    This is intended as the way to start up an application with a well-defined
    completion condition.  Use it to write clients or one-off asynchronous
    operations.  Prefer this to calling C{reactor.run} directly, as this
    function will also:

      - Take care to call C{reactor.stop} once and only once, and at the right
        time.
      - Log any failures from the C{Deferred} returned by C{main}.
      - Exit the application when done, with exit code 0 in case of success and
        1 in case of failure. If C{main} fails with a C{SystemExit} error, the
        code returned is used.

    The following demonstrates the signature of a C{main} function which can be
    used with L{react}::
        def main(reactor, username, password):
            return defer.succeed('ok')

        task.react(main, ('alice', 'secret'))

    @param main: A callable which returns a L{Deferred}. It should
        take the reactor as its first parameter, followed by the elements of
        C{argv}.

    @param argv: A list of arguments to pass to C{main}. If omitted the
        callable will be invoked with no additional arguments.

    @param _reactor: An implementation detail to allow easier unit testing. Do
        not supply this parameter.

    @since: 12.3
    """
    if _reactor is None:
        from twisted.internet import reactor as _reactor
    finished = main(_reactor, *argv)
    # Single-element list so the nested stop() closure can rebind the exit
    # code seen by sys.exit() below.
    codes = [0]

    # Records whether shutdown has already begun, so stop() is not called on
    # a reactor that is already stopping.
    stopping = []
    _reactor.addSystemEventTrigger('before', 'shutdown', stopping.append, True)

    def stop(result, stopReactor):
        if stopReactor:
            try:
                _reactor.stop()
            except ReactorNotRunning:
                pass

        if isinstance(result, Failure):
            # A SystemExit from main carries its own exit code; any other
            # failure is logged and reported as exit code 1.
            if result.check(SystemExit) is not None:
                code = result.value.code
            else:
                log.err(result, "main function encountered error")
                code = 1
            codes[0] = code

    def cbFinish(result):
        if stopping:
            # Shutdown already in progress; just record the outcome.
            stop(result, False)
        else:
            # The Deferred may fire before the reactor is running; defer the
            # stop until it is.
            _reactor.callWhenRunning(stop, result, True)

    finished.addBoth(cbFinish)
    _reactor.run()
    sys.exit(codes[0])
|
||||
|
||||
|
||||
__all__ = [
|
||||
'LoopingCall',
|
||||
|
||||
'Clock',
|
||||
|
||||
'SchedulerStopped', 'Cooperator', 'coiterate',
|
||||
|
||||
'deferLater', 'react']
|
||||
1182
Linux_i686/lib/python2.7/site-packages/twisted/internet/tcp.py
Normal file
1182
Linux_i686/lib/python2.7/site-packages/twisted/internet/tcp.py
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,6 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet}.
|
||||
"""
|
||||
|
|
@ -0,0 +1,177 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
POSIX implementation of local network interface enumeration.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import sys, socket
|
||||
|
||||
from socket import AF_INET, AF_INET6, inet_ntop
|
||||
from ctypes import (
|
||||
CDLL, POINTER, Structure, c_char_p, c_ushort, c_int,
|
||||
c_uint32, c_uint8, c_void_p, c_ubyte, pointer, cast)
|
||||
from ctypes.util import find_library
|
||||
|
||||
from twisted.python.compat import _PY3, nativeString
|
||||
|
||||
if _PY3:
    # Once #6070 is implemented, this can be replaced with the implementation
    # from that ticket:
    def chr(i):
        """
        Python 3 implementation of Python 2 chr(), i.e. convert an integer to
        the corresponding single byte.

        @type i: C{int}
        @return: A length-one L{bytes}.
        """
        return bytes([i])
|
||||
|
||||
|
||||
libc = CDLL(find_library("c"))
|
||||
|
||||
if sys.platform.startswith('freebsd') or sys.platform == 'darwin':
|
||||
_sockaddrCommon = [
|
||||
("sin_len", c_uint8),
|
||||
("sin_family", c_uint8),
|
||||
]
|
||||
else:
|
||||
_sockaddrCommon = [
|
||||
("sin_family", c_ushort),
|
||||
]
|
||||
|
||||
|
||||
|
||||
class in_addr(Structure):
    # Mirror of the C C{struct in_addr}: a packed 4-byte IPv4 address.
    _fields_ = [
        ("in_addr", c_ubyte * 4),
    ]
|
||||
|
||||
|
||||
|
||||
class in6_addr(Structure):
    # Mirror of the C C{struct in6_addr}: a packed 16-byte IPv6 address.
    _fields_ = [
        ("in_addr", c_ubyte * 16),
    ]
|
||||
|
||||
|
||||
|
||||
class sockaddr(Structure):
    # Minimal generic sockaddr layout, used only to read the address family
    # before casting to a family-specific structure.  The "sin_port" field
    # here just pads past the family bytes; its value is never consumed
    # through this type.
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
    ]
|
||||
|
||||
|
||||
|
||||
class sockaddr_in(Structure):
    # Mirror of the C C{struct sockaddr_in} (IPv4): family (+ length on
    # BSD/Darwin, via _sockaddrCommon), port, and the 4-byte address.
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
        ("sin_addr", in_addr),
    ]
|
||||
|
||||
|
||||
|
||||
class sockaddr_in6(Structure):
    # Mirror of the C C{struct sockaddr_in6} (IPv6): family (+ length on
    # BSD/Darwin), port, flow information, and the 16-byte address.
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
        ("sin_flowinfo", c_uint32),
        ("sin_addr", in6_addr),
    ]
|
||||
|
||||
|
||||
|
||||
class ifaddrs(Structure):
    # Mirror of C{struct ifaddrs} from C{getifaddrs(3)}.  Declared empty
    # first and given _fields_ afterwards because the structure refers to
    # itself through C{ifa_next}.
    pass

# Pointer-to-ifaddrs type, used both in _fields_ below and as the argument
# type for the libc bindings.
ifaddrs_p = POINTER(ifaddrs)
ifaddrs._fields_ = [
    ('ifa_next', ifaddrs_p),
    ('ifa_name', c_char_p),
    ('ifa_flags', c_uint32),
    ('ifa_addr', POINTER(sockaddr)),
    ('ifa_netmask', POINTER(sockaddr)),
    ('ifa_dstaddr', POINTER(sockaddr)),
    ('ifa_data', c_void_p)]
|
||||
|
||||
# int getifaddrs(struct ifaddrs **ifap) -- fills *ifap with a freshly
# allocated linked list of interface addresses; returns 0 on success and
# a negative value on error.
getifaddrs = libc.getifaddrs
getifaddrs.argtypes = [POINTER(ifaddrs_p)]
getifaddrs.restype = c_int

# void freeifaddrs(struct ifaddrs *ifa) -- releases the list allocated by
# getifaddrs.
freeifaddrs = libc.freeifaddrs
freeifaddrs.argtypes = [ifaddrs_p]
|
||||
|
||||
|
||||
|
||||
def _maybeCleanupScopeIndex(family, packed):
|
||||
"""
|
||||
On FreeBSD, kill the embedded interface indices in link-local scoped
|
||||
addresses.
|
||||
|
||||
@param family: The address family of the packed address - one of the
|
||||
I{socket.AF_*} constants.
|
||||
|
||||
@param packed: The packed representation of the address (ie, the bytes of a
|
||||
I{in_addr} field).
|
||||
@type packed: L{bytes}
|
||||
|
||||
@return: The packed address with any FreeBSD-specific extra bits cleared.
|
||||
@rtype: L{bytes}
|
||||
|
||||
@see: U{https://twistedmatrix.com/trac/ticket/6843}
|
||||
@see: U{http://www.freebsd.org/doc/en/books/developers-handbook/ipv6.html#ipv6-scope-index}
|
||||
|
||||
@note: Indications are that the need for this will be gone in FreeBSD >=10.
|
||||
"""
|
||||
if sys.platform.startswith('freebsd') and packed[:2] == b"\xfe\x80":
|
||||
return packed[:2] + b"\x00\x00" + packed[4:]
|
||||
return packed
|
||||
|
||||
|
||||
|
||||
def _interfaces():
    """
    Call C{getifaddrs(3)} and return a list of tuples of interface name,
    address family, and human-readable address representing its results.

    @raise OSError: If the C{getifaddrs} call fails.
    """
    ifaddrs = ifaddrs_p()
    if getifaddrs(pointer(ifaddrs)) < 0:
        raise OSError()
    results = []
    try:
        # Walk the linked list with a separate cursor so that the head
        # pointer filled in by getifaddrs() is preserved.  freeifaddrs()
        # must be given that original head; advancing ``ifaddrs`` itself
        # (as this function previously did) left it NULL at loop exit and
        # leaked the entire list.
        cursor = ifaddrs
        while cursor:
            if cursor[0].ifa_addr:
                family = cursor[0].ifa_addr[0].sin_family
                if family == AF_INET:
                    addr = cast(cursor[0].ifa_addr, POINTER(sockaddr_in))
                elif family == AF_INET6:
                    addr = cast(cursor[0].ifa_addr, POINTER(sockaddr_in6))
                else:
                    # Only IPv4 and IPv6 addresses are reported.
                    addr = None

                if addr:
                    packed = b''.join(map(chr, addr[0].sin_addr.in_addr[:]))
                    packed = _maybeCleanupScopeIndex(family, packed)
                    results.append((
                        cursor[0].ifa_name,
                        family,
                        inet_ntop(family, packed)))

            cursor = cursor[0].ifa_next
    finally:
        freeifaddrs(ifaddrs)
    return results
|
||||
|
||||
|
||||
|
||||
def posixGetLinkLocalIPv6Addresses():
    """
    Return a list of strings in colon-hex format representing all the link
    local IPv6 addresses available on the system, as reported by
    I{getifaddrs(3)}.
    """
    addresses = []
    for (interface, family, address) in _interfaces():
        if family != socket.AF_INET6:
            continue
        interface = nativeString(interface)
        address = nativeString(address)
        if address.startswith('fe80:'):
            # Append the scope (interface name) in the usual %-notation.
            addresses.append('%s%%%s' % (address, interface))
    return addresses
|
||||
|
|
@ -0,0 +1,119 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Windows implementation of local network interface enumeration.
|
||||
"""
|
||||
|
||||
from socket import socket, AF_INET6, SOCK_STREAM
|
||||
from ctypes import (
|
||||
WinDLL, byref, create_string_buffer, c_int, c_void_p,
|
||||
POINTER, Structure, cast, string_at)
|
||||
|
||||
WS2_32 = WinDLL('ws2_32')
|
||||
|
||||
SOCKET = c_int
|
||||
DWORD = c_int
|
||||
LPVOID = c_void_p
|
||||
LPSOCKADDR = c_void_p
|
||||
LPWSAPROTOCOL_INFO = c_void_p
|
||||
LPTSTR = c_void_p
|
||||
LPDWORD = c_void_p
|
||||
LPWSAOVERLAPPED = c_void_p
|
||||
LPWSAOVERLAPPED_COMPLETION_ROUTINE = c_void_p
|
||||
|
||||
# http://msdn.microsoft.com/en-us/library/ms741621(v=VS.85).aspx
|
||||
# int WSAIoctl(
|
||||
# __in SOCKET s,
|
||||
# __in DWORD dwIoControlCode,
|
||||
# __in LPVOID lpvInBuffer,
|
||||
# __in DWORD cbInBuffer,
|
||||
# __out LPVOID lpvOutBuffer,
|
||||
# __in DWORD cbOutBuffer,
|
||||
# __out LPDWORD lpcbBytesReturned,
|
||||
# __in LPWSAOVERLAPPED lpOverlapped,
|
||||
# __in LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
|
||||
# );
|
||||
WSAIoctl = WS2_32.WSAIoctl
|
||||
WSAIoctl.argtypes = [
|
||||
SOCKET, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD,
|
||||
LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE]
|
||||
WSAIoctl.restype = c_int
|
||||
|
||||
# http://msdn.microsoft.com/en-us/library/ms741516(VS.85).aspx
|
||||
# INT WSAAPI WSAAddressToString(
|
||||
# __in LPSOCKADDR lpsaAddress,
|
||||
# __in DWORD dwAddressLength,
|
||||
# __in_opt LPWSAPROTOCOL_INFO lpProtocolInfo,
|
||||
# __inout LPTSTR lpszAddressString,
|
||||
# __inout LPDWORD lpdwAddressStringLength
|
||||
# );
|
||||
WSAAddressToString = WS2_32.WSAAddressToStringA
|
||||
WSAAddressToString.argtypes = [
|
||||
LPSOCKADDR, DWORD, LPWSAPROTOCOL_INFO, LPTSTR, LPDWORD]
|
||||
WSAAddressToString.restype = c_int
|
||||
|
||||
|
||||
SIO_ADDRESS_LIST_QUERY = 0x48000016
|
||||
WSAEFAULT = 10014
|
||||
|
||||
class SOCKET_ADDRESS(Structure):
    # Mirror of the Win32 SOCKET_ADDRESS structure: a pointer to a sockaddr
    # plus its length in bytes.
    _fields_ = [('lpSockaddr', c_void_p),
                ('iSockaddrLength', c_int)]
|
||||
|
||||
|
||||
|
||||
def make_SAL(ln):
    """
    Build a C{SOCKET_ADDRESS_LIST} structure type sized for C{ln} addresses,
    mirroring the variable-length Win32 structure of the same name.

    @param ln: The number of C{SOCKET_ADDRESS} slots in the array.
    @return: A new ctypes L{Structure} subclass.
    """
    class SOCKET_ADDRESS_LIST(Structure):
        _fields_ = [('iAddressCount', c_int),
                    ('Address', SOCKET_ADDRESS * ln)]
    return SOCKET_ADDRESS_LIST
|
||||
|
||||
|
||||
|
||||
def win32GetLinkLocalIPv6Addresses():
    """
    Return a list of strings in colon-hex format representing all the link
    local IPv6 addresses available on the system, as reported by
    I{WSAIoctl}/C{SIO_ADDRESS_LIST_QUERY}.
    """
    s = socket(AF_INET6, SOCK_STREAM)
    size = 4096
    retBytes = c_int()
    # Try at most twice: once with the initial 4096-byte buffer, and once
    # more with whatever size the first call reported it needed.
    for i in range(2):
        buf = create_string_buffer(size)
        ret = WSAIoctl(
            s.fileno(),
            SIO_ADDRESS_LIST_QUERY, 0, 0, buf, size, byref(retBytes), 0, 0)

        # WSAIoctl might fail with WSAEFAULT, which means there was not enough
        # space in the buffer we gave it.  There's no way to check the errno
        # until Python 2.6, so we don't even try. :/ Maybe if retBytes is still
        # 0 another error happened, though.
        if ret and retBytes.value:
            size = retBytes.value
        else:
            break

    # If it failed, then we'll just have to give up.  Still no way to see why.
    if ret:
        raise RuntimeError("WSAIoctl failure")

    # First read only the count (a zero-length address array), then re-cast
    # with the real array length.
    addrList = cast(buf, POINTER(make_SAL(0)))
    addrCount = addrList[0].iAddressCount
    addrList = cast(buf, POINTER(make_SAL(addrCount)))

    addressStringBufLength = 1024
    addressStringBuf = create_string_buffer(addressStringBufLength)

    retList = []
    for i in range(addrList[0].iAddressCount):
        # WSAAddressToString treats this as an in/out length parameter.
        retBytes.value = addressStringBufLength
        addr = addrList[0].Address[i]
        ret = WSAAddressToString(
            addr.lpSockaddr, addr.iSockaddrLength, 0, addressStringBuf,
            byref(retBytes))
        if ret:
            raise RuntimeError("WSAAddressToString failure")
        retList.append(string_at(addressStringBuf))
    # Link-local addresses carry a %scope suffix; keep only those.
    # NOTE(review): string_at returns bytes on Python 3, where '%' in addr
    # would raise TypeError -- presumably this module targets Python 2
    # (WSAAddressToStringA); confirm before porting.
    return [addr for addr in retList if '%' in addr]
|
||||
|
|
@ -0,0 +1,606 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_tcp -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Various helpers for tests for connection-oriented transports.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import socket
|
||||
|
||||
from gc import collect
|
||||
from weakref import ref
|
||||
|
||||
from zope.interface.verify import verifyObject
|
||||
|
||||
from twisted.python import context, log
|
||||
from twisted.python.failure import Failure
|
||||
from twisted.python.runtime import platform
|
||||
from twisted.python.log import ILogContext, msg, err
|
||||
from twisted.internet.defer import Deferred, gatherResults
|
||||
from twisted.internet.interfaces import IConnector, IReactorFDSet
|
||||
from twisted.internet.protocol import ClientFactory, Protocol, ServerFactory
|
||||
from twisted.trial.unittest import SkipTest
|
||||
from twisted.internet.test.reactormixins import needsRunningReactor
|
||||
from twisted.test.test_tcp import ClosingProtocol
|
||||
|
||||
|
||||
|
||||
def findFreePort(interface='127.0.0.1', family=socket.AF_INET,
                 type=socket.SOCK_STREAM):
    """
    Ask the platform to allocate a free port on the specified interface, then
    release the socket and return the address which was allocated.

    @param interface: The local address to try to bind the port on.
    @type interface: C{str}

    @param type: The socket type which will use the resulting port.

    @return: A two-tuple of address and port, like that returned by
        L{socket.getsockname}.
    """
    # Resolve the interface to a bindable sockaddr, bind to port 0 so the
    # OS picks a free port, then record what it picked.
    addr = socket.getaddrinfo(interface, 0)[0][4]
    probe = socket.socket(family, type)
    try:
        probe.bind(addr)
        allocated = probe.getsockname()
    finally:
        probe.close()
    return allocated
|
||||
|
||||
|
||||
|
||||
class ConnectableProtocol(Protocol):
    """
    A protocol to be used with L{runProtocolsWithReactor}.

    The protocol and its pair should eventually disconnect from each other.

    @ivar reactor: The reactor used in this test.

    @ivar disconnectReason: The L{Failure} passed to C{connectionLost}.

    @ivar _done: A L{Deferred} which will be fired when the connection is
        lost.
    """

    # Set by connectionLost; None until the connection has been lost.
    disconnectReason = None

    def _setAttributes(self, reactor, done):
        """
        Set attributes on the protocol that are known only externally; this
        will be called by L{runProtocolsWithReactor} when this protocol is
        instantiated.

        @param reactor: The reactor used in this test.

        @param done: A L{Deferred} which will be fired when the connection is
            lost.
        """
        self.reactor = reactor
        self._done = done


    def connectionLost(self, reason):
        self.disconnectReason = reason
        self._done.callback(None)
        # Drop the Deferred so the protocol does not keep it (and anything
        # chained off it) alive once the connection is gone.
        del self._done
|
||||
|
||||
|
||||
|
||||
class EndpointCreator:
    """
    Create client and server endpoints that know how to connect to each
    other.  Subclasses must override both methods.
    """

    def server(self, reactor):
        """
        Return an object providing C{IStreamServerEndpoint} for use in
        creating a server to use to establish the connection type to be
        tested.
        """
        raise NotImplementedError()


    def client(self, reactor, serverAddress):
        """
        Return an object providing C{IStreamClientEndpoint} for use in
        creating a client to use to establish the connection type to be
        tested.
        """
        raise NotImplementedError()
|
||||
|
||||
|
||||
|
||||
class _SingleProtocolFactory(ClientFactory):
    """
    Factory used by L{runProtocolsWithReactor}.

    It always returns the same protocol (i.e. is intended for only a single
    connection).
    """

    def __init__(self, protocol):
        self._protocol = protocol


    def buildProtocol(self, addr):
        # The address is ignored; every connection gets the one prebuilt
        # protocol instance.
        return self._protocol
|
||||
|
||||
|
||||
|
||||
def runProtocolsWithReactor(reactorBuilder, serverProtocol, clientProtocol,
                            endpointCreator):
    """
    Connect two protocols using endpoints and a new reactor instance.

    A new reactor will be created and run, with the client and server protocol
    instances connected to each other using the given endpoint creator. The
    protocols should run through some set of tests, then disconnect; when both
    have disconnected the reactor will be stopped and the function will
    return.

    @param reactorBuilder: A L{ReactorBuilder} instance.

    @param serverProtocol: A L{ConnectableProtocol} that will be the server.

    @param clientProtocol: A L{ConnectableProtocol} that will be the client.

    @param endpointCreator: An instance of L{EndpointCreator}.

    @return: The reactor run by this test.
    """
    reactor = reactorBuilder.buildReactor()
    # Give each protocol the reactor and a Deferred that fires on
    # connectionLost.
    serverProtocol._setAttributes(reactor, Deferred())
    clientProtocol._setAttributes(reactor, Deferred())
    serverFactory = _SingleProtocolFactory(serverProtocol)
    clientFactory = _SingleProtocolFactory(clientProtocol)

    # Listen on a port:
    serverEndpoint = endpointCreator.server(reactor)
    d = serverEndpoint.listen(serverFactory)

    # Connect to the port:
    def gotPort(p):
        clientEndpoint = endpointCreator.client(
            reactor, p.getHost())
        return clientEndpoint.connect(clientFactory)
    d.addCallback(gotPort)

    # Stop reactor when both connections are lost:
    def failed(result):
        log.err(result, "Connection setup failed.")
    disconnected = gatherResults([serverProtocol._done, clientProtocol._done])
    d.addCallback(lambda _: disconnected)
    d.addErrback(failed)
    d.addCallback(lambda _: needsRunningReactor(reactor, reactor.stop))

    reactorBuilder.runReactor(reactor)
    return reactor
|
||||
|
||||
|
||||
|
||||
def _getWriters(reactor):
    """
    Like L{IReactorFDSet.getWriters}, but with support for IOCP reactor as
    well.
    """
    if IReactorFDSet.providedBy(reactor):
        return reactor.getWriters()
    # The IOCP reactor does not provide IReactorFDSet; its handles list
    # plays the equivalent role.
    if 'IOCP' in reactor.__class__.__name__:
        return reactor.handles
    # Cannot tell what is going on.
    raise Exception("Cannot find writers on %r" % (reactor,))
|
||||
|
||||
|
||||
|
||||
class _AcceptOneClient(ServerFactory):
    """
    A server factory which fires a L{Deferred} with each protocol instance
    shortly after it is constructed (hopefully long enough afterwards so that
    it has been connected to a transport).

    @ivar reactor: The reactor used to schedule the I{shortly}.

    @ivar result: A L{Deferred} which will be fired with the protocol
        instance.
    """
    def __init__(self, reactor, result):
        self.reactor = reactor
        self.result = result


    def buildProtocol(self, addr):
        proto = ServerFactory.buildProtocol(self, addr)
        # Fire on the next reactor iteration, by which time the protocol
        # should have a transport.
        self.reactor.callLater(0, self.result.callback, proto)
        return proto
|
||||
|
||||
|
||||
|
||||
class _SimplePullProducer(object):
|
||||
"""
|
||||
A pull producer which writes one byte whenever it is resumed. For use by
|
||||
C{test_unregisterProducerAfterDisconnect}.
|
||||
"""
|
||||
def __init__(self, consumer):
|
||||
self.consumer = consumer
|
||||
|
||||
|
||||
def stopProducing(self):
|
||||
pass
|
||||
|
||||
|
||||
def resumeProducing(self):
|
||||
log.msg("Producer.resumeProducing")
|
||||
self.consumer.write(b'x')
|
||||
|
||||
|
||||
|
||||
class Stop(ClientFactory):
    """
    A client factory which stops its reactor when a connection attempt
    fails, recording the failure reason.
    """
    # The Failure passed to clientConnectionFailed; None until then.
    failReason = None

    def __init__(self, reactor):
        self.reactor = reactor


    def clientConnectionFailed(self, connector, reason):
        msg("Stop(CF) cCFailed: %s" % (reason.getErrorMessage(),))
        self.failReason = reason
        self.reactor.stop()
|
||||
|
||||
|
||||
|
||||
class ClosingLaterProtocol(ConnectableProtocol):
    """
    ClosingLaterProtocol exchanges one byte with its peer and then disconnects
    itself.  This is mostly a work-around for the fact that connectionMade is
    called before the SSL handshake has completed.
    """
    def __init__(self, onConnectionLost):
        # The Failure from connectionLost; None until disconnection.
        self.lostConnectionReason = None
        # Deferred fired with this protocol once the connection is lost.
        self.onConnectionLost = onConnectionLost


    def connectionMade(self):
        msg("ClosingLaterProtocol.connectionMade")


    def dataReceived(self, bytes):
        msg("ClosingLaterProtocol.dataReceived %r" % (bytes,))
        # Any received data triggers the disconnect.
        self.transport.loseConnection()


    def connectionLost(self, reason):
        msg("ClosingLaterProtocol.connectionLost")
        self.lostConnectionReason = reason
        self.onConnectionLost.callback(self)
|
||||
|
||||
|
||||
|
||||
class ConnectionTestsMixin(object):
    """
    This mixin defines test methods which should apply to most L{ITransport}
    implementations.
    """

    # This should be a reactormixins.EndpointCreator instance.
    endpoints = None


    def test_logPrefix(self):
        """
        Client and server transports implement L{ILoggingContext.logPrefix} to
        return a message reflecting the protocol they are running.
        """
        class CustomLogPrefixProtocol(ConnectableProtocol):
            def __init__(self, prefix):
                self._prefix = prefix
                self.system = None

            def connectionMade(self):
                self.transport.write(b"a")

            def logPrefix(self):
                return self._prefix

            def dataReceived(self, bytes):
                # Capture the log system active while data is delivered; the
                # assertion below checks the prefix appears in it.
                self.system = context.get(ILogContext)["system"]
                self.transport.write(b"b")
                # Only close connection if both sides have received data, so
                # that both sides have system set.
                if b"b" in bytes:
                    self.transport.loseConnection()

        client = CustomLogPrefixProtocol("Custom Client")
        server = CustomLogPrefixProtocol("Custom Server")
        runProtocolsWithReactor(self, server, client, self.endpoints)
        self.assertIn("Custom Client", client.system)
        self.assertIn("Custom Server", server.system)


    def test_writeAfterDisconnect(self):
        """
        After a connection is disconnected, L{ITransport.write} and
        L{ITransport.writeSequence} are no-ops.
        """
        reactor = self.buildReactor()

        finished = []

        serverConnectionLostDeferred = Deferred()
        protocol = lambda: ClosingLaterProtocol(serverConnectionLostDeferred)
        portDeferred = self.endpoints.server(reactor).listen(
            ServerFactory.forProtocol(protocol))
        def listening(port):
            msg("Listening on %r" % (port.getHost(),))
            endpoint = self.endpoints.client(reactor, port.getHost())

            lostConnectionDeferred = Deferred()
            protocol = lambda: ClosingLaterProtocol(lostConnectionDeferred)
            client = endpoint.connect(ClientFactory.forProtocol(protocol))
            def write(proto):
                msg("About to write to %r" % (proto,))
                proto.transport.write(b'x')
            client.addCallbacks(write, lostConnectionDeferred.errback)

            def disconnected(proto):
                # The writes here must be silently discarded: the connection
                # is already gone.
                msg("%r disconnected" % (proto,))
                proto.transport.write(b"some bytes to get lost")
                proto.transport.writeSequence([b"some", b"more"])
                finished.append(True)

            lostConnectionDeferred.addCallback(disconnected)
            serverConnectionLostDeferred.addCallback(disconnected)
            return gatherResults([lostConnectionDeferred,
                                  serverConnectionLostDeferred])

        def onListen():
            portDeferred.addCallback(listening)
            portDeferred.addErrback(err)
            portDeferred.addCallback(lambda ignored: reactor.stop())
        needsRunningReactor(reactor, onListen)

        self.runReactor(reactor)
        self.assertEqual(finished, [True, True])


    def test_protocolGarbageAfterLostConnection(self):
        """
        After the connection a protocol is being used for is closed, the
        reactor discards all of its references to the protocol.
        """
        lostConnectionDeferred = Deferred()
        clientProtocol = ClosingLaterProtocol(lostConnectionDeferred)
        # Weak reference used below to detect whether the reactor still
        # holds the protocol after disconnection.
        clientRef = ref(clientProtocol)

        reactor = self.buildReactor()
        portDeferred = self.endpoints.server(reactor).listen(
            ServerFactory.forProtocol(Protocol))
        def listening(port):
            msg("Listening on %r" % (port.getHost(),))
            endpoint = self.endpoints.client(reactor, port.getHost())

            client = endpoint.connect(
                ClientFactory.forProtocol(lambda: clientProtocol))
            def disconnect(proto):
                msg("About to disconnect %r" % (proto,))
                proto.transport.loseConnection()
            client.addCallback(disconnect)
            client.addErrback(lostConnectionDeferred.errback)
            return lostConnectionDeferred

        def onListening():
            portDeferred.addCallback(listening)
            portDeferred.addErrback(err)
            portDeferred.addBoth(lambda ignored: reactor.stop())
        needsRunningReactor(reactor, onListening)

        self.runReactor(reactor)

        # Drop the reference and get the garbage collector to tell us if there
        # are no references to the protocol instance left in the reactor.
        clientProtocol = None
        collect()
        self.assertIs(None, clientRef())
|
||||
|
||||
|
||||
|
||||
class LogObserverMixin(object):
    """
    Mixin for L{TestCase} subclasses which want to observe log events.
    """
    def observe(self):
        """
        Install a log observer for the duration of the test and return the
        list it appends observed events to.
        """
        observedEvents = []
        log.addObserver(observedEvents.append)
        self.addCleanup(log.removeObserver, observedEvents.append)
        return observedEvents
|
||||
|
||||
|
||||
|
||||
class BrokenContextFactory(object):
    """
    A context factory whose C{getContext} method always raises, for
    exercising the error handling for such a case.
    """
    message = "Some path was wrong maybe"

    def getContext(self):
        raise ValueError(self.message)
|
||||
|
||||
|
||||
|
||||
class StreamClientTestsMixin(object):
    """
    This mixin defines tests applicable to SOCK_STREAM client implementations.

    This must be mixed in to a L{ReactorBuilder
    <twisted.internet.test.reactormixins.ReactorBuilder>} subclass, as it
    depends on several of its methods.

    Then the methods C{connect} and C{listen} must be defined, defining a
    client and a server communicating with each other.
    """

    def test_interface(self):
        """
        The C{connect} method returns an object providing L{IConnector}.
        """
        reactor = self.buildReactor()
        connector = self.connect(reactor, ClientFactory())
        self.assertTrue(verifyObject(IConnector, connector))


    def test_clientConnectionFailedStopsReactor(self):
        """
        The reactor can be stopped by a client factory's
        C{clientConnectionFailed} method.
        """
        reactor = self.buildReactor()
        # Stop(reactor) stops the reactor from clientConnectionFailed, so
        # runReactor returning at all is the assertion here.
        needsRunningReactor(
            reactor, lambda: self.connect(reactor, Stop(reactor)))
        self.runReactor(reactor)


    def test_connectEvent(self):
        """
        This test checks that we correctly get a connection notification
        event for a client.  This ought to prevent a regression under Windows
        using the GTK2 reactor.  See #3925.
        """
        reactor = self.buildReactor()

        self.listen(reactor, ServerFactory.forProtocol(Protocol))
        connected = []

        class CheckConnection(Protocol):

            def connectionMade(self):
                connected.append(self)
                reactor.stop()

        clientFactory = Stop(reactor)
        clientFactory.protocol = CheckConnection

        needsRunningReactor(
            reactor, lambda: self.connect(reactor, clientFactory))

        reactor.run()

        self.assertTrue(connected)


    def test_unregisterProducerAfterDisconnect(self):
        """
        If a producer is unregistered from a transport after the transport has
        been disconnected (by the peer) and after C{loseConnection} has been
        called, the transport is not re-added to the reactor as a writer as
        would be necessary if the transport were still connected.
        """
        reactor = self.buildReactor()
        self.listen(reactor, ServerFactory.forProtocol(ClosingProtocol))

        finished = Deferred()
        finished.addErrback(log.err)
        finished.addCallback(lambda ign: reactor.stop())

        # Records whether the transport was still registered as a writer
        # after unregisterProducer; the assertion below requires it was not.
        writing = []

        class ClientProtocol(Protocol):
            """
            Protocol to connect, register a producer, try to lose the
            connection, wait for the server to disconnect from us, and then
            unregister the producer.
            """

            def connectionMade(self):
                log.msg("ClientProtocol.connectionMade")
                self.transport.registerProducer(
                    _SimplePullProducer(self.transport), False)
                self.transport.loseConnection()

            def connectionLost(self, reason):
                log.msg("ClientProtocol.connectionLost")
                self.unregister()
                writing.append(self.transport in _getWriters(reactor))
                finished.callback(None)

            def unregister(self):
                log.msg("ClientProtocol unregister")
                self.transport.unregisterProducer()

        clientFactory = ClientFactory()
        clientFactory.protocol = ClientProtocol
        self.connect(reactor, clientFactory)
        self.runReactor(reactor)
        self.assertFalse(writing[0],
                         "Transport was writing after unregisterProducer.")
|
||||
|
||||
|
||||
def test_disconnectWhileProducing(self):
|
||||
"""
|
||||
If C{loseConnection} is called while a producer is registered with the
|
||||
transport, the connection is closed after the producer is unregistered.
|
||||
"""
|
||||
reactor = self.buildReactor()
|
||||
|
||||
# For some reason, pyobject/pygtk will not deliver the close
|
||||
# notification that should happen after the unregisterProducer call in
|
||||
# this test. The selectable is in the write notification set, but no
|
||||
# notification ever arrives. Probably for the same reason #5233 led
|
||||
# win32eventreactor to be broken.
|
||||
skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
|
||||
reactorClassName = reactor.__class__.__name__
|
||||
if reactorClassName in skippedReactors and platform.isWindows():
|
||||
raise SkipTest(
|
||||
"A pygobject/pygtk bug disables this functionality "
|
||||
"on Windows.")
|
||||
|
||||
class Producer:
|
||||
def resumeProducing(self):
|
||||
log.msg("Producer.resumeProducing")
|
||||
|
||||
self.listen(reactor, ServerFactory.forProtocol(Protocol))
|
||||
|
||||
finished = Deferred()
|
||||
finished.addErrback(log.err)
|
||||
finished.addCallback(lambda ign: reactor.stop())
|
||||
|
||||
class ClientProtocol(Protocol):
|
||||
"""
|
||||
Protocol to connect, register a producer, try to lose the
|
||||
connection, unregister the producer, and wait for the connection to
|
||||
actually be lost.
|
||||
"""
|
||||
def connectionMade(self):
|
||||
log.msg("ClientProtocol.connectionMade")
|
||||
self.transport.registerProducer(Producer(), False)
|
||||
self.transport.loseConnection()
|
||||
# Let the reactor tick over, in case synchronously calling
|
||||
# loseConnection and then unregisterProducer is the same as
|
||||
# synchronously calling unregisterProducer and then
|
||||
# loseConnection (as it is in several reactors).
|
||||
reactor.callLater(0, reactor.callLater, 0, self.unregister)
|
||||
|
||||
def unregister(self):
|
||||
log.msg("ClientProtocol unregister")
|
||||
self.transport.unregisterProducer()
|
||||
# This should all be pretty quick. Fail the test
|
||||
# if we don't get a connectionLost event really
|
||||
# soon.
|
||||
reactor.callLater(
|
||||
1.0, finished.errback,
|
||||
Failure(Exception("Connection was not lost")))
|
||||
|
||||
def connectionLost(self, reason):
|
||||
log.msg("ClientProtocol.connectionLost")
|
||||
finished.callback(None)
|
||||
|
||||
clientFactory = ClientFactory()
|
||||
clientFactory.protocol = ClientProtocol
|
||||
self.connect(reactor, clientFactory)
|
||||
self.runReactor(reactor)
|
||||
# If the test failed, we logged an error already and trial
|
||||
# will catch it.
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
|
||||
This is a concatenation of thing1.pem and thing2.pem.
|
||||
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
|
||||
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
|
||||
YS0xLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
|
||||
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
|
||||
dyBZb3JrMB4XDTEwMDkyMTAxMjUxNFoXDTExMDkyMTAxMjUxNFowgagxETAPBgNV
|
||||
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
|
||||
VQQDExVmYWtlLWNhLTEuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
|
||||
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALRb
|
||||
VqC0CsaFgq1vbwPfs8zoP3ZYC/0sPMv0RJN+f3Dc7Q6YgNHS7o7TM3uAy/McADeW
|
||||
rwVuNJGe9k+4ZBHysmBH1sG64fHT5TlK9saPcUQqkubSWj4cKSDtVbQERWqC5Dy+
|
||||
qTQeZGYoPEMlnRXgMpST04DG//Dgzi4PYqUOjwxTAgMBAAEwDQYJKoZIhvcNAQEE
|
||||
BQADgYEAqNEdMXWEs8Co76wxL3/cSV3MjiAroVxJdI/3EzlnfPi1JeibbdWw31fC
|
||||
bn6428KTjjfhS31zo1yHG3YNXFEJXRscwLAH7ogz5kJwZMy/oS/96EFM10bkNwkK
|
||||
v+nWKN8i3t/E5TEIl3BPN8tchtWmH0rycVuzs5LwaewwR1AnUE4=
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
|
||||
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
|
||||
YS0yLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
|
||||
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
|
||||
dyBZb3JrMB4XDTEwMDkyMTAxMjUzMVoXDTExMDkyMTAxMjUzMVowgagxETAPBgNV
|
||||
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
|
||||
VQQDExVmYWtlLWNhLTIuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
|
||||
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNn
|
||||
b3EcKqBedQed1qJC4uGVx8PYmn2vxL3QwCVW1w0VjpZXyhCq/2VrYBhJAXRzpfvE
|
||||
dCqhtJKcdifwavUrTfr4yXu1MvWA0YuaAkj1TbmlHHQYACf3h+MPOXroYzhT72bO
|
||||
FSSLDWuitj0ozR+2Fk15QwLWUxaYLmwylxXAf7vpAgMBAAEwDQYJKoZIhvcNAQEE
|
||||
BQADgYEADB2N6VHHhm5M2rJqqGDXMm2dU+7abxiuN+PUygN2LXIsqdGBS6U7/rta
|
||||
lJNVeRaM423c8imfuklkIBG9Msn5+xm1xIMIULoi/efActDLbsX1x6IyHQrG5aDP
|
||||
/RMKBio9RjS8ajgSwyYVUZiCZBsn/T0/JS8K61YLpiv4Tg8uXmM=
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -0,0 +1 @@
|
|||
This file is not a certificate; it is present to make sure that it will be skipped.
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
|
||||
This is a self-signed certificate authority certificate to be used in tests.
|
||||
|
||||
It was created with the following command:
|
||||
certcreate -f thing1.pem -h fake-ca-1.example.com -e noreply@example.com \
|
||||
-S 1234 -o 'Twisted Matrix Labs'
|
||||
|
||||
'certcreate' may be obtained from <http://divmod.org/trac/wiki/DivmodEpsilon>
|
||||
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
|
||||
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
|
||||
YS0xLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
|
||||
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
|
||||
dyBZb3JrMB4XDTEwMDkyMTAxMjUxNFoXDTExMDkyMTAxMjUxNFowgagxETAPBgNV
|
||||
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
|
||||
VQQDExVmYWtlLWNhLTEuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
|
||||
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALRb
|
||||
VqC0CsaFgq1vbwPfs8zoP3ZYC/0sPMv0RJN+f3Dc7Q6YgNHS7o7TM3uAy/McADeW
|
||||
rwVuNJGe9k+4ZBHysmBH1sG64fHT5TlK9saPcUQqkubSWj4cKSDtVbQERWqC5Dy+
|
||||
qTQeZGYoPEMlnRXgMpST04DG//Dgzi4PYqUOjwxTAgMBAAEwDQYJKoZIhvcNAQEE
|
||||
BQADgYEAqNEdMXWEs8Co76wxL3/cSV3MjiAroVxJdI/3EzlnfPi1JeibbdWw31fC
|
||||
bn6428KTjjfhS31zo1yHG3YNXFEJXRscwLAH7ogz5kJwZMy/oS/96EFM10bkNwkK
|
||||
v+nWKN8i3t/E5TEIl3BPN8tchtWmH0rycVuzs5LwaewwR1AnUE4=
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
|
||||
This is a self-signed certificate authority certificate to be used in tests.
|
||||
|
||||
It was created with the following command:
|
||||
certcreate -f thing2.pem -h fake-ca-2.example.com -e noreply@example.com \
|
||||
-S 1234 -o 'Twisted Matrix Labs'
|
||||
|
||||
'certcreate' may be obtained from <http://divmod.org/trac/wiki/DivmodEpsilon>
|
||||
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
|
||||
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
|
||||
YS0yLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
|
||||
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
|
||||
dyBZb3JrMB4XDTEwMDkyMTAxMjUzMVoXDTExMDkyMTAxMjUzMVowgagxETAPBgNV
|
||||
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
|
||||
VQQDExVmYWtlLWNhLTIuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
|
||||
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNn
|
||||
b3EcKqBedQed1qJC4uGVx8PYmn2vxL3QwCVW1w0VjpZXyhCq/2VrYBhJAXRzpfvE
|
||||
dCqhtJKcdifwavUrTfr4yXu1MvWA0YuaAkj1TbmlHHQYACf3h+MPOXroYzhT72bO
|
||||
FSSLDWuitj0ozR+2Fk15QwLWUxaYLmwylxXAf7vpAgMBAAEwDQYJKoZIhvcNAQEE
|
||||
BQADgYEADB2N6VHHhm5M2rJqqGDXMm2dU+7abxiuN+PUygN2LXIsqdGBS6U7/rta
|
||||
lJNVeRaM423c8imfuklkIBG9Msn5+xm1xIMIULoi/efActDLbsX1x6IyHQrG5aDP
|
||||
/RMKBio9RjS8ajgSwyYVUZiCZBsn/T0/JS8K61YLpiv4Tg8uXmM=
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
|
||||
This is a self-signed certificate authority certificate to be used in tests.
|
||||
|
||||
It was created with the following command:
|
||||
certcreate -f thing2.pem -h fake-ca-2.example.com -e noreply@example.com \
|
||||
-S 1234 -o 'Twisted Matrix Labs'
|
||||
|
||||
'certcreate' may be obtained from <http://divmod.org/trac/wiki/DivmodEpsilon>
|
||||
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIICwjCCAisCAgTSMA0GCSqGSIb3DQEBBAUAMIGoMREwDwYDVQQLEwhTZWN1cml0
|
||||
eTEcMBoGA1UEChMTVHdpc3RlZCBNYXRyaXggTGFiczEeMBwGA1UEAxMVZmFrZS1j
|
||||
YS0yLmV4YW1wbGUuY29tMREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMx
|
||||
IjAgBgkqhkiG9w0BCQEWE25vcmVwbHlAZXhhbXBsZS5jb20xETAPBgNVBAcTCE5l
|
||||
dyBZb3JrMB4XDTEwMDkyMTAxMjUzMVoXDTExMDkyMTAxMjUzMVowgagxETAPBgNV
|
||||
BAsTCFNlY3VyaXR5MRwwGgYDVQQKExNUd2lzdGVkIE1hdHJpeCBMYWJzMR4wHAYD
|
||||
VQQDExVmYWtlLWNhLTIuZXhhbXBsZS5jb20xETAPBgNVBAgTCE5ldyBZb3JrMQsw
|
||||
CQYDVQQGEwJVUzEiMCAGCSqGSIb3DQEJARYTbm9yZXBseUBleGFtcGxlLmNvbTER
|
||||
MA8GA1UEBxMITmV3IFlvcmswgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMNn
|
||||
b3EcKqBedQed1qJC4uGVx8PYmn2vxL3QwCVW1w0VjpZXyhCq/2VrYBhJAXRzpfvE
|
||||
dCqhtJKcdifwavUrTfr4yXu1MvWA0YuaAkj1TbmlHHQYACf3h+MPOXroYzhT72bO
|
||||
FSSLDWuitj0ozR+2Fk15QwLWUxaYLmwylxXAf7vpAgMBAAEwDQYJKoZIhvcNAQEE
|
||||
BQADgYEADB2N6VHHhm5M2rJqqGDXMm2dU+7abxiuN+PUygN2LXIsqdGBS6U7/rta
|
||||
lJNVeRaM423c8imfuklkIBG9Msn5+xm1xIMIULoi/efActDLbsX1x6IyHQrG5aDP
|
||||
/RMKBio9RjS8ajgSwyYVUZiCZBsn/T0/JS8K61YLpiv4Tg8uXmM=
|
||||
-----END CERTIFICATE-----
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_endpoints -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Fake client and server endpoint string parser plugins for testing purposes.
|
||||
"""
|
||||
|
||||
from zope.interface.declarations import implementer
|
||||
from twisted.plugin import IPlugin
|
||||
from twisted.internet.interfaces import (
|
||||
IStreamClientEndpoint, IStreamServerEndpoint,
|
||||
IStreamClientEndpointStringParser, IStreamServerEndpointStringParser,
|
||||
IStreamClientEndpointStringParserWithReactor)
|
||||
|
||||
|
||||
@implementer(IPlugin)
|
||||
class PluginBase(object):
|
||||
|
||||
def __init__(self, pfx):
|
||||
self.prefix = pfx
|
||||
|
||||
|
||||
|
||||
@implementer(IStreamClientEndpointStringParser)
|
||||
class FakeClientParser(PluginBase):
|
||||
|
||||
def parseStreamClient(self, *a, **kw):
|
||||
return StreamClient(self, a, kw)
|
||||
|
||||
|
||||
|
||||
@implementer(IStreamClientEndpointStringParserWithReactor)
|
||||
class FakeClientParserWithReactor(PluginBase):
|
||||
|
||||
def parseStreamClient(self, *a, **kw):
|
||||
return StreamClient(self, a, kw)
|
||||
|
||||
|
||||
|
||||
@implementer(IStreamServerEndpointStringParser)
|
||||
class FakeParser(PluginBase):
|
||||
|
||||
def parseStreamServer(self, *a, **kw):
|
||||
return StreamServer(self, a, kw)
|
||||
|
||||
|
||||
|
||||
class EndpointBase(object):
|
||||
|
||||
def __init__(self, parser, args, kwargs):
|
||||
self.parser = parser
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
|
||||
|
||||
|
||||
@implementer(IStreamClientEndpoint)
|
||||
class StreamClient(EndpointBase):
|
||||
pass
|
||||
|
||||
|
||||
|
||||
@implementer(IStreamServerEndpoint)
|
||||
class StreamServer(EndpointBase):
|
||||
pass
|
||||
|
||||
|
||||
|
||||
# Instantiate plugin interface providers to register them.
|
||||
fake = FakeParser('fake')
|
||||
fakeClient = FakeClientParser('cfake')
|
||||
fakeClientWithReactor = FakeClientParserWithReactor('crfake')
|
||||
fakeClientWithoutPreference = FakeClientParser('cpfake')
|
||||
fakeClientWithReactorAndPreference = FakeClientParserWithReactor('cpfake')
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Testing helpers related to the module system.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
__all__ = ['NoReactor', 'AlternateReactor']
|
||||
|
||||
import sys
|
||||
|
||||
import twisted.internet
|
||||
from twisted.test.test_twisted import SetAsideModule
|
||||
|
||||
|
||||
|
||||
class NoReactor(SetAsideModule):
|
||||
"""
|
||||
Context manager that uninstalls the reactor, if any, and then restores it
|
||||
afterwards.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
SetAsideModule.__init__(self, "twisted.internet.reactor")
|
||||
|
||||
|
||||
def __enter__(self):
|
||||
SetAsideModule.__enter__(self)
|
||||
if "twisted.internet.reactor" in self.modules:
|
||||
del twisted.internet.reactor
|
||||
|
||||
|
||||
def __exit__(self, excType, excValue, traceback):
|
||||
SetAsideModule.__exit__(self, excType, excValue, traceback)
|
||||
# Clean up 'reactor' attribute that may have been set on
|
||||
# twisted.internet:
|
||||
reactor = self.modules.get("twisted.internet.reactor", None)
|
||||
if reactor is not None:
|
||||
twisted.internet.reactor = reactor
|
||||
else:
|
||||
try:
|
||||
del twisted.internet.reactor
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
|
||||
|
||||
class AlternateReactor(NoReactor):
|
||||
"""
|
||||
A context manager which temporarily installs a different object as the
|
||||
global reactor.
|
||||
"""
|
||||
|
||||
def __init__(self, reactor):
|
||||
"""
|
||||
@param reactor: Any object to install as the global reactor.
|
||||
"""
|
||||
NoReactor.__init__(self)
|
||||
self.alternate = reactor
|
||||
|
||||
|
||||
def __enter__(self):
|
||||
NoReactor.__enter__(self)
|
||||
twisted.internet.reactor = self.alternate
|
||||
sys.modules['twisted.internet.reactor'] = self.alternate
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
import sys
|
||||
|
||||
# Override theSystemPath so it throws KeyError on gi.pygtkcompat:
|
||||
from twisted.python import modules
|
||||
modules.theSystemPath = modules.PythonPath([], moduleDict={})
|
||||
|
||||
# Now, when we import gireactor it shouldn't use pygtkcompat, and should
|
||||
# instead prevent gobject from being importable:
|
||||
from twisted.internet import gireactor
|
||||
for name in gireactor._PYGTK_MODULES:
|
||||
if sys.modules[name] is not None:
|
||||
sys.stdout.write("failure, sys.modules[%r] is %r, instead of None" %
|
||||
(name, sys.modules["gobject"]))
|
||||
sys.exit(0)
|
||||
|
||||
try:
|
||||
import gobject
|
||||
except ImportError:
|
||||
sys.stdout.write("success")
|
||||
else:
|
||||
sys.stdout.write("failure: %s was imported" % (gobject.__path__,))
|
||||
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
|
||||
# A program which exits after starting a child which inherits its
|
||||
# stdin/stdout/stderr and keeps them open until stdin is closed.
|
||||
|
||||
import sys, os
|
||||
|
||||
def grandchild():
|
||||
sys.stdout.write('grandchild started')
|
||||
sys.stdout.flush()
|
||||
sys.stdin.read()
|
||||
|
||||
def main():
|
||||
if sys.argv[1] == 'child':
|
||||
if sys.argv[2] == 'windows':
|
||||
import win32api as api, win32process as proc
|
||||
info = proc.STARTUPINFO()
|
||||
info.hStdInput = api.GetStdHandle(api.STD_INPUT_HANDLE)
|
||||
info.hStdOutput = api.GetStdHandle(api.STD_OUTPUT_HANDLE)
|
||||
info.hStdError = api.GetStdHandle(api.STD_ERROR_HANDLE)
|
||||
python = sys.executable
|
||||
scriptDir = os.path.dirname(__file__)
|
||||
scriptName = os.path.basename(__file__)
|
||||
proc.CreateProcess(
|
||||
None, " ".join((python, scriptName, "grandchild")), None,
|
||||
None, 1, 0, os.environ, scriptDir, info)
|
||||
else:
|
||||
if os.fork() == 0:
|
||||
grandchild()
|
||||
else:
|
||||
grandchild()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
@ -0,0 +1,317 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Utilities for unit testing reactor implementations.
|
||||
|
||||
The main feature of this module is L{ReactorBuilder}, a base class for use when
|
||||
writing interface/blackbox tests for reactor implementations. Test case classes
|
||||
for reactor features should subclass L{ReactorBuilder} instead of
|
||||
L{SynchronousTestCase}. All of the features of L{SynchronousTestCase} will be
|
||||
available. Additionally, the tests will automatically be applied to all
|
||||
available reactor implementations.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
__all__ = ['TestTimeoutError', 'ReactorBuilder', 'needsRunningReactor']
|
||||
|
||||
import os, signal, time
|
||||
|
||||
from twisted.python.compat import _PY3
|
||||
from twisted.trial.unittest import SynchronousTestCase, SkipTest
|
||||
from twisted.trial.util import DEFAULT_TIMEOUT_DURATION, acquireAttribute
|
||||
from twisted.python.runtime import platform
|
||||
from twisted.python.reflect import namedAny
|
||||
from twisted.python.deprecate import _fullyQualifiedName as fullyQualifiedName
|
||||
|
||||
from twisted.python import log
|
||||
from twisted.python.failure import Failure
|
||||
|
||||
|
||||
# Access private APIs.
|
||||
if platform.isWindows():
|
||||
process = None
|
||||
elif _PY3:
|
||||
# Enable this on Python 3 when twisted.internet.process is ported.
|
||||
# See #5968.
|
||||
process = None
|
||||
else:
|
||||
from twisted.internet import process
|
||||
|
||||
|
||||
|
||||
class TestTimeoutError(Exception):
|
||||
"""
|
||||
The reactor was still running after the timeout period elapsed in
|
||||
L{ReactorBuilder.runReactor}.
|
||||
"""
|
||||
|
||||
|
||||
|
||||
def needsRunningReactor(reactor, thunk):
|
||||
"""
|
||||
Various functions within these tests need an already-running reactor at
|
||||
some point. They need to stop the reactor when the test has completed, and
|
||||
that means calling reactor.stop(). However, reactor.stop() raises an
|
||||
exception if the reactor isn't already running, so if the L{Deferred} that
|
||||
a particular API under test returns fires synchronously (as especially an
|
||||
endpoint's C{connect()} method may do, if the connect is to a local
|
||||
interface address) then the test won't be able to stop the reactor being
|
||||
tested and finish. So this calls C{thunk} only once C{reactor} is running.
|
||||
|
||||
(This is just an alias for
|
||||
L{twisted.internet.interfaces.IReactorCore.callWhenRunning} on the given
|
||||
reactor parameter, in order to centrally reference the above paragraph and
|
||||
repeating it everywhere as a comment.)
|
||||
|
||||
@param reactor: the L{twisted.internet.interfaces.IReactorCore} under test
|
||||
|
||||
@param thunk: a 0-argument callable, which eventually finishes the test in
|
||||
question, probably in a L{Deferred} callback.
|
||||
"""
|
||||
reactor.callWhenRunning(thunk)
|
||||
|
||||
|
||||
|
||||
class ReactorBuilder:
|
||||
"""
|
||||
L{SynchronousTestCase} mixin which provides a reactor-creation API. This
|
||||
mixin defines C{setUp} and C{tearDown}, so mix it in before
|
||||
L{SynchronousTestCase} or call its methods from the overridden ones in the
|
||||
subclass.
|
||||
|
||||
@cvar skippedReactors: A dict mapping FQPN strings of reactors for
|
||||
which the tests defined by this class will be skipped to strings
|
||||
giving the skip message.
|
||||
@cvar requiredInterfaces: A C{list} of interfaces which the reactor must
|
||||
provide or these tests will be skipped. The default, C{None}, means
|
||||
that no interfaces are required.
|
||||
@ivar reactorFactory: A no-argument callable which returns the reactor to
|
||||
use for testing.
|
||||
@ivar originalHandler: The SIGCHLD handler which was installed when setUp
|
||||
ran and which will be re-installed when tearDown runs.
|
||||
@ivar _reactors: A list of FQPN strings giving the reactors for which
|
||||
L{SynchronousTestCase}s will be created.
|
||||
"""
|
||||
|
||||
_reactors = [
|
||||
# Select works everywhere
|
||||
"twisted.internet.selectreactor.SelectReactor",
|
||||
]
|
||||
|
||||
if platform.isWindows():
|
||||
# PortableGtkReactor is only really interesting on Windows,
|
||||
# but not really Windows specific; if you want you can
|
||||
# temporarily move this up to the all-platforms list to test
|
||||
# it on other platforms. It's not there in general because
|
||||
# it's not _really_ worth it to support on other platforms,
|
||||
# since no one really wants to use it on other platforms.
|
||||
_reactors.extend([
|
||||
"twisted.internet.gtk2reactor.PortableGtkReactor",
|
||||
"twisted.internet.gireactor.PortableGIReactor",
|
||||
"twisted.internet.gtk3reactor.PortableGtk3Reactor",
|
||||
"twisted.internet.win32eventreactor.Win32Reactor",
|
||||
"twisted.internet.iocpreactor.reactor.IOCPReactor"])
|
||||
else:
|
||||
_reactors.extend([
|
||||
"twisted.internet.glib2reactor.Glib2Reactor",
|
||||
"twisted.internet.gtk2reactor.Gtk2Reactor",
|
||||
"twisted.internet.gireactor.GIReactor",
|
||||
"twisted.internet.gtk3reactor.Gtk3Reactor"])
|
||||
if platform.isMacOSX():
|
||||
_reactors.append("twisted.internet.cfreactor.CFReactor")
|
||||
else:
|
||||
_reactors.extend([
|
||||
"twisted.internet.pollreactor.PollReactor",
|
||||
"twisted.internet.epollreactor.EPollReactor"])
|
||||
if not platform.isLinux():
|
||||
# Presumably Linux is not going to start supporting kqueue, so
|
||||
# skip even trying this configuration.
|
||||
_reactors.extend([
|
||||
# Support KQueue on non-OS-X POSIX platforms for now.
|
||||
"twisted.internet.kqreactor.KQueueReactor",
|
||||
])
|
||||
|
||||
reactorFactory = None
|
||||
originalHandler = None
|
||||
requiredInterfaces = None
|
||||
skippedReactors = {}
|
||||
|
||||
def setUp(self):
|
||||
"""
|
||||
Clear the SIGCHLD handler, if there is one, to ensure an environment
|
||||
like the one which exists prior to a call to L{reactor.run}.
|
||||
"""
|
||||
if not platform.isWindows():
|
||||
self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
|
||||
|
||||
|
||||
def tearDown(self):
|
||||
"""
|
||||
Restore the original SIGCHLD handler and reap processes as long as
|
||||
there seem to be any remaining.
|
||||
"""
|
||||
if self.originalHandler is not None:
|
||||
signal.signal(signal.SIGCHLD, self.originalHandler)
|
||||
if process is not None:
|
||||
begin = time.time()
|
||||
while process.reapProcessHandlers:
|
||||
log.msg(
|
||||
"ReactorBuilder.tearDown reaping some processes %r" % (
|
||||
process.reapProcessHandlers,))
|
||||
process.reapAllProcesses()
|
||||
|
||||
# The process should exit on its own. However, if it
|
||||
# doesn't, we're stuck in this loop forever. To avoid
|
||||
# hanging the test suite, eventually give the process some
|
||||
# help exiting and move on.
|
||||
time.sleep(0.001)
|
||||
if time.time() - begin > 60:
|
||||
for pid in process.reapProcessHandlers:
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
raise Exception(
|
||||
"Timeout waiting for child processes to exit: %r" % (
|
||||
process.reapProcessHandlers,))
|
||||
|
||||
|
||||
def unbuildReactor(self, reactor):
|
||||
"""
|
||||
Clean up any resources which may have been allocated for the given
|
||||
reactor by its creation or by a test which used it.
|
||||
"""
|
||||
# Chris says:
|
||||
#
|
||||
# XXX These explicit calls to clean up the waker (and any other
|
||||
# internal readers) should become obsolete when bug #3063 is
|
||||
# fixed. -radix, 2008-02-29. Fortunately it should probably cause an
|
||||
# error when bug #3063 is fixed, so it should be removed in the same
|
||||
# branch that fixes it.
|
||||
#
|
||||
# -exarkun
|
||||
reactor._uninstallHandler()
|
||||
if getattr(reactor, '_internalReaders', None) is not None:
|
||||
for reader in reactor._internalReaders:
|
||||
reactor.removeReader(reader)
|
||||
reader.connectionLost(None)
|
||||
reactor._internalReaders.clear()
|
||||
|
||||
# Here's an extra thing unrelated to wakers but necessary for
|
||||
# cleaning up after the reactors we make. -exarkun
|
||||
reactor.disconnectAll()
|
||||
|
||||
# It would also be bad if any timed calls left over were allowed to
|
||||
# run.
|
||||
calls = reactor.getDelayedCalls()
|
||||
for c in calls:
|
||||
c.cancel()
|
||||
|
||||
|
||||
def buildReactor(self):
|
||||
"""
|
||||
Create and return a reactor using C{self.reactorFactory}.
|
||||
"""
|
||||
try:
|
||||
from twisted.internet.cfreactor import CFReactor
|
||||
from twisted.internet import reactor as globalReactor
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
if (isinstance(globalReactor, CFReactor)
|
||||
and self.reactorFactory is CFReactor):
|
||||
raise SkipTest(
|
||||
"CFReactor uses APIs which manipulate global state, "
|
||||
"so it's not safe to run its own reactor-builder tests "
|
||||
"under itself")
|
||||
try:
|
||||
reactor = self.reactorFactory()
|
||||
except:
|
||||
# Unfortunately, not all errors which result in a reactor
|
||||
# being unusable are detectable without actually
|
||||
# instantiating the reactor. So we catch some more here
|
||||
# and skip the test if necessary. We also log it to aid
|
||||
# with debugging, but flush the logged error so the test
|
||||
# doesn't fail.
|
||||
log.err(None, "Failed to install reactor")
|
||||
self.flushLoggedErrors()
|
||||
raise SkipTest(Failure().getErrorMessage())
|
||||
else:
|
||||
if self.requiredInterfaces is not None:
|
||||
missing = [
|
||||
required for required in self.requiredInterfaces
|
||||
if not required.providedBy(reactor)]
|
||||
if missing:
|
||||
self.unbuildReactor(reactor)
|
||||
raise SkipTest("%s does not provide %s" % (
|
||||
fullyQualifiedName(reactor.__class__),
|
||||
",".join([fullyQualifiedName(x) for x in missing])))
|
||||
self.addCleanup(self.unbuildReactor, reactor)
|
||||
return reactor
|
||||
|
||||
|
||||
def getTimeout(self):
|
||||
"""
|
||||
Determine how long to run the test before considering it failed.
|
||||
|
||||
@return: A C{int} or C{float} giving a number of seconds.
|
||||
"""
|
||||
return acquireAttribute(self._parents, 'timeout', DEFAULT_TIMEOUT_DURATION)
|
||||
|
||||
|
||||
def runReactor(self, reactor, timeout=None):
|
||||
"""
|
||||
Run the reactor for at most the given amount of time.
|
||||
|
||||
@param reactor: The reactor to run.
|
||||
|
||||
@type timeout: C{int} or C{float}
|
||||
@param timeout: The maximum amount of time, specified in seconds, to
|
||||
allow the reactor to run. If the reactor is still running after
|
||||
this much time has elapsed, it will be stopped and an exception
|
||||
raised. If C{None}, the default test method timeout imposed by
|
||||
Trial will be used. This depends on the L{IReactorTime}
|
||||
implementation of C{reactor} for correct operation.
|
||||
|
||||
@raise TestTimeoutError: If the reactor is still running after
|
||||
C{timeout} seconds.
|
||||
"""
|
||||
if timeout is None:
|
||||
timeout = self.getTimeout()
|
||||
|
||||
timedOut = []
|
||||
def stop():
|
||||
timedOut.append(None)
|
||||
reactor.stop()
|
||||
|
||||
timedOutCall = reactor.callLater(timeout, stop)
|
||||
reactor.run()
|
||||
if timedOut:
|
||||
raise TestTimeoutError(
|
||||
"reactor still running after %s seconds" % (timeout,))
|
||||
else:
|
||||
timedOutCall.cancel()
|
||||
|
||||
|
||||
def makeTestCaseClasses(cls):
|
||||
"""
|
||||
Create a L{SynchronousTestCase} subclass which mixes in C{cls} for each
|
||||
known reactor and return a dict mapping their names to them.
|
||||
"""
|
||||
classes = {}
|
||||
for reactor in cls._reactors:
|
||||
shortReactorName = reactor.split(".")[-1]
|
||||
name = (cls.__name__ + "." + shortReactorName).replace(".", "_")
|
||||
class testcase(cls, SynchronousTestCase):
|
||||
__module__ = cls.__module__
|
||||
if reactor in cls.skippedReactors:
|
||||
skip = cls.skippedReactors[reactor]
|
||||
try:
|
||||
reactorFactory = namedAny(reactor)
|
||||
except:
|
||||
skip = Failure().getErrorMessage()
|
||||
testcase.__name__ = name
|
||||
classes[testcase.__name__] = testcase
|
||||
return classes
|
||||
makeTestCaseClasses = classmethod(makeTestCaseClasses)
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.abstract}, a collection of APIs for implementing
|
||||
reactors.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.trial.unittest import SynchronousTestCase
|
||||
|
||||
from twisted.internet.abstract import isIPv6Address
|
||||
|
||||
class IPv6AddressTests(SynchronousTestCase):
|
||||
"""
|
||||
Tests for L{isIPv6Address}, a function for determining if a particular
|
||||
string is an IPv6 address literal.
|
||||
"""
|
||||
def test_empty(self):
|
||||
"""
|
||||
The empty string is not an IPv6 address literal.
|
||||
"""
|
||||
self.assertFalse(isIPv6Address(""))
|
||||
|
||||
|
||||
def test_colon(self):
|
||||
"""
|
||||
A single C{":"} is not an IPv6 address literal.
|
||||
"""
|
||||
self.assertFalse(isIPv6Address(":"))
|
||||
|
||||
|
||||
def test_loopback(self):
|
||||
"""
|
||||
C{"::1"} is the IPv6 loopback address literal.
|
||||
"""
|
||||
self.assertTrue(isIPv6Address("::1"))
|
||||
|
||||
|
||||
def test_scopeID(self):
|
||||
"""
|
||||
An otherwise valid IPv6 address literal may also include a C{"%"}
|
||||
followed by an arbitrary scope identifier.
|
||||
"""
|
||||
self.assertTrue(isIPv6Address("fe80::1%eth0"))
|
||||
self.assertTrue(isIPv6Address("fe80::2%1"))
|
||||
self.assertTrue(isIPv6Address("fe80::3%en2"))
|
||||
|
||||
|
||||
def test_invalidWithScopeID(self):
|
||||
"""
|
||||
An otherwise invalid IPv6 address literal is still invalid with a
|
||||
trailing scope identifier.
|
||||
"""
|
||||
self.assertFalse(isIPv6Address("%eth0"))
|
||||
self.assertFalse(isIPv6Address(":%eth0"))
|
||||
self.assertFalse(isIPv6Address("hello%eth0"))
|
||||
|
|
@ -0,0 +1,344 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import re
|
||||
import os
|
||||
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet.address import (
|
||||
IPv4Address, UNIXAddress, IPv6Address, HostnameAddress)
|
||||
|
||||
# Tests that exercise symlinks are skipped on platforms whose os module
# does not provide os.symlink.
if hasattr(os, "symlink"):
    symlinkSkip = None
else:
    symlinkSkip = "Platform does not support symlinks"
|
||||
|
||||
|
||||
class AddressTestCaseMixin(object):
    """
    Shared tests for address implementations: equality, hashing, and the
    C{str}/C{repr} string representations.

    Users of this mixin must provide C{buildAddress}, C{buildDifferentAddress},
    and an C{addressArgSpec} sequence of (attribute name, format) pairs.
    """
    def test_addressComparison(self):
        """
        Two different address instances, sharing the same properties are
        considered equal by C{==} and not considered not equal by C{!=}.

        Note: When applied via UNIXAddress class, this uses the same
        filename for both objects being compared.
        """
        self.assertTrue(self.buildAddress() == self.buildAddress())
        self.assertFalse(self.buildAddress() != self.buildAddress())


    def _stringRepresentation(self, stringFunction):
        """
        Verify that the string representation of an address object conforms to a
        simple pattern (the usual one for Python object reprs) and contains
        values which accurately reflect the attributes of the address.
        """
        addr = self.buildAddress()
        # Raw strings so the backslashes are regular expression escapes rather
        # than (invalid) string escape sequences.
        pattern = "".join([
            "^",
            r"([^\(]+Address)", # class name,
            r"\(", # opening bracket,
            "([^)]+)", # arguments,
            r"\)", # closing bracket,
            "$"
        ])
        stringValue = stringFunction(addr)
        m = re.match(pattern, stringValue)
        self.assertNotEqual(
            None, m,
            "%s does not match the standard __str__ pattern "
            "ClassName(arg1, arg2, etc)" % (stringValue,))
        self.assertEqual(addr.__class__.__name__, m.group(1))

        args = [x.strip() for x in m.group(2).split(",")]
        self.assertEqual(
            args,
            [argSpec[1] % (getattr(addr, argSpec[0]),)
             for argSpec in self.addressArgSpec])


    def test_str(self):
        """
        C{str} can be used to get a string representation of an address instance
        containing information about that address.
        """
        self._stringRepresentation(str)


    def test_repr(self):
        """
        C{repr} can be used to get a string representation of an address
        instance containing information about that address.
        """
        self._stringRepresentation(repr)


    def test_hash(self):
        """
        C{__hash__} can be used to get a hash of an address, allowing
        addresses to be used as keys in dictionaries, for instance.
        """
        addr = self.buildAddress()
        d = {addr: True}
        self.assertTrue(d[self.buildAddress()])


    def test_differentNamesComparison(self):
        """
        Check that comparison operators work correctly on address objects
        when a different name is passed in
        """
        self.assertFalse(self.buildAddress() == self.buildDifferentAddress())
        self.assertFalse(self.buildDifferentAddress() == self.buildAddress())

        self.assertTrue(self.buildAddress() != self.buildDifferentAddress())
        self.assertTrue(self.buildDifferentAddress() != self.buildAddress())


    def assertDeprecations(self, testMethod, message):
        """
        Assert that the a DeprecationWarning with the given message was
        emitted against the given method.
        """
        warnings = self.flushWarnings([testMethod])
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(warnings[0]['message'], message)
        self.assertEqual(len(warnings), 1)
|
||||
|
||||
|
||||
|
||||
class IPv4AddressTestCaseMixin(AddressTestCaseMixin):
    """
    Mixin supplying the argument specification shared by the IPv4 address
    string-representation tests.
    """
    addressArgSpec = (("type", "%s"), ("host", "%r"), ("port", "%d"))
|
||||
|
||||
|
||||
|
||||
class HostnameAddressTests(unittest.TestCase, AddressTestCaseMixin):
    """
    Test case for L{HostnameAddress}.
    """
    addressArgSpec = (("hostname", "%s"), ("port", "%d"))

    def buildAddress(self):
        """
        Create an arbitrary new L{HostnameAddress} instance.

        @return: A L{HostnameAddress} instance.
        """
        return HostnameAddress(b"example.com", 0)


    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different hostname.

        @return: A L{HostnameAddress} instance.
        """
        return HostnameAddress(b"example.net", 0)
|
||||
|
||||
|
||||
|
||||
class IPv4AddressTCPTestCase(unittest.SynchronousTestCase,
                             IPv4AddressTestCaseMixin):
    def buildAddress(self):
        """
        Create an arbitrary new L{IPv4Address} instance with a C{"TCP"}
        type. A new instance is created for each call, but always for the
        same address.
        """
        return IPv4Address("TCP", "127.0.0.1", 0)


    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return IPv4Address("TCP", "127.0.0.2", 0)


    def test_bwHackDeprecation(self):
        """
        If a value is passed for the C{_bwHack} parameter to L{IPv4Address},
        a deprecation warning is emitted.
        """
        # Construct this for warning side-effects, disregard the actual object.
        IPv4Address("TCP", "127.0.0.3", 0, _bwHack="TCP")

        message = (
            "twisted.internet.address.IPv4Address._bwHack is deprecated "
            "since Twisted 11.0")
        return self.assertDeprecations(self.test_bwHackDeprecation, message)
|
||||
|
||||
|
||||
|
||||
class IPv4AddressUDPTestCase(unittest.SynchronousTestCase,
                             IPv4AddressTestCaseMixin):
    def buildAddress(self):
        """
        Create an arbitrary new L{IPv4Address} instance with a C{"UDP"}
        type. A new instance is created for each call, but always for the
        same address.
        """
        return IPv4Address("UDP", "127.0.0.1", 0)


    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return IPv4Address("UDP", "127.0.0.2", 0)


    def test_bwHackDeprecation(self):
        """
        If a value is passed for the C{_bwHack} parameter to L{IPv4Address},
        a deprecation warning is emitted.
        """
        # Construct this for warning side-effects, disregard the actual object.
        IPv4Address("UDP", "127.0.0.3", 0, _bwHack="UDP")

        message = (
            "twisted.internet.address.IPv4Address._bwHack is deprecated "
            "since Twisted 11.0")
        return self.assertDeprecations(self.test_bwHackDeprecation, message)
|
||||
|
||||
|
||||
|
||||
class IPv6AddressTestCase(unittest.SynchronousTestCase, AddressTestCaseMixin):
    """
    String representation and comparison tests for L{IPv6Address}.
    """
    addressArgSpec = (("type", "%s"), ("host", "%r"), ("port", "%d"))

    def buildAddress(self):
        """
        Create an arbitrary new L{IPv6Address} instance with a C{"TCP"}
        type. A new instance is created for each call, but always for the
        same address.
        """
        return IPv6Address("TCP", "::1", 0)


    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return IPv6Address("TCP", "::2", 0)
|
||||
|
||||
|
||||
|
||||
class UNIXAddressTestCase(unittest.SynchronousTestCase, AddressTestCaseMixin):
    """
    Comparison, hashing, and deprecation tests for L{UNIXAddress}.
    """
    addressArgSpec = (("name", "%r"),)

    def setUp(self):
        self._socketAddress = self.mktemp()
        self._otherAddress = self.mktemp()


    def buildAddress(self):
        """
        Create an arbitrary new L{UNIXAddress} instance. A new instance is
        created for each call, but always for the same address.
        """
        return UNIXAddress(self._socketAddress)


    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a different fixed address.
        """
        return UNIXAddress(self._otherAddress)


    def _createSymlinkedFile(self):
        """
        Create the socket-address file and a symlink pointing at it.

        The file is closed at test cleanup time so the test does not leak an
        open file descriptor (previously the descriptor was left open).

        @return: the name of the symlink.
        """
        linkName = self.mktemp()
        self.fd = open(self._socketAddress, 'w')
        self.addCleanup(self.fd.close)
        os.symlink(os.path.abspath(self._socketAddress), linkName)
        return linkName


    def test_comparisonOfLinkedFiles(self):
        """
        UNIXAddress objects compare as equal if they link to the same file.
        """
        linkName = self._createSymlinkedFile()
        self.assertTrue(
            UNIXAddress(self._socketAddress) == UNIXAddress(linkName))
        self.assertTrue(
            UNIXAddress(linkName) == UNIXAddress(self._socketAddress))
    test_comparisonOfLinkedFiles.skip = symlinkSkip


    def test_hashOfLinkedFiles(self):
        """
        UNIXAddress Objects that compare as equal have the same hash value.
        """
        linkName = self._createSymlinkedFile()
        self.assertEqual(
            hash(UNIXAddress(self._socketAddress)), hash(UNIXAddress(linkName)))
    test_hashOfLinkedFiles.skip = symlinkSkip


    def test_bwHackDeprecation(self):
        """
        If a value is passed for the C{_bwHack} parameter to L{UNIXAddress},
        a deprecation warning is emitted.
        """
        # Construct this for warning side-effects, disregard the actual object.
        UNIXAddress(self.mktemp(), _bwHack='UNIX')

        message = (
            "twisted.internet.address.UNIXAddress._bwHack is deprecated "
            "since Twisted 11.0")
        return self.assertDeprecations(self.test_bwHackDeprecation, message)
|
||||
|
||||
|
||||
|
||||
class EmptyUNIXAddressTestCase(unittest.SynchronousTestCase,
                               AddressTestCaseMixin):
    """
    Tests for L{UNIXAddress} operations involving a C{None} address.
    """
    addressArgSpec = (("name", "%r"),)

    def setUp(self):
        self._socketAddress = self.mktemp()


    def buildAddress(self):
        """
        Create an arbitrary new L{UNIXAddress} instance. A new instance is
        created for each call, but always for the same address.
        """
        return UNIXAddress(self._socketAddress)


    def buildDifferentAddress(self):
        """
        Like L{buildAddress}, but with a fixed address of C{None}.
        """
        return UNIXAddress(None)


    def test_comparisonOfLinkedFiles(self):
        """
        A UNIXAddress referring to a C{None} address does not compare equal to a
        UNIXAddress referring to a symlink.
        """
        linkName = self.mktemp()
        self.fd = open(self._socketAddress, 'w')
        # Close the file at cleanup so the descriptor is not leaked.
        self.addCleanup(self.fd.close)
        os.symlink(os.path.abspath(self._socketAddress), linkName)
        self.assertTrue(
            UNIXAddress(self._socketAddress) != UNIXAddress(None))
        self.assertTrue(
            UNIXAddress(None) != UNIXAddress(self._socketAddress))
    test_comparisonOfLinkedFiles.skip = symlinkSkip


    def test_emptyHash(self):
        """
        C{__hash__} can be used to get a hash of an address, even one referring
        to C{None} rather than a real path.
        """
        addr = self.buildDifferentAddress()
        d = {addr: True}
        self.assertTrue(d[self.buildDifferentAddress()])
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,272 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.base}.
|
||||
"""
|
||||
|
||||
import socket
|
||||
try:
|
||||
from Queue import Queue
|
||||
except ImportError:
|
||||
from queue import Queue
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.python.threadpool import ThreadPool
|
||||
from twisted.internet.interfaces import IReactorTime, IReactorThreads
|
||||
from twisted.internet.error import DNSLookupError
|
||||
from twisted.internet.base import ThreadedResolver, DelayedCall
|
||||
from twisted.internet.task import Clock
|
||||
from twisted.trial.unittest import TestCase
|
||||
|
||||
|
||||
@implementer(IReactorTime, IReactorThreads)
class FakeReactor(object):
    """
    A fake reactor implementation which just supports enough reactor APIs for
    L{ThreadedResolver}.
    """

    def __init__(self):
        self._clock = Clock()
        self.callLater = self._clock.callLater

        self._threadpool = ThreadPool()
        self._threadpool.start()
        self.getThreadPool = lambda: self._threadpool

        self._threadCalls = Queue()


    def callFromThread(self, func, *args, **kwargs):
        # Record the call; it is run later by _runThreadCalls.
        self._threadCalls.put((func, args, kwargs))


    def _runThreadCalls(self):
        # Pop one queued call and execute it synchronously.
        func, args, kwargs = self._threadCalls.get()
        func(*args, **kwargs)


    def _stop(self):
        # Shut down the real threadpool started in __init__.
        self._threadpool.stop()
|
||||
|
||||
|
||||
|
||||
class ThreadedResolverTests(TestCase):
    """
    Tests for L{ThreadedResolver}.
    """
    def test_success(self):
        """
        L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires
        with the value returned by the call to L{socket.gethostbyname} in the
        threadpool of the reactor passed to L{ThreadedResolver.__init__}.
        """
        ip = "10.0.0.17"
        name = "foo.bar.example.com"
        timeout = 30

        reactor = FakeReactor()
        self.addCleanup(reactor._stop)

        lookedUp = []
        resolvedTo = []
        def fakeGetHostByName(name):
            lookedUp.append(name)
            return ip

        self.patch(socket, 'gethostbyname', fakeGetHostByName)

        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName(name, (timeout,))
        d.addCallback(resolvedTo.append)

        reactor._runThreadCalls()

        self.assertEqual(lookedUp, [name])
        self.assertEqual(resolvedTo, [ip])

        # Make sure that any timeout-related stuff gets cleaned up.
        reactor._clock.advance(timeout + 1)
        self.assertEqual(reactor._clock.calls, [])


    def test_failure(self):
        """
        L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires a
        L{Failure} if the call to L{socket.gethostbyname} raises an exception.
        """
        timeout = 30

        reactor = FakeReactor()
        self.addCleanup(reactor._stop)

        def fakeGetHostByName(name):
            raise IOError("ENOBUFS (this is a funny joke)")

        self.patch(socket, 'gethostbyname', fakeGetHostByName)

        failedWith = []
        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName("some.name", (timeout,))
        self.assertFailure(d, DNSLookupError)
        d.addCallback(failedWith.append)

        reactor._runThreadCalls()

        self.assertEqual(len(failedWith), 1)

        # Make sure that any timeout-related stuff gets cleaned up.
        reactor._clock.advance(timeout + 1)
        self.assertEqual(reactor._clock.calls, [])


    def test_timeout(self):
        """
        If L{socket.gethostbyname} does not complete before the specified
        timeout elapsed, the L{Deferred} returned by
        L{ThreadedResolver.getHostByBame} fails with L{DNSLookupError}.
        """
        timeout = 10

        reactor = FakeReactor()
        self.addCleanup(reactor._stop)

        result = Queue()
        def fakeGetHostByName(name):
            raise result.get()

        self.patch(socket, 'gethostbyname', fakeGetHostByName)

        failedWith = []
        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName("some.name", (timeout,))
        self.assertFailure(d, DNSLookupError)
        d.addCallback(failedWith.append)

        reactor._clock.advance(timeout - 1)
        self.assertEqual(failedWith, [])
        reactor._clock.advance(1)
        self.assertEqual(len(failedWith), 1)

        # Eventually the socket.gethostbyname does finish - in this case, with
        # an exception.  Nobody cares, though.
        result.put(IOError("The I/O was errorful"))
|
||||
|
||||
|
||||
|
||||
def nothing():
    """
    A do-nothing callable, referenced by name from
    L{DelayedCallTests.test_str}.
    """
    return None
|
||||
|
||||
|
||||
class DelayedCallTests(TestCase):
    """
    Tests for L{DelayedCall}.
    """
    def _getDelayedCallAt(self, time):
        """
        Get a L{DelayedCall} instance at a given C{time}.

        @param time: The absolute time at which the returned L{DelayedCall}
            will be scheduled.
        """
        def noop(call):
            pass
        return DelayedCall(time, lambda: None, (), {}, noop, noop, None)


    def setUp(self):
        """
        Create two L{DelayedCall} instanced scheduled to run at different
        times.
        """
        self.zero = self._getDelayedCallAt(0)
        self.one = self._getDelayedCallAt(1)


    def test_str(self):
        """
        The string representation of a L{DelayedCall} instance, as returned by
        C{str}, includes the unsigned id of the instance, as well as its state,
        the function to be called, and the function arguments.
        """
        dc = DelayedCall(12, nothing, (3, ), {"A": 5}, None, None, lambda: 1.5)
        expected = (
            "<DelayedCall 0x%x [10.5s] called=0 cancelled=0 nothing(3, A=5)>"
            % (id(dc),))
        self.assertEqual(str(dc), expected)


    def test_lt(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a < b} is true
        if and only if C{a} is scheduled to run before C{b}.
        """
        earlier, later = self.zero, self.one
        self.assertTrue(earlier < later)
        self.assertFalse(later < earlier)
        self.assertFalse(earlier < earlier)
        self.assertFalse(later < later)


    def test_le(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a <= b} is true
        if and only if C{a} is scheduled to run before C{b} or at the same
        time as C{b}.
        """
        earlier, later = self.zero, self.one
        self.assertTrue(earlier <= later)
        self.assertFalse(later <= earlier)
        self.assertTrue(earlier <= earlier)
        self.assertTrue(later <= later)


    def test_gt(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a > b} is true
        if and only if C{a} is scheduled to run after C{b}.
        """
        earlier, later = self.zero, self.one
        self.assertTrue(later > earlier)
        self.assertFalse(earlier > later)
        self.assertFalse(earlier > earlier)
        self.assertFalse(later > later)


    def test_ge(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a > b} is true
        if and only if C{a} is scheduled to run after C{b} or at the same
        time as C{b}.
        """
        earlier, later = self.zero, self.one
        self.assertTrue(later >= earlier)
        self.assertFalse(earlier >= later)
        self.assertTrue(earlier >= earlier)
        self.assertTrue(later >= later)


    def test_eq(self):
        """
        A L{DelayedCall} instance is only equal to itself.
        """
        # Explicitly use == here, instead of assertEqual, to be more
        # confident __eq__ is being tested.
        self.assertFalse(self.zero == self.one)
        self.assertTrue(self.zero == self.zero)
        self.assertTrue(self.one == self.one)


    def test_ne(self):
        """
        A L{DelayedCall} instance is not equal to any other object.
        """
        # Explicitly use != here, instead of assertEqual, to be more
        # confident __ne__ is being tested.
        self.assertTrue(self.zero != self.one)
        self.assertFalse(self.zero != self.zero)
        self.assertFalse(self.one != self.one)
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet._baseprocess} which implements process-related
|
||||
functionality that is useful in all platforms supporting L{IReactorProcess}.
|
||||
"""
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
from twisted.python.deprecate import getWarningMethod, setWarningMethod
|
||||
from twisted.trial.unittest import TestCase
|
||||
from twisted.internet._baseprocess import BaseProcess
|
||||
|
||||
|
||||
class BaseProcessTests(TestCase):
    """
    Tests for L{BaseProcess}, a parent class for other classes which represent
    processes which implements functionality common to many different process
    implementations.
    """
    def test_callProcessExited(self):
        """
        L{BaseProcess._callProcessExited} calls the C{processExited} method of
        its C{proto} attribute and passes it a L{Failure} wrapping the given
        exception.
        """
        class FakeProto:
            reason = None

            def processExited(self, reason):
                self.reason = reason

        reason = RuntimeError("fake reason")
        process = BaseProcess(FakeProto())
        process._callProcessExited(reason)
        # The protocol receives a Failure wrapping the original exception.
        process.proto.reason.trap(RuntimeError)
        self.assertIs(reason, process.proto.reason.value)


    def test_callProcessExitedMissing(self):
        """
        L{BaseProcess._callProcessExited} emits a L{DeprecationWarning} if the
        object referred to by its C{proto} attribute has no C{processExited}
        method.
        """
        class FakeProto:
            pass

        reason = object()
        process = BaseProcess(FakeProto())

        # Intercept warnings via the pluggable warning method, restoring the
        # previous one afterwards.
        self.addCleanup(setWarningMethod, getWarningMethod())
        warnings = []
        def collect(message, category, stacklevel):
            warnings.append((message, category, stacklevel))
        setWarningMethod(collect)

        process._callProcessExited(reason)

        [(message, category, stacklevel)] = warnings
        self.assertEqual(
            message,
            "Since Twisted 8.2, IProcessProtocol.processExited is required. "
            "%s.%s must implement it." % (
                FakeProto.__module__, FakeProto.__name__))
        self.assertIs(category, DeprecationWarning)
        # The stacklevel doesn't really make sense for this kind of
        # deprecation.  Requiring it to be 0 will at least avoid pointing to
        # any part of Twisted or a random part of the application's code, which
        # I think would be more misleading than having it point inside the
        # warning system itself. -exarkun
        self.assertEqual(stacklevel, 0)
|
||||
|
|
@ -0,0 +1,333 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for implementations of L{IReactorCore}.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
import signal
|
||||
import time
|
||||
import inspect
|
||||
|
||||
from twisted.internet.abstract import FileDescriptor
|
||||
from twisted.internet.error import ReactorAlreadyRunning, ReactorNotRestartable
|
||||
from twisted.internet.defer import Deferred
|
||||
from twisted.internet.test.reactormixins import ReactorBuilder
|
||||
|
||||
|
||||
class ObjectModelIntegrationMixin(object):
    """
    Helpers for tests about the object model of reactor-related objects.
    """
    def assertFullyNewStyle(self, instance):
        """
        Assert that the given object is an instance of a new-style class and
        that there are no classic classes in the inheritance hierarchy of
        that class.

        This is a beneficial condition because PyPy is better able to
        optimize attribute lookup on such classes.
        """
        self.assertIsInstance(instance, object)
        for klass in inspect.getmro(type(instance)):
            self.assertTrue(
                issubclass(klass, object),
                "%r is not new-style" % (klass,))
|
||||
|
||||
|
||||
|
||||
class ObjectModelIntegrationTest(ReactorBuilder, ObjectModelIntegrationMixin):
    """
    Test details of object model integration against all reactors.
    """

    def test_newstyleReactor(self):
        """
        Checks that all reactors on a platform have method resolution order
        containing only new style classes.
        """
        self.assertFullyNewStyle(self.buildReactor())
|
||||
|
||||
|
||||
|
||||
class SystemEventTestsBuilder(ReactorBuilder):
    """
    Builder defining tests relating to L{IReactorCore.addSystemEventTrigger}
    and L{IReactorCore.fireSystemEvent}.
    """
    def test_stopWhenNotStarted(self):
        """
        C{reactor.stop()} raises L{RuntimeError} when called when the reactor
        has not been started.
        """
        reactor = self.buildReactor()
        self.assertRaises(RuntimeError, reactor.stop)


    def test_stopWhenAlreadyStopped(self):
        """
        C{reactor.stop()} raises L{RuntimeError} when called after the reactor
        has been stopped.
        """
        reactor = self.buildReactor()
        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)
        self.assertRaises(RuntimeError, reactor.stop)


    def test_callWhenRunningOrder(self):
        """
        Functions are run in the order that they were passed to
        L{reactor.callWhenRunning}.
        """
        reactor = self.buildReactor()
        events = []
        reactor.callWhenRunning(events.append, "first")
        reactor.callWhenRunning(events.append, "second")
        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)
        self.assertEqual(events, ["first", "second"])


    def test_runningForStartupEvents(self):
        """
        The reactor is not running when C{"before"} C{"startup"} triggers are
        called and is running when C{"during"} and C{"after"} C{"startup"}
        triggers are called.
        """
        reactor = self.buildReactor()
        state = {}
        def recordRunning(phase):
            # Capture reactor.running at the moment the trigger fires.
            state[phase] = reactor.running
        for phase in ["before", "during", "after"]:
            reactor.addSystemEventTrigger(phase, "startup", recordRunning,
                                          phase)
        reactor.callWhenRunning(reactor.stop)
        self.assertEqual(state, {})
        self.runReactor(reactor)
        self.assertEqual(
            state,
            {"before": False,
             "during": True,
             "after": True})


    def test_signalHandlersInstalledDuringStartup(self):
        """
        Signal handlers are installed in responsed to the C{"during"}
        C{"startup"}.
        """
        reactor = self.buildReactor()
        phase = [None]
        def beforeStartup():
            phase[0] = "before"
        def afterStartup():
            phase[0] = "after"
        reactor.addSystemEventTrigger("before", "startup", beforeStartup)
        reactor.addSystemEventTrigger("after", "startup", afterStartup)

        sawPhase = []
        def fakeSignal(signum, action):
            sawPhase.append(phase[0])
        self.patch(signal, 'signal', fakeSignal)
        reactor.callWhenRunning(reactor.stop)
        self.assertEqual(phase[0], None)
        self.assertEqual(sawPhase, [])
        self.runReactor(reactor)
        self.assertIn("before", sawPhase)
        self.assertEqual(phase[0], "after")


    def test_stopShutDownEvents(self):
        """
        C{reactor.stop()} fires all three phases of shutdown event triggers
        before it makes C{reactor.run()} return.
        """
        reactor = self.buildReactor()
        events = []
        for phase in ["before", "during", "after"]:
            reactor.addSystemEventTrigger(
                phase, "shutdown", events.append, (phase, "shutdown"))
        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)
        self.assertEqual(events, [("before", "shutdown"),
                                  ("during", "shutdown"),
                                  ("after", "shutdown")])


    def test_shutdownFiresTriggersAsynchronously(self):
        """
        C{"before"} C{"shutdown"} triggers are not run synchronously from
        L{reactor.stop}.
        """
        reactor = self.buildReactor()
        events = []
        reactor.addSystemEventTrigger(
            "before", "shutdown", events.append, "before shutdown")
        def stopIt():
            reactor.stop()
            events.append("stopped")
        reactor.callWhenRunning(stopIt)
        self.assertEqual(events, [])
        self.runReactor(reactor)
        self.assertEqual(events, ["stopped", "before shutdown"])


    def test_shutdownDisconnectsCleanly(self):
        """
        A L{IFileDescriptor.connectionLost} implementation which raises an
        exception does not prevent the remaining L{IFileDescriptor}s from
        having their C{connectionLost} method called.
        """
        lostOK = [False]

        # Subclass FileDescriptor to get logPrefix
        class ProblematicFileDescriptor(FileDescriptor):
            def connectionLost(self, reason):
                raise RuntimeError("simulated connectionLost error")

        class OKFileDescriptor(FileDescriptor):
            def connectionLost(self, reason):
                lostOK[0] = True

        reactor = self.buildReactor()

        # Unfortunately, it is necessary to patch removeAll to directly control
        # the order of the returned values.  The test is only valid if
        # ProblematicFileDescriptor comes first.  Also, return these
        # descriptors only the first time removeAll is called so that if it is
        # called again the file descriptors aren't re-disconnected.
        fds = iter([ProblematicFileDescriptor(), OKFileDescriptor()])
        reactor.removeAll = lambda: fds
        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)
        self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
        self.assertTrue(lostOK[0])


    def test_multipleRun(self):
        """
        C{reactor.run()} raises L{ReactorAlreadyRunning} when called when
        the reactor is already running.
        """
        events = []
        def reentrantRun():
            self.assertRaises(ReactorAlreadyRunning, reactor.run)
            events.append("tested")
        reactor = self.buildReactor()
        reactor.callWhenRunning(reentrantRun)
        reactor.callWhenRunning(reactor.stop)
        self.runReactor(reactor)
        self.assertEqual(events, ["tested"])


    def test_runWithAsynchronousBeforeStartupTrigger(self):
        """
        When there is a C{'before'} C{'startup'} trigger which returns an
        unfired L{Deferred}, C{reactor.run()} starts the reactor and does not
        return until after C{reactor.stop()} is called
        """
        events = []
        def trigger():
            events.append('trigger')
            d = Deferred()
            d.addCallback(callback)
            reactor.callLater(0, d.callback, None)
            return d
        def callback(ignored):
            events.append('callback')
            reactor.stop()
        reactor = self.buildReactor()
        reactor.addSystemEventTrigger('before', 'startup', trigger)
        self.runReactor(reactor)
        self.assertEqual(events, ['trigger', 'callback'])


    def test_iterate(self):
        """
        C{reactor.iterate()} does not block.
        """
        reactor = self.buildReactor()
        t = reactor.callLater(5, reactor.crash)

        start = time.time()
        reactor.iterate(0) # Shouldn't block
        elapsed = time.time() - start

        self.assertTrue(elapsed < 2)
        t.cancel()


    def test_crash(self):
        """
        C{reactor.crash()} stops the reactor and does not fire shutdown
        triggers.
        """
        reactor = self.buildReactor()
        events = []
        reactor.addSystemEventTrigger(
            "before", "shutdown", events.append, ("before", "shutdown"))
        reactor.callWhenRunning(reactor.callLater, 0, reactor.crash)
        self.runReactor(reactor)
        self.assertFalse(reactor.running)
        self.assertFalse(
            events,
            "Shutdown triggers invoked but they should not have been.")


    def test_runAfterCrash(self):
        """
        C{reactor.run()} restarts the reactor after it has been stopped by
        C{reactor.crash()}.
        """
        events = []
        def crash():
            events.append('crash')
            reactor.crash()
        reactor = self.buildReactor()
        reactor.callWhenRunning(crash)
        self.runReactor(reactor)
        def stop():
            events.append(('stop', reactor.running))
            reactor.stop()
        reactor.callWhenRunning(stop)
        self.runReactor(reactor)
        self.assertEqual(events, ['crash', ('stop', True)])


    def test_runAfterStop(self):
        """
        C{reactor.run()} raises L{ReactorNotRestartable} when called when
        the reactor is being run after getting stopped priorly.
        """
        events = []
        def restart():
            self.assertRaises(ReactorNotRestartable, reactor.run)
            events.append('tested')
        reactor = self.buildReactor()
        reactor.callWhenRunning(reactor.stop)
        reactor.addSystemEventTrigger('after', 'shutdown', restart)
        self.runReactor(reactor)
        self.assertEqual(events, ['tested'])
|
||||
|
||||
|
||||
|
||||
globals().update(SystemEventTestsBuilder.makeTestCaseClasses())
|
||||
globals().update(ObjectModelIntegrationTest.makeTestCaseClasses())
|
||||
|
|
@ -0,0 +1,120 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.default}.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import select, sys
|
||||
from twisted.trial.unittest import SynchronousTestCase
|
||||
from twisted.python.runtime import Platform
|
||||
from twisted.internet import default
|
||||
from twisted.internet.default import _getInstallFunction, install
|
||||
from twisted.internet.test.test_main import NoReactor
|
||||
from twisted.internet.interfaces import IReactorCore
|
||||
|
||||
unix = Platform('posix', 'other')
|
||||
linux = Platform('posix', 'linux2')
|
||||
windows = Platform('nt', 'win32')
|
||||
osx = Platform('posix', 'darwin')
|
||||
|
||||
|
||||
class PollReactorTests(SynchronousTestCase):
|
||||
"""
|
||||
Tests for the cases of L{twisted.internet.default._getInstallFunction}
|
||||
in which it picks the poll(2) or epoll(7)-based reactors.
|
||||
"""
|
||||
|
||||
def assertIsPoll(self, install):
|
||||
"""
|
||||
Assert the given function will install the poll() reactor, or select()
|
||||
if poll() is unavailable.
|
||||
"""
|
||||
if hasattr(select, "poll"):
|
||||
self.assertEqual(
|
||||
install.__module__, 'twisted.internet.pollreactor')
|
||||
else:
|
||||
self.assertEqual(
|
||||
install.__module__, 'twisted.internet.selectreactor')
|
||||
|
||||
|
||||
def test_unix(self):
|
||||
"""
|
||||
L{_getInstallFunction} chooses the poll reactor on arbitrary Unix
|
||||
platforms, falling back to select(2) if it is unavailable.
|
||||
"""
|
||||
install = _getInstallFunction(unix)
|
||||
self.assertIsPoll(install)
|
||||
|
||||
|
||||
def test_linux(self):
|
||||
"""
|
||||
L{_getInstallFunction} chooses the epoll reactor on Linux, or poll if
|
||||
epoll is unavailable.
|
||||
"""
|
||||
install = _getInstallFunction(linux)
|
||||
try:
|
||||
from twisted.internet import epollreactor
|
||||
except ImportError:
|
||||
self.assertIsPoll(install)
|
||||
else:
|
||||
self.assertEqual(
|
||||
install.__module__, 'twisted.internet.epollreactor')
|
||||
|
||||
|
||||
|
||||
class SelectReactorTests(SynchronousTestCase):
|
||||
"""
|
||||
Tests for the cases of L{twisted.internet.default._getInstallFunction}
|
||||
in which it picks the select(2)-based reactor.
|
||||
"""
|
||||
def test_osx(self):
|
||||
"""
|
||||
L{_getInstallFunction} chooses the select reactor on OS X.
|
||||
"""
|
||||
install = _getInstallFunction(osx)
|
||||
self.assertEqual(
|
||||
install.__module__, 'twisted.internet.selectreactor')
|
||||
|
||||
|
||||
def test_windows(self):
|
||||
"""
|
||||
L{_getInstallFunction} chooses the select reactor on Windows.
|
||||
"""
|
||||
install = _getInstallFunction(windows)
|
||||
self.assertEqual(
|
||||
install.__module__, 'twisted.internet.selectreactor')
|
||||
|
||||
|
||||
|
||||
class InstallationTests(SynchronousTestCase):
|
||||
"""
|
||||
Tests for actual installation of the reactor.
|
||||
"""
|
||||
|
||||
def test_install(self):
|
||||
"""
|
||||
L{install} installs a reactor.
|
||||
"""
|
||||
with NoReactor():
|
||||
install()
|
||||
self.assertIn("twisted.internet.reactor", sys.modules)
|
||||
|
||||
|
||||
def test_reactor(self):
|
||||
"""
|
||||
Importing L{twisted.internet.reactor} installs the default reactor if
|
||||
none is installed.
|
||||
"""
|
||||
installed = []
|
||||
def installer():
|
||||
installed.append(True)
|
||||
return install()
|
||||
self.patch(default, "install", installer)
|
||||
|
||||
with NoReactor():
|
||||
from twisted.internet import reactor
|
||||
self.assertTrue(IReactorCore.providedBy(reactor))
|
||||
self.assertEqual(installed, [True])
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -0,0 +1,248 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.epollreactor}.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.trial.unittest import TestCase
|
||||
try:
|
||||
from twisted.internet.epollreactor import _ContinuousPolling
|
||||
except ImportError:
|
||||
_ContinuousPolling = None
|
||||
from twisted.internet.task import Clock
|
||||
from twisted.internet.error import ConnectionDone
|
||||
|
||||
|
||||
|
||||
class Descriptor(object):
|
||||
"""
|
||||
Records reads and writes, as if it were a C{FileDescriptor}.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.events = []
|
||||
|
||||
|
||||
def fileno(self):
|
||||
return 1
|
||||
|
||||
|
||||
def doRead(self):
|
||||
self.events.append("read")
|
||||
|
||||
|
||||
def doWrite(self):
|
||||
self.events.append("write")
|
||||
|
||||
|
||||
def connectionLost(self, reason):
|
||||
reason.trap(ConnectionDone)
|
||||
self.events.append("lost")
|
||||
|
||||
|
||||
|
||||
class ContinuousPollingTests(TestCase):
|
||||
"""
|
||||
L{_ContinuousPolling} can be used to read and write from C{FileDescriptor}
|
||||
objects.
|
||||
"""
|
||||
|
||||
def test_addReader(self):
|
||||
"""
|
||||
Adding a reader when there was previously no reader starts up a
|
||||
C{LoopingCall}.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
self.assertEqual(poller._loop, None)
|
||||
reader = object()
|
||||
self.assertFalse(poller.isReading(reader))
|
||||
poller.addReader(reader)
|
||||
self.assertNotEqual(poller._loop, None)
|
||||
self.assertTrue(poller._loop.running)
|
||||
self.assertIs(poller._loop.clock, poller._reactor)
|
||||
self.assertTrue(poller.isReading(reader))
|
||||
|
||||
|
||||
def test_addWriter(self):
|
||||
"""
|
||||
Adding a writer when there was previously no writer starts up a
|
||||
C{LoopingCall}.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
self.assertEqual(poller._loop, None)
|
||||
writer = object()
|
||||
self.assertFalse(poller.isWriting(writer))
|
||||
poller.addWriter(writer)
|
||||
self.assertNotEqual(poller._loop, None)
|
||||
self.assertTrue(poller._loop.running)
|
||||
self.assertIs(poller._loop.clock, poller._reactor)
|
||||
self.assertTrue(poller.isWriting(writer))
|
||||
|
||||
|
||||
def test_removeReader(self):
|
||||
"""
|
||||
Removing a reader stops the C{LoopingCall}.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
reader = object()
|
||||
poller.addReader(reader)
|
||||
poller.removeReader(reader)
|
||||
self.assertEqual(poller._loop, None)
|
||||
self.assertEqual(poller._reactor.getDelayedCalls(), [])
|
||||
self.assertFalse(poller.isReading(reader))
|
||||
|
||||
|
||||
def test_removeWriter(self):
|
||||
"""
|
||||
Removing a writer stops the C{LoopingCall}.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
writer = object()
|
||||
poller.addWriter(writer)
|
||||
poller.removeWriter(writer)
|
||||
self.assertEqual(poller._loop, None)
|
||||
self.assertEqual(poller._reactor.getDelayedCalls(), [])
|
||||
self.assertFalse(poller.isWriting(writer))
|
||||
|
||||
|
||||
def test_removeUnknown(self):
|
||||
"""
|
||||
Removing unknown readers and writers silently does nothing.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
poller.removeWriter(object())
|
||||
poller.removeReader(object())
|
||||
|
||||
|
||||
def test_multipleReadersAndWriters(self):
|
||||
"""
|
||||
Adding multiple readers and writers results in a single
|
||||
C{LoopingCall}.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
writer = object()
|
||||
poller.addWriter(writer)
|
||||
self.assertNotEqual(poller._loop, None)
|
||||
poller.addWriter(object())
|
||||
self.assertNotEqual(poller._loop, None)
|
||||
poller.addReader(object())
|
||||
self.assertNotEqual(poller._loop, None)
|
||||
poller.addReader(object())
|
||||
poller.removeWriter(writer)
|
||||
self.assertNotEqual(poller._loop, None)
|
||||
self.assertTrue(poller._loop.running)
|
||||
self.assertEqual(len(poller._reactor.getDelayedCalls()), 1)
|
||||
|
||||
|
||||
def test_readerPolling(self):
|
||||
"""
|
||||
Adding a reader causes its C{doRead} to be called every 1
|
||||
milliseconds.
|
||||
"""
|
||||
reactor = Clock()
|
||||
poller = _ContinuousPolling(reactor)
|
||||
desc = Descriptor()
|
||||
poller.addReader(desc)
|
||||
self.assertEqual(desc.events, [])
|
||||
reactor.advance(0.00001)
|
||||
self.assertEqual(desc.events, ["read"])
|
||||
reactor.advance(0.00001)
|
||||
self.assertEqual(desc.events, ["read", "read"])
|
||||
reactor.advance(0.00001)
|
||||
self.assertEqual(desc.events, ["read", "read", "read"])
|
||||
|
||||
|
||||
def test_writerPolling(self):
|
||||
"""
|
||||
Adding a writer causes its C{doWrite} to be called every 1
|
||||
milliseconds.
|
||||
"""
|
||||
reactor = Clock()
|
||||
poller = _ContinuousPolling(reactor)
|
||||
desc = Descriptor()
|
||||
poller.addWriter(desc)
|
||||
self.assertEqual(desc.events, [])
|
||||
reactor.advance(0.001)
|
||||
self.assertEqual(desc.events, ["write"])
|
||||
reactor.advance(0.001)
|
||||
self.assertEqual(desc.events, ["write", "write"])
|
||||
reactor.advance(0.001)
|
||||
self.assertEqual(desc.events, ["write", "write", "write"])
|
||||
|
||||
|
||||
def test_connectionLostOnRead(self):
|
||||
"""
|
||||
If a C{doRead} returns a value indicating disconnection,
|
||||
C{connectionLost} is called on it.
|
||||
"""
|
||||
reactor = Clock()
|
||||
poller = _ContinuousPolling(reactor)
|
||||
desc = Descriptor()
|
||||
desc.doRead = lambda: ConnectionDone()
|
||||
poller.addReader(desc)
|
||||
self.assertEqual(desc.events, [])
|
||||
reactor.advance(0.001)
|
||||
self.assertEqual(desc.events, ["lost"])
|
||||
|
||||
|
||||
def test_connectionLostOnWrite(self):
|
||||
"""
|
||||
If a C{doWrite} returns a value indicating disconnection,
|
||||
C{connectionLost} is called on it.
|
||||
"""
|
||||
reactor = Clock()
|
||||
poller = _ContinuousPolling(reactor)
|
||||
desc = Descriptor()
|
||||
desc.doWrite = lambda: ConnectionDone()
|
||||
poller.addWriter(desc)
|
||||
self.assertEqual(desc.events, [])
|
||||
reactor.advance(0.001)
|
||||
self.assertEqual(desc.events, ["lost"])
|
||||
|
||||
|
||||
def test_removeAll(self):
|
||||
"""
|
||||
L{_ContinuousPolling.removeAll} removes all descriptors and returns
|
||||
the readers and writers.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
reader = object()
|
||||
writer = object()
|
||||
both = object()
|
||||
poller.addReader(reader)
|
||||
poller.addReader(both)
|
||||
poller.addWriter(writer)
|
||||
poller.addWriter(both)
|
||||
removed = poller.removeAll()
|
||||
self.assertEqual(poller.getReaders(), [])
|
||||
self.assertEqual(poller.getWriters(), [])
|
||||
self.assertEqual(len(removed), 3)
|
||||
self.assertEqual(set(removed), set([reader, writer, both]))
|
||||
|
||||
|
||||
def test_getReaders(self):
|
||||
"""
|
||||
L{_ContinuousPolling.getReaders} returns a list of the read
|
||||
descriptors.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
reader = object()
|
||||
poller.addReader(reader)
|
||||
self.assertIn(reader, poller.getReaders())
|
||||
|
||||
|
||||
def test_getWriters(self):
|
||||
"""
|
||||
L{_ContinuousPolling.getWriters} returns a list of the write
|
||||
descriptors.
|
||||
"""
|
||||
poller = _ContinuousPolling(Clock())
|
||||
writer = object()
|
||||
poller.addWriter(writer)
|
||||
self.assertIn(writer, poller.getWriters())
|
||||
|
||||
if _ContinuousPolling is None:
|
||||
skip = "epoll not supported in this environment."
|
||||
|
|
@ -0,0 +1,426 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for implementations of L{IReactorFDSet}.
|
||||
"""
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
import os, socket, traceback
|
||||
|
||||
from zope.interface import implementer
|
||||
|
||||
from twisted.python.runtime import platform
|
||||
from twisted.trial.unittest import SkipTest
|
||||
from twisted.internet.interfaces import IReactorFDSet, IReadDescriptor
|
||||
from twisted.internet.abstract import FileDescriptor
|
||||
from twisted.internet.test.reactormixins import ReactorBuilder
|
||||
|
||||
# twisted.internet.tcp nicely defines some names with proper values on
|
||||
# several different platforms.
|
||||
from twisted.internet.tcp import EINPROGRESS, EWOULDBLOCK
|
||||
|
||||
|
||||
def socketpair():
|
||||
serverSocket = socket.socket()
|
||||
serverSocket.bind(('127.0.0.1', 0))
|
||||
serverSocket.listen(1)
|
||||
try:
|
||||
client = socket.socket()
|
||||
try:
|
||||
client.setblocking(False)
|
||||
try:
|
||||
client.connect(('127.0.0.1', serverSocket.getsockname()[1]))
|
||||
except socket.error as e:
|
||||
if e.args[0] not in (EINPROGRESS, EWOULDBLOCK):
|
||||
raise
|
||||
server, addr = serverSocket.accept()
|
||||
except:
|
||||
client.close()
|
||||
raise
|
||||
finally:
|
||||
serverSocket.close()
|
||||
|
||||
return client, server
|
||||
|
||||
|
||||
class ReactorFDSetTestsBuilder(ReactorBuilder):
|
||||
"""
|
||||
Builder defining tests relating to L{IReactorFDSet}.
|
||||
"""
|
||||
requiredInterfaces = [IReactorFDSet]
|
||||
|
||||
def _connectedPair(self):
|
||||
"""
|
||||
Return the two sockets which make up a new TCP connection.
|
||||
"""
|
||||
client, server = socketpair()
|
||||
self.addCleanup(client.close)
|
||||
self.addCleanup(server.close)
|
||||
return client, server
|
||||
|
||||
|
||||
def _simpleSetup(self):
|
||||
reactor = self.buildReactor()
|
||||
|
||||
client, server = self._connectedPair()
|
||||
|
||||
fd = FileDescriptor(reactor)
|
||||
fd.fileno = client.fileno
|
||||
|
||||
return reactor, fd, server
|
||||
|
||||
|
||||
def test_addReader(self):
|
||||
"""
|
||||
C{reactor.addReader()} accepts an L{IReadDescriptor} provider and calls
|
||||
its C{doRead} method when there may be data available on its C{fileno}.
|
||||
"""
|
||||
reactor, fd, server = self._simpleSetup()
|
||||
|
||||
def removeAndStop():
|
||||
reactor.removeReader(fd)
|
||||
reactor.stop()
|
||||
fd.doRead = removeAndStop
|
||||
reactor.addReader(fd)
|
||||
server.sendall(b'x')
|
||||
|
||||
# The reactor will only stop if it calls fd.doRead.
|
||||
self.runReactor(reactor)
|
||||
# Nothing to assert, just be glad we got this far.
|
||||
|
||||
|
||||
def test_removeReader(self):
|
||||
"""
|
||||
L{reactor.removeReader()} accepts an L{IReadDescriptor} provider
|
||||
previously passed to C{reactor.addReader()} and causes it to no longer
|
||||
be monitored for input events.
|
||||
"""
|
||||
reactor, fd, server = self._simpleSetup()
|
||||
|
||||
def fail():
|
||||
self.fail("doRead should not be called")
|
||||
fd.doRead = fail
|
||||
|
||||
reactor.addReader(fd)
|
||||
reactor.removeReader(fd)
|
||||
server.sendall(b'x')
|
||||
|
||||
# Give the reactor two timed event passes to notice that there's I/O
|
||||
# (if it is incorrectly watching for I/O).
|
||||
reactor.callLater(0, reactor.callLater, 0, reactor.stop)
|
||||
|
||||
self.runReactor(reactor)
|
||||
# Getting here means the right thing happened probably.
|
||||
|
||||
|
||||
def test_addWriter(self):
|
||||
"""
|
||||
C{reactor.addWriter()} accepts an L{IWriteDescriptor} provider and
|
||||
calls its C{doWrite} method when it may be possible to write to its
|
||||
C{fileno}.
|
||||
"""
|
||||
reactor, fd, server = self._simpleSetup()
|
||||
|
||||
def removeAndStop():
|
||||
reactor.removeWriter(fd)
|
||||
reactor.stop()
|
||||
fd.doWrite = removeAndStop
|
||||
reactor.addWriter(fd)
|
||||
|
||||
self.runReactor(reactor)
|
||||
# Getting here is great.
|
||||
|
||||
|
||||
def _getFDTest(self, kind):
|
||||
"""
|
||||
Helper for getReaders and getWriters tests.
|
||||
"""
|
||||
reactor = self.buildReactor()
|
||||
get = getattr(reactor, 'get' + kind + 's')
|
||||
add = getattr(reactor, 'add' + kind)
|
||||
remove = getattr(reactor, 'remove' + kind)
|
||||
|
||||
client, server = self._connectedPair()
|
||||
|
||||
self.assertNotIn(client, get())
|
||||
self.assertNotIn(server, get())
|
||||
|
||||
add(client)
|
||||
self.assertIn(client, get())
|
||||
self.assertNotIn(server, get())
|
||||
|
||||
remove(client)
|
||||
self.assertNotIn(client, get())
|
||||
self.assertNotIn(server, get())
|
||||
|
||||
|
||||
def test_getReaders(self):
|
||||
"""
|
||||
L{IReactorFDSet.getReaders} reflects the additions and removals made
|
||||
with L{IReactorFDSet.addReader} and L{IReactorFDSet.removeReader}.
|
||||
"""
|
||||
self._getFDTest('Reader')
|
||||
|
||||
|
||||
def test_removeWriter(self):
|
||||
"""
|
||||
L{reactor.removeWriter()} accepts an L{IWriteDescriptor} provider
|
||||
previously passed to C{reactor.addWriter()} and causes it to no longer
|
||||
be monitored for outputability.
|
||||
"""
|
||||
reactor, fd, server = self._simpleSetup()
|
||||
|
||||
def fail():
|
||||
self.fail("doWrite should not be called")
|
||||
fd.doWrite = fail
|
||||
|
||||
reactor.addWriter(fd)
|
||||
reactor.removeWriter(fd)
|
||||
|
||||
# Give the reactor two timed event passes to notice that there's I/O
|
||||
# (if it is incorrectly watching for I/O).
|
||||
reactor.callLater(0, reactor.callLater, 0, reactor.stop)
|
||||
|
||||
self.runReactor(reactor)
|
||||
# Getting here means the right thing happened probably.
|
||||
|
||||
|
||||
def test_getWriters(self):
|
||||
"""
|
||||
L{IReactorFDSet.getWriters} reflects the additions and removals made
|
||||
with L{IReactorFDSet.addWriter} and L{IReactorFDSet.removeWriter}.
|
||||
"""
|
||||
self._getFDTest('Writer')
|
||||
|
||||
|
||||
def test_removeAll(self):
|
||||
"""
|
||||
C{reactor.removeAll()} removes all registered L{IReadDescriptor}
|
||||
providers and all registered L{IWriteDescriptor} providers and returns
|
||||
them.
|
||||
"""
|
||||
reactor = self.buildReactor()
|
||||
|
||||
reactor, fd, server = self._simpleSetup()
|
||||
|
||||
fd.doRead = lambda: self.fail("doRead should not be called")
|
||||
fd.doWrite = lambda: self.fail("doWrite should not be called")
|
||||
|
||||
server.sendall(b'x')
|
||||
|
||||
reactor.addReader(fd)
|
||||
reactor.addWriter(fd)
|
||||
|
||||
removed = reactor.removeAll()
|
||||
|
||||
# Give the reactor two timed event passes to notice that there's I/O
|
||||
# (if it is incorrectly watching for I/O).
|
||||
reactor.callLater(0, reactor.callLater, 0, reactor.stop)
|
||||
|
||||
self.runReactor(reactor)
|
||||
# Getting here means the right thing happened probably.
|
||||
|
||||
self.assertEqual(removed, [fd])
|
||||
|
||||
|
||||
def test_removedFromReactor(self):
|
||||
"""
|
||||
A descriptor's C{fileno} method should not be called after the
|
||||
descriptor has been removed from the reactor.
|
||||
"""
|
||||
reactor = self.buildReactor()
|
||||
descriptor = RemovingDescriptor(reactor)
|
||||
reactor.callWhenRunning(descriptor.start)
|
||||
self.runReactor(reactor)
|
||||
self.assertEqual(descriptor.calls, [])
|
||||
|
||||
|
||||
def test_negativeOneFileDescriptor(self):
|
||||
"""
|
||||
If L{FileDescriptor.fileno} returns C{-1}, the descriptor is removed
|
||||
from the reactor.
|
||||
"""
|
||||
reactor = self.buildReactor()
|
||||
|
||||
client, server = self._connectedPair()
|
||||
|
||||
class DisappearingDescriptor(FileDescriptor):
|
||||
_fileno = server.fileno()
|
||||
|
||||
_received = b""
|
||||
|
||||
def fileno(self):
|
||||
return self._fileno
|
||||
|
||||
def doRead(self):
|
||||
self._fileno = -1
|
||||
self._received += server.recv(1)
|
||||
client.send(b'y')
|
||||
|
||||
def connectionLost(self, reason):
|
||||
reactor.stop()
|
||||
|
||||
descriptor = DisappearingDescriptor(reactor)
|
||||
reactor.addReader(descriptor)
|
||||
client.send(b'x')
|
||||
self.runReactor(reactor)
|
||||
self.assertEqual(descriptor._received, b"x")
|
||||
|
||||
|
||||
def test_lostFileDescriptor(self):
|
||||
"""
|
||||
The file descriptor underlying a FileDescriptor may be closed and
|
||||
replaced by another at some point. Bytes which arrive on the new
|
||||
descriptor must not be delivered to the FileDescriptor which was
|
||||
originally registered with the original descriptor of the same number.
|
||||
|
||||
Practically speaking, this is difficult or impossible to detect. The
|
||||
implementation relies on C{fileno} raising an exception if the original
|
||||
descriptor has gone away. If C{fileno} continues to return the original
|
||||
file descriptor value, the reactor may deliver events from that
|
||||
descriptor. This is a best effort attempt to ease certain debugging
|
||||
situations. Applications should not rely on it intentionally.
|
||||
"""
|
||||
reactor = self.buildReactor()
|
||||
|
||||
name = reactor.__class__.__name__
|
||||
if name in ('EPollReactor', 'KQueueReactor', 'CFReactor'):
|
||||
# Closing a file descriptor immediately removes it from the epoll
|
||||
# set without generating a notification. That means epollreactor
|
||||
# will not call any methods on Victim after the close, so there's
|
||||
# no chance to notice the socket is no longer valid.
|
||||
raise SkipTest("%r cannot detect lost file descriptors" % (name,))
|
||||
|
||||
client, server = self._connectedPair()
|
||||
|
||||
class Victim(FileDescriptor):
|
||||
"""
|
||||
This L{FileDescriptor} will have its socket closed out from under it
|
||||
and another socket will take its place. It will raise a
|
||||
socket.error from C{fileno} after this happens (because socket
|
||||
objects remember whether they have been closed), so as long as the
|
||||
reactor calls the C{fileno} method the problem will be detected.
|
||||
"""
|
||||
def fileno(self):
|
||||
return server.fileno()
|
||||
|
||||
def doRead(self):
|
||||
raise Exception("Victim.doRead should never be called")
|
||||
|
||||
def connectionLost(self, reason):
|
||||
"""
|
||||
When the problem is detected, the reactor should disconnect this
|
||||
file descriptor. When that happens, stop the reactor so the
|
||||
test ends.
|
||||
"""
|
||||
reactor.stop()
|
||||
|
||||
reactor.addReader(Victim())
|
||||
|
||||
# Arrange for the socket to be replaced at some unspecified time.
|
||||
# Significantly, this will not be while any I/O processing code is on
|
||||
# the stack. It is something that happens independently and cannot be
|
||||
# relied upon to happen at a convenient time, such as within a call to
|
||||
# doRead.
|
||||
def messItUp():
|
||||
newC, newS = self._connectedPair()
|
||||
fileno = server.fileno()
|
||||
server.close()
|
||||
os.dup2(newS.fileno(), fileno)
|
||||
newC.send(b"x")
|
||||
reactor.callLater(0, messItUp)
|
||||
|
||||
self.runReactor(reactor)
|
||||
|
||||
# If the implementation feels like logging the exception raised by
|
||||
# MessedUp.fileno, that's fine.
|
||||
self.flushLoggedErrors(socket.error)
|
||||
if platform.isWindows():
|
||||
test_lostFileDescriptor.skip = (
|
||||
"Cannot duplicate socket filenos on Windows")
|
||||
|
||||
|
||||
def test_connectionLostOnShutdown(self):
|
||||
"""
|
||||
Any file descriptors added to the reactor have their C{connectionLost}
|
||||
called when C{reactor.stop} is called.
|
||||
"""
|
||||
reactor = self.buildReactor()
|
||||
|
||||
class DoNothingDescriptor(FileDescriptor):
|
||||
def doRead(self):
|
||||
return None
|
||||
def doWrite(self):
|
||||
return None
|
||||
|
||||
client, server = self._connectedPair()
|
||||
|
||||
fd1 = DoNothingDescriptor(reactor)
|
||||
fd1.fileno = client.fileno
|
||||
fd2 = DoNothingDescriptor(reactor)
|
||||
fd2.fileno = server.fileno
|
||||
reactor.addReader(fd1)
|
||||
reactor.addWriter(fd2)
|
||||
|
||||
reactor.callWhenRunning(reactor.stop)
|
||||
self.runReactor(reactor)
|
||||
self.assertTrue(fd1.disconnected)
|
||||
self.assertTrue(fd2.disconnected)
|
||||
|
||||
|
||||
|
||||
@implementer(IReadDescriptor)
|
||||
class RemovingDescriptor(object):
|
||||
"""
|
||||
A read descriptor which removes itself from the reactor as soon as it
|
||||
gets a chance to do a read and keeps track of when its own C{fileno}
|
||||
method is called.
|
||||
|
||||
@ivar insideReactor: A flag which is true as long as the reactor has
|
||||
this descriptor as a reader.
|
||||
|
||||
@ivar calls: A list of the bottom of the call stack for any call to
|
||||
C{fileno} when C{insideReactor} is false.
|
||||
"""
|
||||
|
||||
|
||||
def __init__(self, reactor):
|
||||
self.reactor = reactor
|
||||
self.insideReactor = False
|
||||
self.calls = []
|
||||
self.read, self.write = socketpair()
|
||||
|
||||
|
||||
def start(self):
|
||||
self.insideReactor = True
|
||||
self.reactor.addReader(self)
|
||||
self.write.send(b'a')
|
||||
|
||||
|
||||
def logPrefix(self):
|
||||
return 'foo'
|
||||
|
||||
|
||||
def doRead(self):
|
||||
self.reactor.removeReader(self)
|
||||
self.insideReactor = False
|
||||
self.reactor.stop()
|
||||
self.read.close()
|
||||
self.write.close()
|
||||
|
||||
|
||||
def fileno(self):
|
||||
if not self.insideReactor:
|
||||
self.calls.append(traceback.extract_stack(limit=5)[:-1])
|
||||
return self.read.fileno()
|
||||
|
||||
|
||||
def connectionLost(self, reason):
|
||||
# Ideally we'd close the descriptors here... but actually
|
||||
# connectionLost is never called because we remove ourselves from the
|
||||
# reactor before it stops.
|
||||
pass
|
||||
|
||||
globals().update(ReactorFDSetTestsBuilder.makeTestCaseClasses())
|
||||
|
|
@ -0,0 +1,99 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Whitebox tests for L{twisted.internet.abstract.FileDescriptor}.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from zope.interface.verify import verifyClass
|
||||
|
||||
from twisted.internet.abstract import FileDescriptor
|
||||
from twisted.internet.interfaces import IPushProducer
|
||||
from twisted.trial.unittest import SynchronousTestCase
|
||||
|
||||
|
||||
|
||||
class MemoryFile(FileDescriptor):
|
||||
"""
|
||||
A L{FileDescriptor} customization which writes to a Python list in memory
|
||||
with certain limitations.
|
||||
|
||||
@ivar _written: A C{list} of C{bytes} which have been accepted as written.
|
||||
|
||||
@ivar _freeSpace: A C{int} giving the number of bytes which will be accepted
|
||||
by future writes.
|
||||
"""
|
||||
connected = True
|
||||
|
||||
def __init__(self):
|
||||
FileDescriptor.__init__(self, reactor=object())
|
||||
self._written = []
|
||||
self._freeSpace = 0
|
||||
|
||||
|
||||
def startWriting(self):
|
||||
pass
|
||||
|
||||
|
||||
def stopWriting(self):
|
||||
pass
|
||||
|
||||
|
||||
def writeSomeData(self, data):
|
||||
"""
|
||||
Copy at most C{self._freeSpace} bytes from C{data} into C{self._written}.
|
||||
|
||||
@return: A C{int} indicating how many bytes were copied from C{data}.
|
||||
"""
|
||||
acceptLength = min(self._freeSpace, len(data))
|
||||
if acceptLength:
|
||||
self._freeSpace -= acceptLength
|
||||
self._written.append(data[:acceptLength])
|
||||
return acceptLength
|
||||
|
||||
|
||||
|
||||
class FileDescriptorTests(SynchronousTestCase):
|
||||
"""
|
||||
Tests for L{FileDescriptor}.
|
||||
"""
|
||||
def test_writeWithUnicodeRaisesException(self):
|
||||
"""
|
||||
L{FileDescriptor.write} doesn't accept unicode data.
|
||||
"""
|
||||
fileDescriptor = FileDescriptor(reactor=object())
|
||||
self.assertRaises(TypeError, fileDescriptor.write, u'foo')
|
||||
|
||||
|
||||
def test_writeSequenceWithUnicodeRaisesException(self):
|
||||
"""
|
||||
L{FileDescriptor.writeSequence} doesn't accept unicode data.
|
||||
"""
|
||||
fileDescriptor = FileDescriptor(reactor=object())
|
||||
self.assertRaises(
|
||||
TypeError, fileDescriptor.writeSequence, [b'foo', u'bar', b'baz'])
|
||||
|
||||
|
||||
def test_implementInterfaceIPushProducer(self):
|
||||
"""
|
||||
L{FileDescriptor} should implement L{IPushProducer}.
|
||||
"""
|
||||
self.assertTrue(verifyClass(IPushProducer, FileDescriptor))
|
||||
|
||||
|
||||
|
||||
class WriteDescriptorTests(SynchronousTestCase):
|
||||
"""
|
||||
Tests for L{FileDescriptor}'s implementation of L{IWriteDescriptor}.
|
||||
"""
|
||||
def test_kernelBufferFull(self):
|
||||
"""
|
||||
When L{FileDescriptor.writeSomeData} returns C{0} to indicate no more
|
||||
data can be written immediately, L{FileDescriptor.doWrite} returns
|
||||
C{None}.
|
||||
"""
|
||||
descriptor = MemoryFile()
|
||||
descriptor.write(b"hello, world")
|
||||
self.assertIs(None, descriptor.doWrite())
|
||||
|
|
@ -0,0 +1,251 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
GI/GTK3 reactor tests.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import sys, os
|
||||
try:
|
||||
from twisted.internet import gireactor
|
||||
from gi.repository import Gio
|
||||
except ImportError:
|
||||
gireactor = None
|
||||
gtk3reactor = None
|
||||
else:
|
||||
# gtk3reactor may be unavailable even if gireactor is available; in
|
||||
# particular in pygobject 3.4/gtk 3.6, when no X11 DISPLAY is found.
|
||||
try:
|
||||
from twisted.internet import gtk3reactor
|
||||
except ImportError:
|
||||
gtk3reactor = None
|
||||
else:
|
||||
from gi.repository import Gtk
|
||||
|
||||
from twisted.python.filepath import FilePath
|
||||
from twisted.python.runtime import platform
|
||||
from twisted.internet.defer import Deferred
|
||||
from twisted.internet.error import ReactorAlreadyRunning
|
||||
from twisted.internet.protocol import ProcessProtocol
|
||||
from twisted.trial.unittest import TestCase, SkipTest
|
||||
from twisted.internet.test.reactormixins import ReactorBuilder
|
||||
from twisted.test.test_twisted import SetAsideModule
|
||||
from twisted.internet.interfaces import IReactorProcess
|
||||
|
||||
# Skip all tests if gi is unavailable:
|
||||
if gireactor is None:
|
||||
skip = "gtk3/gi not importable"
|
||||
|
||||
|
||||
class GApplicationRegistration(ReactorBuilder, TestCase):
|
||||
"""
|
||||
GtkApplication and GApplication are supported by
|
||||
L{twisted.internet.gtk3reactor} and L{twisted.internet.gireactor}.
|
||||
|
||||
We inherit from L{ReactorBuilder} in order to use some of its
|
||||
reactor-running infrastructure, but don't need its test-creation
|
||||
functionality.
|
||||
"""
|
||||
def runReactor(self, app, reactor):
|
||||
"""
|
||||
Register the app, run the reactor, make sure app was activated, and
|
||||
that reactor was running, and that reactor can be stopped.
|
||||
"""
|
||||
if not hasattr(app, "quit"):
|
||||
raise SkipTest("Version of PyGObject is too old.")
|
||||
|
||||
result = []
|
||||
def stop():
|
||||
result.append("stopped")
|
||||
reactor.stop()
|
||||
def activate(widget):
|
||||
result.append("activated")
|
||||
reactor.callLater(0, stop)
|
||||
app.connect('activate', activate)
|
||||
|
||||
# We want reactor.stop() to *always* stop the event loop, even if
|
||||
# someone has called hold() on the application and never done the
|
||||
# corresponding release() -- for more details see
|
||||
# http://developer.gnome.org/gio/unstable/GApplication.html.
|
||||
app.hold()
|
||||
|
||||
reactor.registerGApplication(app)
|
||||
ReactorBuilder.runReactor(self, reactor)
|
||||
self.assertEqual(result, ["activated", "stopped"])
|
||||
|
||||
|
||||
def test_gApplicationActivate(self):
|
||||
"""
|
||||
L{Gio.Application} instances can be registered with a gireactor.
|
||||
"""
|
||||
reactor = gireactor.GIReactor(useGtk=False)
|
||||
self.addCleanup(self.unbuildReactor, reactor)
|
||||
app = Gio.Application(
|
||||
application_id='com.twistedmatrix.trial.gireactor',
|
||||
flags=Gio.ApplicationFlags.FLAGS_NONE)
|
||||
|
||||
self.runReactor(app, reactor)
|
||||
|
||||
|
||||
def test_gtkApplicationActivate(self):
|
||||
"""
|
||||
L{Gtk.Application} instances can be registered with a gtk3reactor.
|
||||
"""
|
||||
reactor = gtk3reactor.Gtk3Reactor()
|
||||
self.addCleanup(self.unbuildReactor, reactor)
|
||||
app = Gtk.Application(
|
||||
application_id='com.twistedmatrix.trial.gtk3reactor',
|
||||
flags=Gio.ApplicationFlags.FLAGS_NONE)
|
||||
|
||||
self.runReactor(app, reactor)
|
||||
|
||||
if gtk3reactor is None:
|
||||
test_gtkApplicationActivate.skip = (
|
||||
"Gtk unavailable (may require running with X11 DISPLAY env set)")
|
||||
|
||||
|
||||
def test_portable(self):
|
||||
"""
|
||||
L{gireactor.PortableGIReactor} doesn't support application
|
||||
registration at this time.
|
||||
"""
|
||||
reactor = gireactor.PortableGIReactor()
|
||||
self.addCleanup(self.unbuildReactor, reactor)
|
||||
app = Gio.Application(
|
||||
application_id='com.twistedmatrix.trial.gireactor',
|
||||
flags=Gio.ApplicationFlags.FLAGS_NONE)
|
||||
self.assertRaises(NotImplementedError,
|
||||
reactor.registerGApplication, app)
|
||||
|
||||
|
||||
def test_noQuit(self):
|
||||
"""
|
||||
Older versions of PyGObject lack C{Application.quit}, and so won't
|
||||
allow registration.
|
||||
"""
|
||||
reactor = gireactor.GIReactor(useGtk=False)
|
||||
self.addCleanup(self.unbuildReactor, reactor)
|
||||
# An app with no "quit" method:
|
||||
app = object()
|
||||
exc = self.assertRaises(RuntimeError, reactor.registerGApplication, app)
|
||||
self.assertTrue(exc.args[0].startswith(
|
||||
"Application registration is not"))
|
||||
|
||||
|
||||
def test_cantRegisterAfterRun(self):
|
||||
"""
|
||||
It is not possible to register a C{Application} after the reactor has
|
||||
already started.
|
||||
"""
|
||||
reactor = gireactor.GIReactor(useGtk=False)
|
||||
self.addCleanup(self.unbuildReactor, reactor)
|
||||
app = Gio.Application(
|
||||
application_id='com.twistedmatrix.trial.gireactor',
|
||||
flags=Gio.ApplicationFlags.FLAGS_NONE)
|
||||
|
||||
def tryRegister():
|
||||
exc = self.assertRaises(ReactorAlreadyRunning,
|
||||
reactor.registerGApplication, app)
|
||||
self.assertEqual(exc.args[0],
|
||||
"Can't register application after reactor was started.")
|
||||
reactor.stop()
|
||||
reactor.callLater(0, tryRegister)
|
||||
ReactorBuilder.runReactor(self, reactor)
|
||||
|
||||
|
||||
def test_cantRegisterTwice(self):
|
||||
"""
|
||||
It is not possible to register more than one C{Application}.
|
||||
"""
|
||||
reactor = gireactor.GIReactor(useGtk=False)
|
||||
self.addCleanup(self.unbuildReactor, reactor)
|
||||
app = Gio.Application(
|
||||
application_id='com.twistedmatrix.trial.gireactor',
|
||||
flags=Gio.ApplicationFlags.FLAGS_NONE)
|
||||
reactor.registerGApplication(app)
|
||||
app2 = Gio.Application(
|
||||
application_id='com.twistedmatrix.trial.gireactor2',
|
||||
flags=Gio.ApplicationFlags.FLAGS_NONE)
|
||||
exc = self.assertRaises(RuntimeError,
|
||||
reactor.registerGApplication, app2)
|
||||
self.assertEqual(exc.args[0],
|
||||
"Can't register more than one application instance.")
|
||||
|
||||
|
||||
|
||||
class PygtkCompatibilityTests(TestCase):
|
||||
"""
|
||||
pygtk imports are either prevented, or a compatiblity layer is used if
|
||||
possible.
|
||||
"""
|
||||
|
||||
def test_noCompatibilityLayer(self):
|
||||
"""
|
||||
If no compatiblity layer is present, imports of gobject and friends
|
||||
are disallowed.
|
||||
|
||||
We do this by running a process where we make sure gi.pygtkcompat
|
||||
isn't present.
|
||||
"""
|
||||
from twisted.internet import reactor
|
||||
if not IReactorProcess.providedBy(reactor):
|
||||
raise SkipTest("No process support available in this reactor.")
|
||||
|
||||
result = Deferred()
|
||||
class Stdout(ProcessProtocol):
|
||||
data = b""
|
||||
|
||||
def errReceived(self, err):
|
||||
print(err)
|
||||
|
||||
def outReceived(self, data):
|
||||
self.data += data
|
||||
|
||||
def processExited(self, reason):
|
||||
result.callback(self.data)
|
||||
|
||||
path = FilePath(__file__.encode("utf-8")).sibling(
|
||||
b"process_gireactornocompat.py").path
|
||||
reactor.spawnProcess(Stdout(), sys.executable, [sys.executable, path],
|
||||
env=os.environ)
|
||||
result.addCallback(self.assertEqual, b"success")
|
||||
return result
|
||||
|
||||
|
||||
def test_compatibilityLayer(self):
|
||||
"""
|
||||
If compatiblity layer is present, importing gobject uses the gi
|
||||
compatibility layer.
|
||||
"""
|
||||
if "gi.pygtkcompat" not in sys.modules:
|
||||
raise SkipTest("This version of gi doesn't include pygtkcompat.")
|
||||
import gobject
|
||||
self.assertTrue(gobject.__name__.startswith("gi."))
|
||||
|
||||
|
||||
|
||||
class Gtk3ReactorTests(TestCase):
|
||||
"""
|
||||
Tests for L{gtk3reactor}.
|
||||
"""
|
||||
|
||||
def test_requiresDISPLAY(self):
|
||||
"""
|
||||
On X11, L{gtk3reactor} is unimportable if the C{DISPLAY} environment
|
||||
variable is not set.
|
||||
"""
|
||||
display = os.environ.get("DISPLAY", None)
|
||||
if display is not None:
|
||||
self.addCleanup(os.environ.__setitem__, "DISPLAY", display)
|
||||
del os.environ["DISPLAY"]
|
||||
with SetAsideModule("twisted.internet.gtk3reactor"):
|
||||
exc = self.assertRaises(ImportError,
|
||||
__import__, "twisted.internet.gtk3reactor")
|
||||
self.assertEqual(
|
||||
exc.args[0],
|
||||
"Gtk3 requires X11, and no DISPLAY environment variable is set")
|
||||
|
||||
if platform.getType() != "posix" or platform.isMacOSX():
|
||||
test_requiresDISPLAY.skip = "This test is only relevant when using X11"
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for twisted.internet.glibbase.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
import sys
|
||||
from twisted.trial.unittest import TestCase
|
||||
from twisted.internet._glibbase import ensureNotImported
|
||||
|
||||
|
||||
|
||||
class EnsureNotImportedTests(TestCase):
|
||||
"""
|
||||
L{ensureNotImported} protects against unwanted past and future imports.
|
||||
"""
|
||||
|
||||
def test_ensureWhenNotImported(self):
|
||||
"""
|
||||
If the specified modules have never been imported, and import
|
||||
prevention is requested, L{ensureNotImported} makes sure they will not
|
||||
be imported in the future.
|
||||
"""
|
||||
modules = {}
|
||||
self.patch(sys, "modules", modules)
|
||||
ensureNotImported(["m1", "m2"], "A message.",
|
||||
preventImports=["m1", "m2", "m3"])
|
||||
self.assertEqual(modules, {"m1": None, "m2": None, "m3": None})
|
||||
|
||||
|
||||
def test_ensureWhenNotImportedDontPrevent(self):
|
||||
"""
|
||||
If the specified modules have never been imported, and import
|
||||
prevention is not requested, L{ensureNotImported} has no effect.
|
||||
"""
|
||||
modules = {}
|
||||
self.patch(sys, "modules", modules)
|
||||
ensureNotImported(["m1", "m2"], "A message.")
|
||||
self.assertEqual(modules, {})
|
||||
|
||||
|
||||
def test_ensureWhenFailedToImport(self):
|
||||
"""
|
||||
If the specified modules have been set to C{None} in C{sys.modules},
|
||||
L{ensureNotImported} does not complain.
|
||||
"""
|
||||
modules = {"m2": None}
|
||||
self.patch(sys, "modules", modules)
|
||||
ensureNotImported(["m1", "m2"], "A message.", preventImports=["m1", "m2"])
|
||||
self.assertEqual(modules, {"m1": None, "m2": None})
|
||||
|
||||
|
||||
def test_ensureFailsWhenImported(self):
|
||||
"""
|
||||
If one of the specified modules has been previously imported,
|
||||
L{ensureNotImported} raises an exception.
|
||||
"""
|
||||
module = object()
|
||||
modules = {"m2": module}
|
||||
self.patch(sys, "modules", modules)
|
||||
e = self.assertRaises(ImportError, ensureNotImported,
|
||||
["m1", "m2"], "A message.",
|
||||
preventImports=["m1", "m2"])
|
||||
self.assertEqual(modules, {"m2": module})
|
||||
self.assertEqual(e.args, ("A message.",))
|
||||
|
|
@ -0,0 +1,95 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
|
||||
deprecated.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
from twisted.trial.unittest import TestCase
|
||||
|
||||
|
||||
class GtkReactorDeprecation(TestCase):
|
||||
"""
|
||||
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
|
||||
deprecated.
|
||||
"""
|
||||
|
||||
class StubGTK:
|
||||
class GDK:
|
||||
INPUT_READ = None
|
||||
def input_add(self, *params):
|
||||
pass
|
||||
|
||||
class StubPyGTK:
|
||||
def require(self, something):
|
||||
pass
|
||||
|
||||
def setUp(self):
|
||||
"""
|
||||
Create a stub for the module 'gtk' if it does not exist, so that it can
|
||||
be imported without errors or warnings.
|
||||
"""
|
||||
self.mods = sys.modules.copy()
|
||||
sys.modules['gtk'] = self.StubGTK()
|
||||
sys.modules['pygtk'] = self.StubPyGTK()
|
||||
|
||||
|
||||
def tearDown(self):
|
||||
"""
|
||||
Return sys.modules to the way it was before the test.
|
||||
"""
|
||||
sys.modules.clear()
|
||||
sys.modules.update(self.mods)
|
||||
|
||||
|
||||
def lookForDeprecationWarning(self, testmethod, attributeName):
|
||||
warningsShown = self.flushWarnings([testmethod])
|
||||
self.assertEqual(len(warningsShown), 1)
|
||||
self.assertIs(warningsShown[0]['category'], DeprecationWarning)
|
||||
self.assertEqual(
|
||||
warningsShown[0]['message'],
|
||||
"twisted.internet.gtkreactor." + attributeName + " "
|
||||
"was deprecated in Twisted 10.1.0: All new applications should be "
|
||||
"written with gtk 2.x, which is supported by "
|
||||
"twisted.internet.gtk2reactor.")
|
||||
|
||||
|
||||
def test_gtkReactor(self):
|
||||
"""
|
||||
Test deprecation of L{gtkreactor.GtkReactor}
|
||||
"""
|
||||
from twisted.internet import gtkreactor
|
||||
gtkreactor.GtkReactor();
|
||||
self.lookForDeprecationWarning(self.test_gtkReactor, "GtkReactor")
|
||||
|
||||
|
||||
def test_portableGtkReactor(self):
|
||||
"""
|
||||
Test deprecation of L{gtkreactor.GtkReactor}
|
||||
"""
|
||||
from twisted.internet import gtkreactor
|
||||
gtkreactor.PortableGtkReactor()
|
||||
self.lookForDeprecationWarning(self.test_portableGtkReactor,
|
||||
"PortableGtkReactor")
|
||||
|
||||
|
||||
def test_install(self):
|
||||
"""
|
||||
Test deprecation of L{gtkreactor.install}
|
||||
"""
|
||||
from twisted.internet import gtkreactor
|
||||
self.assertRaises(AssertionError, gtkreactor.install)
|
||||
self.lookForDeprecationWarning(self.test_install, "install")
|
||||
|
||||
|
||||
def test_portableInstall(self):
|
||||
"""
|
||||
Test deprecation of L{gtkreactor.portableInstall}
|
||||
"""
|
||||
from twisted.internet import gtkreactor
|
||||
self.assertRaises(AssertionError, gtkreactor.portableInstall)
|
||||
self.lookForDeprecationWarning(self.test_portableInstall,
|
||||
"portableInstall")
|
||||
|
|
@ -0,0 +1,90 @@
|
|||
# -*- test-case-name: twisted.internet.test.test_inlinecb -*-
|
||||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.defer.inlineCallbacks}.
|
||||
|
||||
Some tests for inlineCallbacks are defined in L{twisted.test.test_defgen} as
|
||||
well.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.trial.unittest import TestCase
|
||||
from twisted.internet.defer import Deferred, returnValue, inlineCallbacks
|
||||
|
||||
class NonLocalExitTests(TestCase):
|
||||
"""
|
||||
It's possible for L{returnValue} to be (accidentally) invoked at a stack
|
||||
level below the L{inlineCallbacks}-decorated function which it is exiting.
|
||||
If this happens, L{returnValue} should report useful errors.
|
||||
|
||||
If L{returnValue} is invoked from a function not decorated by
|
||||
L{inlineCallbacks}, it will emit a warning if it causes an
|
||||
L{inlineCallbacks} function further up the stack to exit.
|
||||
"""
|
||||
|
||||
def mistakenMethod(self):
|
||||
"""
|
||||
This method mistakenly invokes L{returnValue}, despite the fact that it
|
||||
is not decorated with L{inlineCallbacks}.
|
||||
"""
|
||||
returnValue(1)
|
||||
|
||||
|
||||
def assertMistakenMethodWarning(self, resultList):
|
||||
"""
|
||||
Flush the current warnings and assert that we have been told that
|
||||
C{mistakenMethod} was invoked, and that the result from the Deferred
|
||||
that was fired (appended to the given list) is C{mistakenMethod}'s
|
||||
result. The warning should indicate that an inlineCallbacks function
|
||||
called 'inline' was made to exit.
|
||||
"""
|
||||
self.assertEqual(resultList, [1])
|
||||
warnings = self.flushWarnings(offendingFunctions=[self.mistakenMethod])
|
||||
self.assertEqual(len(warnings), 1)
|
||||
self.assertEqual(warnings[0]['category'], DeprecationWarning)
|
||||
self.assertEqual(
|
||||
warnings[0]['message'],
|
||||
"returnValue() in 'mistakenMethod' causing 'inline' to exit: "
|
||||
"returnValue should only be invoked by functions decorated with "
|
||||
"inlineCallbacks")
|
||||
|
||||
|
||||
def test_returnValueNonLocalWarning(self):
|
||||
"""
|
||||
L{returnValue} will emit a non-local exit warning in the simplest case,
|
||||
where the offending function is invoked immediately.
|
||||
"""
|
||||
@inlineCallbacks
|
||||
def inline():
|
||||
self.mistakenMethod()
|
||||
returnValue(2)
|
||||
yield 0
|
||||
d = inline()
|
||||
results = []
|
||||
d.addCallback(results.append)
|
||||
self.assertMistakenMethodWarning(results)
|
||||
|
||||
|
||||
def test_returnValueNonLocalDeferred(self):
|
||||
"""
|
||||
L{returnValue} will emit a non-local warning in the case where the
|
||||
L{inlineCallbacks}-decorated function has already yielded a Deferred
|
||||
and therefore moved its generator function along.
|
||||
"""
|
||||
cause = Deferred()
|
||||
@inlineCallbacks
|
||||
def inline():
|
||||
yield cause
|
||||
self.mistakenMethod()
|
||||
returnValue(2)
|
||||
effect = inline()
|
||||
results = []
|
||||
effect.addCallback(results.append)
|
||||
self.assertEqual(results, [])
|
||||
cause.callback(1)
|
||||
self.assertMistakenMethodWarning(results)
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,504 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for the inotify wrapper in L{twisted.internet.inotify}.
|
||||
"""
|
||||
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.python import filepath, runtime
|
||||
from twisted.trial import unittest
|
||||
|
||||
try:
|
||||
from twisted.python import _inotify
|
||||
except ImportError:
|
||||
inotify = None
|
||||
else:
|
||||
from twisted.internet import inotify
|
||||
|
||||
|
||||
|
||||
class TestINotify(unittest.TestCase):
|
||||
"""
|
||||
Define all the tests for the basic functionality exposed by
|
||||
L{inotify.INotify}.
|
||||
"""
|
||||
if not runtime.platform.supportsINotify():
|
||||
skip = "This platform doesn't support INotify."
|
||||
|
||||
def setUp(self):
|
||||
self.dirname = filepath.FilePath(self.mktemp())
|
||||
self.dirname.createDirectory()
|
||||
self.inotify = inotify.INotify()
|
||||
self.inotify.startReading()
|
||||
self.addCleanup(self.inotify.loseConnection)
|
||||
|
||||
|
||||
def test_initializationErrors(self):
|
||||
"""
|
||||
L{inotify.INotify} emits a C{RuntimeError} when initialized
|
||||
in an environment that doesn't support inotify as we expect it.
|
||||
|
||||
We just try to raise an exception for every possible case in
|
||||
the for loop in L{inotify.INotify._inotify__init__}.
|
||||
"""
|
||||
class FakeINotify:
|
||||
def init(self):
|
||||
raise inotify.INotifyError()
|
||||
self.patch(inotify.INotify, '_inotify', FakeINotify())
|
||||
self.assertRaises(inotify.INotifyError, inotify.INotify)
|
||||
|
||||
|
||||
def _notificationTest(self, mask, operation, expectedPath=None):
|
||||
"""
|
||||
Test notification from some filesystem operation.
|
||||
|
||||
@param mask: The event mask to use when setting up the watch.
|
||||
|
||||
@param operation: A function which will be called with the
|
||||
name of a file in the watched directory and which should
|
||||
trigger the event.
|
||||
|
||||
@param expectedPath: Optionally, the name of the path which is
|
||||
expected to come back in the notification event; this will
|
||||
also be passed to C{operation} (primarily useful when the
|
||||
operation is being done to the directory itself, not a
|
||||
file in it).
|
||||
|
||||
@return: A L{Deferred} which fires successfully when the
|
||||
expected event has been received or fails otherwise.
|
||||
"""
|
||||
if expectedPath is None:
|
||||
expectedPath = self.dirname.child("foo.bar")
|
||||
notified = defer.Deferred()
|
||||
def cbNotified((watch, filename, events)):
|
||||
self.assertEqual(filename, expectedPath)
|
||||
self.assertTrue(events & mask)
|
||||
notified.addCallback(cbNotified)
|
||||
|
||||
self.inotify.watch(
|
||||
self.dirname, mask=mask,
|
||||
callbacks=[lambda *args: notified.callback(args)])
|
||||
operation(expectedPath)
|
||||
return notified
|
||||
|
||||
|
||||
def test_access(self):
|
||||
"""
|
||||
Reading from a file in a monitored directory sends an
|
||||
C{inotify.IN_ACCESS} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
path.setContent("foo")
|
||||
path.getContent()
|
||||
|
||||
return self._notificationTest(inotify.IN_ACCESS, operation)
|
||||
|
||||
|
||||
def test_modify(self):
|
||||
"""
|
||||
Writing to a file in a monitored directory sends an
|
||||
C{inotify.IN_MODIFY} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
fObj = path.open("w")
|
||||
fObj.write('foo')
|
||||
fObj.close()
|
||||
|
||||
return self._notificationTest(inotify.IN_MODIFY, operation)
|
||||
|
||||
|
||||
def test_attrib(self):
|
||||
"""
|
||||
Changing the metadata of a a file in a monitored directory
|
||||
sends an C{inotify.IN_ATTRIB} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
path.touch()
|
||||
path.touch()
|
||||
|
||||
return self._notificationTest(inotify.IN_ATTRIB, operation)
|
||||
|
||||
|
||||
def test_closeWrite(self):
|
||||
"""
|
||||
Closing a file which was open for writing in a monitored
|
||||
directory sends an C{inotify.IN_CLOSE_WRITE} event to the
|
||||
callback.
|
||||
"""
|
||||
def operation(path):
|
||||
fObj = path.open("w")
|
||||
fObj.close()
|
||||
|
||||
return self._notificationTest(inotify.IN_CLOSE_WRITE, operation)
|
||||
|
||||
|
||||
def test_closeNoWrite(self):
|
||||
"""
|
||||
Closing a file which was open for reading but not writing in a
|
||||
monitored directory sends an C{inotify.IN_CLOSE_NOWRITE} event
|
||||
to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
path.touch()
|
||||
fObj = path.open("r")
|
||||
fObj.close()
|
||||
|
||||
return self._notificationTest(inotify.IN_CLOSE_NOWRITE, operation)
|
||||
|
||||
|
||||
def test_open(self):
|
||||
"""
|
||||
Opening a file in a monitored directory sends an
|
||||
C{inotify.IN_OPEN} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
fObj = path.open("w")
|
||||
fObj.close()
|
||||
|
||||
return self._notificationTest(inotify.IN_OPEN, operation)
|
||||
|
||||
|
||||
def test_movedFrom(self):
|
||||
"""
|
||||
Moving a file out of a monitored directory sends an
|
||||
C{inotify.IN_MOVED_FROM} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
fObj = path.open("w")
|
||||
fObj.close()
|
||||
path.moveTo(filepath.FilePath(self.mktemp()))
|
||||
|
||||
return self._notificationTest(inotify.IN_MOVED_FROM, operation)
|
||||
|
||||
|
||||
def test_movedTo(self):
|
||||
"""
|
||||
Moving a file into a monitored directory sends an
|
||||
C{inotify.IN_MOVED_TO} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
p = filepath.FilePath(self.mktemp())
|
||||
p.touch()
|
||||
p.moveTo(path)
|
||||
|
||||
return self._notificationTest(inotify.IN_MOVED_TO, operation)
|
||||
|
||||
|
||||
def test_create(self):
|
||||
"""
|
||||
Creating a file in a monitored directory sends an
|
||||
C{inotify.IN_CREATE} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
fObj = path.open("w")
|
||||
fObj.close()
|
||||
|
||||
return self._notificationTest(inotify.IN_CREATE, operation)
|
||||
|
||||
|
||||
def test_delete(self):
|
||||
"""
|
||||
Deleting a file in a monitored directory sends an
|
||||
C{inotify.IN_DELETE} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
path.touch()
|
||||
path.remove()
|
||||
|
||||
return self._notificationTest(inotify.IN_DELETE, operation)
|
||||
|
||||
|
||||
def test_deleteSelf(self):
|
||||
"""
|
||||
Deleting the monitored directory itself sends an
|
||||
C{inotify.IN_DELETE_SELF} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
path.remove()
|
||||
|
||||
return self._notificationTest(
|
||||
inotify.IN_DELETE_SELF, operation, expectedPath=self.dirname)
|
||||
|
||||
|
||||
def test_moveSelf(self):
|
||||
"""
|
||||
Renaming the monitored directory itself sends an
|
||||
C{inotify.IN_MOVE_SELF} event to the callback.
|
||||
"""
|
||||
def operation(path):
|
||||
path.moveTo(filepath.FilePath(self.mktemp()))
|
||||
|
||||
return self._notificationTest(
|
||||
inotify.IN_MOVE_SELF, operation, expectedPath=self.dirname)
|
||||
|
||||
|
||||
def test_simpleSubdirectoryAutoAdd(self):
|
||||
"""
|
||||
L{inotify.INotify} when initialized with autoAdd==True adds
|
||||
also adds the created subdirectories to the watchlist.
|
||||
"""
|
||||
def _callback(wp, filename, mask):
|
||||
# We are notified before we actually process new
|
||||
# directories, so we need to defer this check.
|
||||
def _():
|
||||
try:
|
||||
self.assertTrue(self.inotify._isWatched(subdir))
|
||||
d.callback(None)
|
||||
except Exception:
|
||||
d.errback()
|
||||
reactor.callLater(0, _)
|
||||
|
||||
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
|
||||
self.inotify.watch(
|
||||
self.dirname, mask=checkMask, autoAdd=True,
|
||||
callbacks=[_callback])
|
||||
subdir = self.dirname.child('test')
|
||||
d = defer.Deferred()
|
||||
subdir.createDirectory()
|
||||
return d
|
||||
|
||||
|
||||
def test_simpleDeleteDirectory(self):
|
||||
"""
|
||||
L{inotify.INotify} removes a directory from the watchlist when
|
||||
it's removed from the filesystem.
|
||||
"""
|
||||
calls = []
|
||||
def _callback(wp, filename, mask):
|
||||
# We are notified before we actually process new
|
||||
# directories, so we need to defer this check.
|
||||
def _():
|
||||
try:
|
||||
self.assertTrue(self.inotify._isWatched(subdir))
|
||||
subdir.remove()
|
||||
except Exception:
|
||||
d.errback()
|
||||
def _eb():
|
||||
# second call, we have just removed the subdir
|
||||
try:
|
||||
self.assertTrue(not self.inotify._isWatched(subdir))
|
||||
d.callback(None)
|
||||
except Exception:
|
||||
d.errback()
|
||||
|
||||
if not calls:
|
||||
# first call, it's the create subdir
|
||||
calls.append(filename)
|
||||
reactor.callLater(0, _)
|
||||
|
||||
else:
|
||||
reactor.callLater(0, _eb)
|
||||
|
||||
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
|
||||
self.inotify.watch(
|
||||
self.dirname, mask=checkMask, autoAdd=True,
|
||||
callbacks=[_callback])
|
||||
subdir = self.dirname.child('test')
|
||||
d = defer.Deferred()
|
||||
subdir.createDirectory()
|
||||
return d
|
||||
|
||||
|
||||
def test_ignoreDirectory(self):
|
||||
"""
|
||||
L{inotify.INotify.ignore} removes a directory from the watchlist
|
||||
"""
|
||||
self.inotify.watch(self.dirname, autoAdd=True)
|
||||
self.assertTrue(self.inotify._isWatched(self.dirname))
|
||||
self.inotify.ignore(self.dirname)
|
||||
self.assertFalse(self.inotify._isWatched(self.dirname))
|
||||
|
||||
|
||||
def test_humanReadableMask(self):
|
||||
"""
|
||||
L{inotify.humaReadableMask} translates all the possible event
|
||||
masks to a human readable string.
|
||||
"""
|
||||
for mask, value in inotify._FLAG_TO_HUMAN:
|
||||
self.assertEqual(inotify.humanReadableMask(mask)[0], value)
|
||||
|
||||
checkMask = (
|
||||
inotify.IN_CLOSE_WRITE | inotify.IN_ACCESS | inotify.IN_OPEN)
|
||||
self.assertEqual(
|
||||
set(inotify.humanReadableMask(checkMask)),
|
||||
set(['close_write', 'access', 'open']))
|
||||
|
||||
|
||||
def test_recursiveWatch(self):
|
||||
"""
|
||||
L{inotify.INotify.watch} with recursive==True will add all the
|
||||
subdirectories under the given path to the watchlist.
|
||||
"""
|
||||
subdir = self.dirname.child('test')
|
||||
subdir2 = subdir.child('test2')
|
||||
subdir3 = subdir2.child('test3')
|
||||
subdir3.makedirs()
|
||||
dirs = [subdir, subdir2, subdir3]
|
||||
self.inotify.watch(self.dirname, recursive=True)
|
||||
# let's even call this twice so that we test that nothing breaks
|
||||
self.inotify.watch(self.dirname, recursive=True)
|
||||
for d in dirs:
|
||||
self.assertTrue(self.inotify._isWatched(d))
|
||||
|
||||
|
||||
def test_connectionLostError(self):
|
||||
"""
|
||||
L{inotify.INotify.connectionLost} if there's a problem while closing
|
||||
the fd shouldn't raise the exception but should log the error
|
||||
"""
|
||||
import os
|
||||
in_ = inotify.INotify()
|
||||
os.close(in_._fd)
|
||||
in_.loseConnection()
|
||||
self.flushLoggedErrors()
|
||||
|
||||
def test_noAutoAddSubdirectory(self):
|
||||
"""
|
||||
L{inotify.INotify.watch} with autoAdd==False will stop inotify
|
||||
from watching subdirectories created under the watched one.
|
||||
"""
|
||||
def _callback(wp, fp, mask):
|
||||
# We are notified before we actually process new
|
||||
# directories, so we need to defer this check.
|
||||
def _():
|
||||
try:
|
||||
self.assertFalse(self.inotify._isWatched(subdir.path))
|
||||
d.callback(None)
|
||||
except Exception:
|
||||
d.errback()
|
||||
reactor.callLater(0, _)
|
||||
|
||||
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
|
||||
self.inotify.watch(
|
||||
self.dirname, mask=checkMask, autoAdd=False,
|
||||
callbacks=[_callback])
|
||||
subdir = self.dirname.child('test')
|
||||
d = defer.Deferred()
|
||||
subdir.createDirectory()
|
||||
return d
|
||||
|
||||
|
||||
def test_seriesOfWatchAndIgnore(self):
|
||||
"""
|
||||
L{inotify.INotify} will watch a filepath for events even if the same
|
||||
path is repeatedly added/removed/re-added to the watchpoints.
|
||||
"""
|
||||
expectedPath = self.dirname.child("foo.bar2")
|
||||
expectedPath.touch()
|
||||
|
||||
notified = defer.Deferred()
|
||||
def cbNotified((ignored, filename, events)):
|
||||
self.assertEqual(filename, expectedPath)
|
||||
self.assertTrue(events & inotify.IN_DELETE_SELF)
|
||||
|
||||
def callIt(*args):
|
||||
notified.callback(args)
|
||||
|
||||
# Watch, ignore, watch again to get into the state being tested.
|
||||
self.assertTrue(self.inotify.watch(expectedPath, callbacks=[callIt]))
|
||||
self.inotify.ignore(expectedPath)
|
||||
self.assertTrue(
|
||||
self.inotify.watch(
|
||||
expectedPath, mask=inotify.IN_DELETE_SELF, callbacks=[callIt]))
|
||||
|
||||
notified.addCallback(cbNotified)
|
||||
|
||||
# Apparently in kernel version < 2.6.25, inofify has a bug in the way
|
||||
# similar events are coalesced. So, be sure to generate a different
|
||||
# event here than the touch() at the top of this method might have
|
||||
# generated.
|
||||
expectedPath.remove()
|
||||
|
||||
return notified
|
||||
|
||||
|
||||
def test_ignoreFilePath(self):
|
||||
"""
|
||||
L{inotify.INotify} will ignore a filepath after it has been removed from
|
||||
the watch list.
|
||||
"""
|
||||
expectedPath = self.dirname.child("foo.bar2")
|
||||
expectedPath.touch()
|
||||
expectedPath2 = self.dirname.child("foo.bar3")
|
||||
expectedPath2.touch()
|
||||
|
||||
notified = defer.Deferred()
|
||||
def cbNotified((ignored, filename, events)):
|
||||
self.assertEqual(filename, expectedPath2)
|
||||
self.assertTrue(events & inotify.IN_DELETE_SELF)
|
||||
|
||||
def callIt(*args):
|
||||
notified.callback(args)
|
||||
|
||||
self.assertTrue(
|
||||
self.inotify.watch(
|
||||
expectedPath, inotify.IN_DELETE_SELF, callbacks=[callIt]))
|
||||
notified.addCallback(cbNotified)
|
||||
|
||||
self.assertTrue(
|
||||
self.inotify.watch(
|
||||
expectedPath2, inotify.IN_DELETE_SELF, callbacks=[callIt]))
|
||||
|
||||
self.inotify.ignore(expectedPath)
|
||||
|
||||
expectedPath.remove()
|
||||
expectedPath2.remove()
|
||||
|
||||
return notified
|
||||
|
||||
|
||||
def test_ignoreNonWatchedFile(self):
|
||||
"""
|
||||
L{inotify.INotify} will raise KeyError if a non-watched filepath is
|
||||
ignored.
|
||||
"""
|
||||
expectedPath = self.dirname.child("foo.ignored")
|
||||
expectedPath.touch()
|
||||
|
||||
self.assertRaises(KeyError, self.inotify.ignore, expectedPath)
|
||||
|
||||
|
||||
def test_complexSubdirectoryAutoAdd(self):
|
||||
"""
|
||||
L{inotify.INotify} with autoAdd==True for a watched path
|
||||
generates events for every file or directory already present
|
||||
in a newly created subdirectory under the watched one.
|
||||
|
||||
This tests that we solve a race condition in inotify even though
|
||||
we may generate duplicate events.
|
||||
"""
|
||||
calls = set()
|
||||
def _callback(wp, filename, mask):
|
||||
calls.add(filename)
|
||||
if len(calls) == 6:
|
||||
try:
|
||||
self.assertTrue(self.inotify._isWatched(subdir))
|
||||
self.assertTrue(self.inotify._isWatched(subdir2))
|
||||
self.assertTrue(self.inotify._isWatched(subdir3))
|
||||
created = someFiles + [subdir, subdir2, subdir3]
|
||||
self.assertEqual(len(calls), len(created))
|
||||
self.assertEqual(calls, set(created))
|
||||
except Exception:
|
||||
d.errback()
|
||||
else:
|
||||
d.callback(None)
|
||||
|
||||
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
|
||||
self.inotify.watch(
|
||||
self.dirname, mask=checkMask, autoAdd=True,
|
||||
callbacks=[_callback])
|
||||
subdir = self.dirname.child('test')
|
||||
subdir2 = subdir.child('test2')
|
||||
subdir3 = subdir2.child('test3')
|
||||
d = defer.Deferred()
|
||||
subdir3.makedirs()
|
||||
|
||||
someFiles = [subdir.child('file1.dat'),
|
||||
subdir2.child('file2.dat'),
|
||||
subdir3.child('file3.dat')]
|
||||
# Add some files in pretty much all the directories so that we
|
||||
# see that we process all of them.
|
||||
for i, filename in enumerate(someFiles):
|
||||
filename.setContent(filename.path)
|
||||
return d
|
||||
|
|
@ -0,0 +1,150 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.iocpreactor}.
|
||||
"""
|
||||
|
||||
import errno
|
||||
from array import array
|
||||
from struct import pack
|
||||
from socket import AF_INET6, AF_INET, SOCK_STREAM, SOL_SOCKET, error, socket
|
||||
|
||||
from zope.interface.verify import verifyClass
|
||||
|
||||
from twisted.trial import unittest
|
||||
from twisted.python.log import msg
|
||||
from twisted.internet.interfaces import IPushProducer
|
||||
|
||||
try:
|
||||
from twisted.internet.iocpreactor import iocpsupport as _iocp, tcp, udp
|
||||
from twisted.internet.iocpreactor.reactor import IOCPReactor, EVENTS_PER_LOOP, KEY_NORMAL
|
||||
from twisted.internet.iocpreactor.interfaces import IReadWriteHandle
|
||||
from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT
|
||||
from twisted.internet.iocpreactor.abstract import FileHandle
|
||||
except ImportError:
|
||||
skip = 'This test only applies to IOCPReactor'
|
||||
|
||||
# Probe once at import time for IPv6 support; if AF_INET6 sockets cannot be
# created, keep the error text as the skip reason for the IPv6 test below.
try:
    socket(AF_INET6, SOCK_STREAM).close()
except error, e:
    ipv6Skip = str(e)
else:
    ipv6Skip = None
|
||||
|
||||
class SupportTests(unittest.TestCase):
    """
    Tests for L{twisted.internet.iocpreactor.iocpsupport}, low-level reactor
    implementation helpers.
    """
    def _acceptAddressTest(self, family, localhost):
        """
        Create a C{SOCK_STREAM} connection to localhost using a socket with an
        address family of C{family} and assert that the result of
        L{iocpsupport.get_accept_addrs} is consistent with the result of
        C{socket.getsockname} and C{socket.getpeername}.

        @param family: C{AF_INET} or C{AF_INET6}.
        @param localhost: The loopback address literal matching C{family}.
        """
        msg("family = %r" % (family,))
        port = socket(family, SOCK_STREAM)
        self.addCleanup(port.close)
        port.bind(('', 0))
        port.listen(1)
        client = socket(family, SOCK_STREAM)
        self.addCleanup(client.close)
        client.setblocking(False)
        try:
            client.connect((localhost, port.getsockname()[1]))
        except error, (errnum, message):
            # The socket is non-blocking, so an "in progress" style error is
            # the expected outcome of connect() here.
            self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))

        server = socket(family, SOCK_STREAM)
        self.addCleanup(server.close)
        # Buffer for _iocp.accept to fill with address data (cf. AcceptEx).
        buff = array('c', '\0' * 256)
        self.assertEqual(
            0, _iocp.accept(port.fileno(), server.fileno(), buff, None))
        # SO_UPDATE_ACCEPT_CONTEXT associates the accepted socket with the
        # listening socket's context so getsockname/getpeername work on it.
        server.setsockopt(
            SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, pack('P', server.fileno()))
        self.assertEqual(
            (family, client.getpeername()[:2], client.getsockname()[:2]),
            _iocp.get_accept_addrs(server.fileno(), buff))


    def test_ipv4AcceptAddress(self):
        """
        L{iocpsupport.get_accept_addrs} returns a three-tuple of address
        information about the socket associated with the file descriptor passed
        to it. For a connection using IPv4:

          - the first element is C{AF_INET}
          - the second element is a two-tuple of a dotted decimal notation IPv4
            address and a port number giving the peer address of the connection
          - the third element is the same type giving the host address of the
            connection
        """
        self._acceptAddressTest(AF_INET, '127.0.0.1')


    def test_ipv6AcceptAddress(self):
        """
        Like L{test_ipv4AcceptAddress}, but for IPv6 connections. In this case:

          - the first element is C{AF_INET6}
          - the second element is a two-tuple of a hexadecimal IPv6 address
            literal and a port number giving the peer address of the connection
          - the third element is the same type giving the host address of the
            connection
        """
        self._acceptAddressTest(AF_INET6, '::1')
    # Skip the IPv6 test when the import-time probe found no AF_INET6
    # support on this platform.
    if ipv6Skip is not None:
        test_ipv6AcceptAddress.skip = ipv6Skip
|
||||
|
||||
|
||||
|
||||
class IOCPReactorTestCase(unittest.TestCase):
    """
    Tests for the IOCP reactor's iteration behaviour and the interfaces of
    its handle classes.
    """
    def test_noPendingTimerEvents(self):
        """
        C{doIteration} returns a false value when it is invoked with no
        pending time events.
        """
        reactor = IOCPReactor()
        reactor.wakeUp()
        self.assertFalse(reactor.doIteration(None))


    def test_reactorInterfaces(self):
        """
        The IOCP socket-representing classes provide L{IReadWriteHandle}.
        """
        for handleClass in [tcp.Connection, udp.Port]:
            self.assertTrue(verifyClass(IReadWriteHandle, handleClass))


    def test_fileHandleInterfaces(self):
        """
        L{FileHandle} provides L{IPushProducer}.
        """
        self.assertTrue(verifyClass(IPushProducer, FileHandle))


    def test_maxEventsPerIteration(self):
        """
        An event posted beyond the first C{EVENTS_PER_LOOP} within a single
        reactor iteration is not lost: the following iteration handles it.
        """
        class FakeFD:
            counter = 0
            def logPrefix(self):
                return 'FakeFD'
            def cb(self, rc, bytes, evt):
                self.counter += 1

        reactor = IOCPReactor()
        fd = FakeFD()
        event = _iocp.Event(fd.cb, fd)
        remaining = EVENTS_PER_LOOP + 1
        while remaining:
            reactor.port.postEvent(0, KEY_NORMAL, event)
            remaining -= 1

        # First pass drains exactly EVENTS_PER_LOOP events...
        reactor.doIteration(None)
        self.assertEqual(fd.counter, EVENTS_PER_LOOP)
        # ...and the leftover one is delivered on the next pass.
        reactor.doIteration(0)
        self.assertEqual(fd.counter, EVENTS_PER_LOOP + 1)
|
||||
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.main}.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet.error import ReactorAlreadyInstalledError
|
||||
from twisted.internet.main import installReactor
|
||||
|
||||
from twisted.internet.test.modulehelpers import NoReactor
|
||||
|
||||
|
||||
class InstallReactorTests(unittest.SynchronousTestCase):
    """
    Tests for L{installReactor}.
    """

    def test_installReactor(self):
        """
        L{installReactor} makes its argument available as
        C{twisted.internet.reactor} when nothing is installed yet.
        """
        with NoReactor():
            fakeReactor = object()
            installReactor(fakeReactor)
            from twisted.internet import reactor
            self.assertIs(fakeReactor, reactor)


    def test_alreadyInstalled(self):
        """
        A second L{installReactor} call raises
        L{ReactorAlreadyInstalledError}.
        """
        with NoReactor():
            installReactor(object())
            self.assertRaises(
                ReactorAlreadyInstalledError, installReactor, object())


    def test_errorIsAnAssertionError(self):
        """
        L{ReactorAlreadyInstalledError} subclasses L{AssertionError} for
        backwards compatibility.
        """
        self.assertTrue(
            issubclass(ReactorAlreadyInstalledError, AssertionError))
|
||||
|
|
@ -0,0 +1,197 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet._newtls}.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.trial import unittest
|
||||
from twisted.internet.test.reactormixins import ReactorBuilder
|
||||
from twisted.internet.test.connectionmixins import (
|
||||
ConnectableProtocol, runProtocolsWithReactor)
|
||||
from twisted.internet.test.test_tls import SSLCreator, TLSMixin
|
||||
from twisted.internet.test.test_tls import StartTLSClientCreator
|
||||
from twisted.internet.test.test_tls import ContextGeneratingMixin
|
||||
from twisted.internet.test.test_tcp import TCPCreator
|
||||
try:
|
||||
from twisted.protocols import tls
|
||||
from twisted.internet import _newtls
|
||||
except ImportError:
|
||||
_newtls = None
|
||||
|
||||
|
||||
class BypassTLSTests(unittest.TestCase):
    """
    Tests for the L{_newtls._BypassTLS} class.
    """

    if not _newtls:
        skip = "Couldn't import _newtls, perhaps pyOpenSSL is old or missing"

    def test_loseConnectionPassThrough(self):
        """
        C{_BypassTLS.loseConnection} forwards to the base class's
        C{loseConnection}, keeping that implementation's default argument
        intact when the caller supplies none.
        """
        sentinel = object()
        calls = []

        class RecordingTransport(object):
            def loseConnection(self, _connDone=sentinel):
                calls.append(_connDone)

        bypass = _newtls._BypassTLS(RecordingTransport, RecordingTransport())

        # With no argument, the base class's own default is used:
        bypass.loseConnection()
        self.assertEqual(calls, [sentinel])

        # An explicit argument is passed through unchanged:
        explicit = object()
        bypass.loseConnection(explicit)
        self.assertEqual(calls, [sentinel, explicit])
|
||||
|
||||
|
||||
|
||||
class FakeProducer(object):
    """
    A do-nothing producer: every producer-interface method is a no-op.
    """

    def pauseProducing(self):
        """
        Ignore the request to pause.
        """


    def resumeProducing(self):
        """
        Ignore the request to resume.
        """


    def stopProducing(self):
        """
        Ignore the request to stop.
        """
|
||||
|
||||
|
||||
|
||||
class ProducerProtocol(ConnectableProtocol):
    """
    Register a producer, unregister it, and verify the producer hooks up to
    innards of C{TLSMemoryBIOProtocol}.

    Two observations are appended to C{result}: the producer the
    C{TLSMemoryBIOProtocol} holds right after registration, then the
    protocol's producer slot after unregistration.
    """

    def __init__(self, producer, result):
        # The producer to register with the transport.
        self.producer = producer
        # List receiving the two observations described above.
        self.result = result


    def connectionMade(self):
        if not isinstance(self.transport.protocol,
                          tls.TLSMemoryBIOProtocol):
            # Either the test or the code have a bug...
            raise RuntimeError("TLSMemoryBIOProtocol not hooked up.")

        self.transport.registerProducer(self.producer, True)
        # The producer was registered with the TLSMemoryBIOProtocol:
        self.result.append(self.transport.protocol._producer._producer)

        self.transport.unregisterProducer()
        # The producer was unregistered from the TLSMemoryBIOProtocol:
        self.result.append(self.transport.protocol._producer)
        self.transport.loseConnection()
|
||||
|
||||
|
||||
|
||||
class ProducerTestsMixin(ReactorBuilder, TLSMixin, ContextGeneratingMixin):
    """
    Test the new TLS code integrates C{TLSMemoryBIOProtocol} correctly.
    """

    if not _newtls:
        skip = "Could not import twisted.internet._newtls"

    def test_producerSSLFromStart(self):
        """
        C{registerProducer} and C{unregisterProducer} on TLS transports
        created as SSL from the get go are passed to the
        C{TLSMemoryBIOProtocol}, not the underlying transport directly.
        """
        result = []
        producer = FakeProducer()

        runProtocolsWithReactor(self, ConnectableProtocol(),
                                ProducerProtocol(producer, result),
                                SSLCreator())
        # ProducerProtocol records the registered producer, then None after
        # unregistration.
        self.assertEqual(result, [producer, None])


    def test_producerAfterStartTLS(self):
        """
        C{registerProducer} and C{unregisterProducer} on TLS transports
        created by C{startTLS} are passed to the C{TLSMemoryBIOProtocol}, not
        the underlying transport directly.
        """
        result = []
        producer = FakeProducer()

        runProtocolsWithReactor(self, ConnectableProtocol(),
                                ProducerProtocol(producer, result),
                                StartTLSClientCreator())
        self.assertEqual(result, [producer, None])


    def startTLSAfterRegisterProducer(self, streaming):
        """
        When a producer is registered, and then startTLS is called,
        the producer is re-registered with the C{TLSMemoryBIOProtocol}.

        @param streaming: Whether the producer is registered as a push
            (streaming) producer; this changes how many wrapper layers sit
            between the transport and the producer.
        """
        clientContext = self.getClientContext()
        serverContext = self.getServerContext()
        result = []
        producer = FakeProducer()

        class RegisterTLSProtocol(ConnectableProtocol):
            def connectionMade(self):
                self.transport.registerProducer(producer, streaming)
                self.transport.startTLS(serverContext)
                # Store TLSMemoryBIOProtocol and underlying transport producer
                # status:
                if streaming:
                    # _ProducerMembrane -> producer:
                    result.append(self.transport.protocol._producer._producer)
                    result.append(self.transport.producer._producer)
                else:
                    # _ProducerMembrane -> _PullToPush -> producer:
                    result.append(
                        self.transport.protocol._producer._producer._producer)
                    result.append(self.transport.producer._producer._producer)
                self.transport.unregisterProducer()
                self.transport.loseConnection()

        class StartTLSProtocol(ConnectableProtocol):
            def connectionMade(self):
                self.transport.startTLS(clientContext)

        runProtocolsWithReactor(self, RegisterTLSProtocol(),
                                StartTLSProtocol(), TCPCreator())
        # Both the TLS protocol and the underlying transport must have ended
        # up with the same producer.
        self.assertEqual(result, [producer, producer])


    def test_startTLSAfterRegisterProducerStreaming(self):
        """
        When a streaming producer is registered, and then startTLS is called,
        the producer is re-registered with the C{TLSMemoryBIOProtocol}.
        """
        self.startTLSAfterRegisterProducer(True)


    def test_startTLSAfterRegisterProducerNonStreaming(self):
        """
        When a non-streaming producer is registered, and then startTLS is
        called, the producer is re-registered with the
        C{TLSMemoryBIOProtocol}.
        """
        self.startTLSAfterRegisterProducer(False)
|
||||
|
||||
|
||||
# Generate concrete per-reactor TestCase classes from the mixin and expose
# them at module scope so the test runner collects them.
globals().update(ProducerTestsMixin.makeTestCaseClasses())
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet._pollingfile}.
|
||||
"""
|
||||
|
||||
from twisted.python.runtime import platform
|
||||
from twisted.trial.unittest import TestCase
|
||||
|
||||
if platform.isWindows():
|
||||
from twisted.internet import _pollingfile
|
||||
else:
|
||||
_pollingfile = None
|
||||
|
||||
|
||||
|
||||
class TestPollableWritePipe(TestCase):
    """
    Tests for L{_pollingfile._PollableWritePipe}.
    """

    def test_writeUnicode(self):
        """
        Appending unicode data to the output buffer through
        L{_pollingfile._PollableWritePipe.write} raises C{TypeError}.
        """
        pipe = _pollingfile._PollableWritePipe(1, lambda: None)
        self.assertRaises(TypeError, pipe.write, u"test")


    def test_writeSequenceUnicode(self):
        """
        L{_pollingfile._PollableWritePipe.writeSequence} raises C{TypeError}
        when any element of the sequence is unicode, whether the sequence is
        a list or a tuple.
        """
        pipe = _pollingfile._PollableWritePipe(1, lambda: None)
        for sequence in [[u"test"], (u"test", )]:
            self.assertRaises(TypeError, pipe.writeSequence, sequence)
|
||||
|
||||
|
||||
|
||||
|
||||
# _pollingfile is only importable on Windows (conditional import at the top
# of this module); elsewhere the whole TestCase is skipped.
if _pollingfile is None:
    TestPollableWritePipe.skip = "Test will run only on Windows."
|
||||
|
|
@ -0,0 +1,320 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for L{twisted.internet.posixbase} and supporting code.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import
|
||||
|
||||
from twisted.python.compat import _PY3
|
||||
from twisted.trial.unittest import TestCase
|
||||
from twisted.internet.defer import Deferred
|
||||
from twisted.internet.posixbase import PosixReactorBase, _Waker
|
||||
from twisted.internet.protocol import ServerFactory
|
||||
|
||||
# Reason to skip the AF_UNIX-dependent tests below, or None if they can run.
skipSockets = None
if _PY3:
    skipSockets = "Re-enable when Python 3 port supports AF_UNIX"
else:
    try:
        from twisted.internet import unix
        from twisted.test.test_unix import ClientProto
    except ImportError:
        skipSockets = "Platform does not support AF_UNIX sockets"
|
||||
|
||||
from twisted.internet.tcp import Port
|
||||
from twisted.internet import reactor
|
||||
|
||||
|
||||
|
||||
|
||||
class TrivialReactor(PosixReactorBase):
    """
    A minimal L{PosixReactorBase} subclass that records readers and writers
    in plain dictionaries instead of actually monitoring them.
    """
    def __init__(self):
        self._readers = {}
        self._writers = {}
        PosixReactorBase.__init__(self)


    def addReader(self, reader):
        """
        Remember C{reader} as active.
        """
        self._readers[reader] = True


    def removeReader(self, reader):
        """
        Forget C{reader}; raises C{KeyError} if it was never added.
        """
        self._readers.pop(reader)


    def addWriter(self, writer):
        """
        Remember C{writer} as active.
        """
        self._writers[writer] = True


    def removeWriter(self, writer):
        """
        Forget C{writer}; raises C{KeyError} if it was never added.
        """
        self._writers.pop(writer)
|
||||
|
||||
|
||||
|
||||
class PosixReactorBaseTests(TestCase):
    """
    Tests for L{PosixReactorBase}.
    """

    def _checkWaker(self, reactor):
        """
        Assert that C{reactor} has a L{_Waker} registered both as an
        internal reader and as an ordinary reader.
        """
        self.assertIsInstance(reactor.waker, _Waker)
        self.assertIn(reactor.waker, reactor._internalReaders)
        self.assertIn(reactor.waker, reactor._readers)


    def test_wakerIsInternalReader(self):
        """
        Instantiating L{PosixReactorBase} creates a waker and adds it to the
        internal readers set.
        """
        self._checkWaker(TrivialReactor())


    def test_removeAllSkipsInternalReaders(self):
        """
        L{PosixReactorBase._removeAll} leaves descriptors listed in
        C{_internalReaders} untouched.
        """
        reactor = TrivialReactor()
        internal = object()
        reactor._internalReaders.add(internal)
        reactor.addReader(internal)
        reactor._removeAll(reactor._readers, reactor._writers)
        self._checkWaker(reactor)
        self.assertIn(internal, reactor._internalReaders)
        self.assertIn(internal, reactor._readers)


    def test_removeAllReturnsRemovedDescriptors(self):
        """
        L{PosixReactorBase._removeAll} returns the removed
        L{IReadDescriptor} and L{IWriteDescriptor} objects.
        """
        reactor = TrivialReactor()
        readDesc = object()
        writeDesc = object()
        reactor.addReader(readDesc)
        reactor.addWriter(writeDesc)
        removed = reactor._removeAll(reactor._readers, reactor._writers)
        self.assertEqual(set(removed), set([readDesc, writeDesc]))
        self.assertNotIn(readDesc, reactor._readers)
        self.assertNotIn(writeDesc, reactor._writers)
|
||||
|
||||
|
||||
|
||||
class TCPPortTests(TestCase):
    """
    Tests for L{twisted.internet.tcp.Port}.
    """

    if not isinstance(reactor, PosixReactorBase):
        skip = "Non-posixbase reactor"

    def test_connectionLostFailed(self):
        """
        The L{Deferred} returned by L{Port.stopListening} errbacks when
        L{Port.connectionLost} raises an exception.
        """
        def explode(reason):
            # Deliberately raise ZeroDivisionError.
            return 1 // 0

        port = Port(12345, ServerFactory())
        port.connected = True
        port.connectionLost = explode
        return self.assertFailure(port.stopListening(), ZeroDivisionError)
|
||||
|
||||
|
||||
|
||||
class TimeoutReportReactor(PosixReactorBase):
    """
    A barely-runnable reactor that monitors no readers or writers and
    reports, through a L{Deferred}, the timeout passed to its first
    C{doIteration} call.
    """
    def __init__(self):
        PosixReactorBase.__init__(self)
        self.iterationTimeout = Deferred()
        self.now = 100


    def addReader(self, reader):
        """
        Discard the reader.  The waker gets added at startup, but nothing
        is ever actually monitored for events.
        """


    def removeAll(self):
        """
        Report no readers or writers to remove.  Called at reactor
        shutdown, so it must exist.
        """
        return []


    def seconds(self):
        """
        Supply a deterministic clock which tests adjust directly via
        C{self.now}.
        """
        return self.now


    def doIteration(self, timeout):
        """
        Fire C{iterationTimeout} with C{timeout} on the first call only.
        """
        d, self.iterationTimeout = self.iterationTimeout, None
        if d is not None:
            d.callback(timeout)
|
||||
|
||||
|
||||
|
||||
class IterationTimeoutTests(TestCase):
    """
    Tests for the timeout argument L{PosixReactorBase.run} passes to
    L{PosixReactorBase.doIteration} in the presence of various delayed
    calls.
    """
    def _checkIterationTimeout(self, reactor):
        """
        Run C{reactor} until its first C{doIteration} call and return the
        timeout that call received.
        """
        captured = []
        reactor.iterationTimeout.addCallback(captured.append)
        reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop())
        reactor.run()
        return captured[0]


    def test_noCalls(self):
        """
        With no delayed calls pending, C{doIteration} receives a timeout of
        C{None}.
        """
        self.assertEqual(
            self._checkIterationTimeout(TimeoutReportReactor()), None)


    def test_delayedCall(self):
        """
        With one delayed call pending, the timeout is the interval between
        now and that call's scheduled time.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(100, lambda: None)
        self.assertEqual(self._checkIterationTimeout(reactor), 100)


    def test_timePasses(self):
        """
        Time elapsing after a delayed call is scheduled reduces the timeout
        passed to C{doIteration} by the same amount.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(100, lambda: None)
        reactor.now += 25
        self.assertEqual(self._checkIterationTimeout(reactor), 75)


    def test_multipleDelayedCalls(self):
        """
        With several delayed calls pending, the soonest one determines the
        timeout.
        """
        reactor = TimeoutReportReactor()
        for interval in [50, 10, 100]:
            reactor.callLater(interval, lambda: None)
        self.assertEqual(self._checkIterationTimeout(reactor), 10)


    def test_resetDelayedCall(self):
        """
        Resetting a delayed call bases the timeout on the moment of the
        reset plus the new delay.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        reactor.now += 25
        call.reset(15)
        self.assertEqual(self._checkIterationTimeout(reactor), 15)


    def test_delayDelayedCall(self):
        """
        Re-delaying a delayed call adds the extra delay to the time
        remaining before it would have fired.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        reactor.now += 10
        call.delay(20)
        self.assertEqual(self._checkIterationTimeout(reactor), 60)


    def test_cancelDelayedCall(self):
        """
        Cancelling the only delayed call restores a timeout of C{None}.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(50, lambda: None).cancel()
        self.assertEqual(self._checkIterationTimeout(reactor), None)
|
||||
|
||||
|
||||
|
||||
class ConnectedDatagramPortTestCase(TestCase):
    """
    Test connected datagram UNIX sockets.
    """
    if skipSockets is not None:
        skip = skipSockets


    def test_connectionFailedDoesntCallLoseConnection(self):
        """
        C{connectionFailed} never invokes the deprecated C{loseConnection}
        on L{ConnectedDatagramPort}.
        """
        port = unix.ConnectedDatagramPort(None, ClientProto())

        def loseConnection():
            """
            Fail the test if the deprecated method is reached.
            """
            self.fail("loseConnection is deprecated and should not get called.")

        port.loseConnection = loseConnection
        port.connectionFailed("goodbye")


    def test_connectionFailedCallsStopListening(self):
        """
        C{connectionFailed} shuts the port down via
        L{ConnectedDatagramPort.stopListening} rather than the deprecated
        C{loseConnection}.
        """
        self.called = False

        def stopListening():
            """
            Record that the supported shutdown path was taken.
            """
            self.called = True

        port = unix.ConnectedDatagramPort(None, ClientProto())
        port.stopListening = stopListening
        port.connectionFailed("goodbye")
        self.assertEqual(self.called, True)
|
||||
|
|
@ -0,0 +1,340 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for POSIX-based L{IReactorProcess} implementations.
|
||||
"""
|
||||
|
||||
import errno, os, sys
|
||||
|
||||
try:
    # fcntl only exists on POSIX; its absence marks these tests for skipping.
    import fcntl
except ImportError:
    platformSkip = "non-POSIX platform"
else:
    from twisted.internet import process
    platformSkip = None
|
||||
|
||||
from twisted.trial.unittest import TestCase
|
||||
|
||||
|
||||
class FakeFile(object):
    """
    A dummy file object which reports its closure to the owning test case.
    """
    def __init__(self, testcase, fd):
        # The test case whose _files list tracks open descriptors.
        self.testcase = testcase
        # The descriptor number this fake file occupies.
        self.fd = fd


    def close(self):
        """
        Remove this descriptor from the owning test case's open-file list.
        """
        self.testcase._files.remove(self.fd)
|
||||
|
||||
|
||||
|
||||
class FakeResourceModule(object):
    """
    Fake version of L{resource} which hard-codes a particular rlimit for
    maximum open files.

    @ivar _limit: The value to return for the hard limit of number of open
        files.
    """
    RLIMIT_NOFILE = 1

    def __init__(self, limit):
        self._limit = limit


    def getrlimit(self, no):
        """
        A fake of L{resource.getrlimit} which returns a pre-determined
        result.
        """
        if no != self.RLIMIT_NOFILE:
            return [123, 456]
        return [0, self._limit]
|
||||
|
||||
|
||||
|
||||
class FDDetectorTests(TestCase):
|
||||
"""
|
||||
Tests for _FDDetector class in twisted.internet.process, which detects
|
||||
which function to drop in place for the _listOpenFDs method.
|
||||
|
||||
@ivar devfs: A flag indicating whether the filesystem fake will indicate
|
||||
that /dev/fd exists.
|
||||
|
||||
@ivar accurateDevFDResults: A flag indicating whether the /dev/fd fake
|
||||
returns accurate open file information.
|
||||
|
||||
@ivar procfs: A flag indicating whether the filesystem fake will indicate
|
||||
that /proc/<pid>/fd exists.
|
||||
"""
|
||||
skip = platformSkip
|
||||
|
||||
devfs = False
|
||||
accurateDevFDResults = False
|
||||
|
||||
procfs = False
|
||||
|
||||
def getpid(self):
|
||||
"""
|
||||
Fake os.getpid, always return the same thing
|
||||
"""
|
||||
return 123
|
||||
|
||||
|
||||
    def listdir(self, arg):
        """
        Fake os.listdir, depending on what mode we're in to simulate behaviour.

        @param arg: the directory to list
        """
        # Stringified copy of the tracked descriptors -- what a real fd
        # directory listing would contain.
        accurate = map(str, self._files)
        if self.procfs and arg == ('/proc/%d/fd' % (self.getpid(),)):
            return accurate
        if self.devfs and arg == '/dev/fd':
            if self.accurateDevFDResults:
                return accurate
            # Simulate a /dev/fd which only ever reports the standard
            # descriptors, regardless of what is actually open.
            return ["0", "1", "2"]
        # Any other path behaves as if it does not exist.
        raise OSError()
|
||||
|
||||
|
||||
    def openfile(self, fname, mode):
        """
        This is a mock for L{open}. It keeps track of opened files so extra
        descriptors can be returned from the mock for L{os.listdir} when used on
        one of the list-of-filedescriptors directories.

        A L{FakeFile} is returned which can be closed to remove the new
        descriptor from the open list.

        @param fname: ignored; no real file is opened.
        @param mode: ignored.
        """
        # Find the smallest unused file descriptor and give it to the new file.
        # The descriptor space is artificially capped at 1024 here.
        f = FakeFile(self, min(set(range(1024)) - set(self._files)))
        self._files.append(f.fd)
        return f
|
||||
|
||||
|
||||
    def hideResourceModule(self):
        """
        Make the L{resource} module unimportable for the remainder of the
        current test method.
        """
        # A None entry in sys.modules makes a subsequent import of that name
        # raise ImportError.
        sys.modules['resource'] = None
|
||||
|
||||
|
||||
    def revealResourceModule(self, limit):
        """
        Make a L{FakeResourceModule} instance importable at the L{resource}
        name, reporting C{limit} as the hard C{RLIMIT_NOFILE} value.

        @param limit: The value which will be returned for the hard limit of
            number of open files by the fake resource module's C{getrlimit}
            function.
        """
        sys.modules['resource'] = FakeResourceModule(limit)
|
||||
|
||||
|
||||
    def replaceResourceModule(self, value):
        """
        Restore the original resource module to L{sys.modules}.

        @param value: The original C{sys.modules} entry for C{'resource'},
            or C{None} if there was no such entry.
        """
        if value is None:
            try:
                del sys.modules['resource']
            except KeyError:
                # Already absent; nothing to undo.
                pass
        else:
            sys.modules['resource'] = value
|
||||
|
||||
|
||||
    def setUp(self):
        """
        Set up the tests, giving ourselves a detector object to play with and
        setting up its testable knobs to refer to our mocked versions.
        """
        self.detector = process._FDDetector()
        self.detector.listdir = self.listdir
        self.detector.getpid = self.getpid
        self.detector.openfile = self.openfile
        # Start with only the standard descriptors "open".
        self._files = [0, 1, 2]
        # Whatever a test does to sys.modules['resource'], restore the
        # original entry afterwards.
        self.addCleanup(
            self.replaceResourceModule, sys.modules.get('resource'))
|
||||
|
||||
|
||||
def test_selectFirstWorking(self):
|
||||
"""
|
||||
L{FDDetector._getImplementation} returns the first method from its
|
||||
C{_implementations} list which returns results which reflect a newly
|
||||
opened file descriptor.
|
||||
"""
|
||||
def failWithException():
|
||||
raise ValueError("This does not work")
|
||||
|
||||
def failWithWrongResults():
|
||||
return [0, 1, 2]
|
||||
|
||||
def correct():
|
||||
return self._files[:]
|
||||
|
||||
self.detector._implementations = [
|
||||
failWithException, failWithWrongResults, correct]
|
||||
|
||||
self.assertIs(correct, self.detector._getImplementation())
|
||||
|
||||
|
||||
def test_selectLast(self):
|
||||
"""
|
||||
L{FDDetector._getImplementation} returns the last method from its
|
||||
C{_implementations} list if none of the implementations manage to return
|
||||
results which reflect a newly opened file descriptor.
|
||||
"""
|
||||
def failWithWrongResults():
|
||||
return [3, 5, 9]
|
||||
|
||||
def failWithOtherWrongResults():
|
||||
return [0, 1, 2]
|
||||
|
||||
self.detector._implementations = [
|
||||
failWithWrongResults, failWithOtherWrongResults]
|
||||
|
||||
self.assertIs(
|
||||
failWithOtherWrongResults, self.detector._getImplementation())
|
||||
|
||||
|
||||
    def test_identityOfListOpenFDsChanges(self):
        """
        Check that the identity of _listOpenFDs changes after running
        _listOpenFDs the first time, but not after the second time it's run.

        In other words, check that the monkey patching actually works.
        """
        # Create a new instance
        detector = process._FDDetector()

        # func_name is the Python 2 spelling of a function's __name__.
        first = detector._listOpenFDs.func_name
        detector._listOpenFDs()
        second = detector._listOpenFDs.func_name
        detector._listOpenFDs()
        third = detector._listOpenFDs.func_name

        self.assertNotEqual(first, second)
        self.assertEqual(second, third)
|
||||
|
||||
|
||||
    def test_devFDImplementation(self):
        """
        L{_FDDetector._devFDImplementation} raises L{OSError} if there is no
        I{/dev/fd} directory, otherwise it returns the basenames of its children
        interpreted as integers.
        """
        # self.devfs / self.accurateDevFDResults appear to drive a fake
        # filesystem layer installed by setUp (not visible here) — TODO confirm.
        self.devfs = False
        self.assertRaises(OSError, self.detector._devFDImplementation)
        self.devfs = True
        self.accurateDevFDResults = False
        self.assertEqual([0, 1, 2], self.detector._devFDImplementation())
|
||||
|
||||
|
||||
    def test_procFDImplementation(self):
        """
        L{_FDDetector._procFDImplementation} raises L{OSError} if there is no
        I{/proc/<pid>/fd} directory, otherwise it returns the basenames of its
        children interpreted as integers.
        """
        # self.procfs presumably toggles a fake /proc in the patched os layer
        # installed by setUp — confirm against the fixture.
        self.procfs = False
        self.assertRaises(OSError, self.detector._procFDImplementation)
        self.procfs = True
        self.assertEqual([0, 1, 2], self.detector._procFDImplementation())
|
||||
|
||||
|
||||
    def test_resourceFDImplementation(self):
        """
        L{_FDDetector._fallbackFDImplementation} uses the L{resource} module if
        it is available, returning a range of integers from 0 to the
        minimum of C{1024} and the hard I{NOFILE} limit.
        """
        # When the resource module is here, use its value.
        self.revealResourceModule(512)
        self.assertEqual(
            range(512), self.detector._fallbackFDImplementation())

        # But limit its value to the arbitrarily selected value 1024.
        self.revealResourceModule(2048)
        self.assertEqual(
            range(1024), self.detector._fallbackFDImplementation())
|
||||
|
||||
|
||||
    def test_fallbackFDImplementation(self):
        """
        L{_FDDetector._fallbackFDImplementation}, the implementation of last
        resort, succeeds with a fixed range of integers from 0 to 1024 when the
        L{resource} module is not importable.
        """
        # hideResourceModule makes "import resource" fail inside the detector.
        self.hideResourceModule()
        self.assertEqual(range(1024), self.detector._fallbackFDImplementation())
|
||||
|
||||
|
||||
|
||||
class FileDescriptorTests(TestCase):
    """
    Tests for L{twisted.internet.process._listOpenFDs}
    """
    # platformSkip is defined earlier in this module; these tests only run
    # where the process module is available.
    skip = platformSkip

    def test_openFDs(self):
        """
        File descriptors returned by L{_listOpenFDs} are mostly open.

        This test assumes that zero-length writes fail with EBADF on closed
        file descriptors.
        """
        for fd in process._listOpenFDs():
            try:
                # F_GETFL succeeds on any open descriptor; a closed one fails
                # with EBADF, which is the only failure tolerated here.
                fcntl.fcntl(fd, fcntl.F_GETFL)
            except IOError, err:
                self.assertEqual(
                    errno.EBADF, err.errno,
                    "fcntl(%d, F_GETFL) failed with unexpected errno %d" % (
                        fd, err.errno))


    def test_expectedFDs(self):
        """
        L{_listOpenFDs} lists expected file descriptors.
        """
        # This is a tricky test.  A priori, there is no way to know what file
        # descriptors are open now, so there is no way to know what _listOpenFDs
        # should return.  Work around this by creating some new file descriptors
        # which we can know the state of and then just making assertions about
        # their presence or absence in the result.

        # Expect a file we just opened to be listed.
        f = file(os.devnull)
        openfds = process._listOpenFDs()
        self.assertIn(f.fileno(), openfds)

        # Expect a file we just closed not to be listed - with a caveat.  The
        # implementation may need to open a file to discover the result.  That
        # open file descriptor will be allocated the same number as the one we
        # just closed.  So, instead, create a hole in the file descriptor space
        # to catch that internal descriptor and make the assertion about a
        # different closed file descriptor.

        # This gets allocated a file descriptor larger than f's, since nothing
        # has been closed since we opened f.
        fd = os.dup(f.fileno())

        # But sanity check that; if it fails the test is invalid.
        self.assertTrue(
            fd > f.fileno(),
            "Expected duplicate file descriptor to be greater than original")

        try:
            # Get rid of the original, creating the hole.  The copy should still
            # be open, of course.
            f.close()
            self.assertIn(fd, process._listOpenFDs())
        finally:
            # Get rid of the copy now
            os.close(fd)
        # And it should not appear in the result.
        self.assertNotIn(fd, process._listOpenFDs())
|
||||
|
|
@ -0,0 +1,791 @@
|
|||
# Copyright (c) Twisted Matrix Laboratories.
|
||||
# See LICENSE for details.
|
||||
|
||||
"""
|
||||
Tests for implementations of L{IReactorProcess}.
|
||||
"""
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
import os, sys, signal, threading
|
||||
|
||||
from twisted.trial.unittest import TestCase
|
||||
from twisted.internet.test.reactormixins import ReactorBuilder
|
||||
from twisted.python.log import msg, err
|
||||
from twisted.python.runtime import platform, platformType
|
||||
from twisted.python.filepath import FilePath
|
||||
from twisted.internet import utils
|
||||
from twisted.internet.interfaces import IReactorProcess, IProcessTransport
|
||||
from twisted.internet.defer import Deferred, succeed
|
||||
from twisted.internet.protocol import ProcessProtocol
|
||||
from twisted.internet.error import ProcessDone, ProcessTerminated
|
||||
|
||||
# Reason to skip the UID/GID-changing tests, or None if they can run.  The
# process module itself is unavailable on Windows, so it is bound to None
# there and guarded against elsewhere in this module.
_uidgidSkip = None
if platform.isWindows():
    process = None
    _uidgidSkip = "Cannot change UID/GID on Windows"
else:
    from twisted.internet import process
    # Only root may switch to an arbitrary UID/GID.
    if os.getuid() != 0:
        _uidgidSkip = "Cannot change UID/GID except as root"
|
||||
|
||||
|
||||
class _ShutdownCallbackProcessProtocol(ProcessProtocol):
    """
    An L{IProcessProtocol} which fires a Deferred when the process it is
    associated with ends.

    @ivar received: A C{dict} mapping file descriptors to lists of bytes
        received from the child process on those file descriptors.
    """
    def __init__(self, whenFinished):
        """
        @param whenFinished: A L{Deferred} fired (with C{None}) from
            C{processEnded}.
        """
        self.whenFinished = whenFinished
        self.received = {}


    def childDataReceived(self, fd, bytes):
        # Accumulate output chunks per child file descriptor.
        self.received.setdefault(fd, []).append(bytes)


    def processEnded(self, reason):
        # The exit reason is deliberately discarded; callers only care that
        # the process is gone.
        self.whenFinished.callback(None)
|
||||
|
||||
|
||||
|
||||
class ProcessTestsBuilderBase(ReactorBuilder):
    """
    Base class for L{IReactorProcess} tests which defines some tests which
    can be applied to PTY or non-PTY uses of C{spawnProcess}.

    Subclasses are expected to set the C{usePTY} attribute to C{True} or
    C{False}.
    """
    # ReactorBuilder skips these tests on reactors lacking IReactorProcess.
    requiredInterfaces = [IReactorProcess]
|
||||
|
||||
|
||||
    def test_processTransportInterface(self):
        """
        L{IReactorProcess.spawnProcess} connects the protocol passed to it
        to a transport which provides L{IProcessTransport}.
        """
        ended = Deferred()
        protocol = _ShutdownCallbackProcessProtocol(ended)

        # Spawn a child that does nothing and exits immediately.
        reactor = self.buildReactor()
        transport = reactor.spawnProcess(
            protocol, sys.executable, [sys.executable, "-c", ""],
            usePTY=self.usePTY)

        # The transport is available synchronously, so we can check it right
        # away (unlike many transport-based tests).  This is convenient even
        # though it's probably not how the spawnProcess interface should really
        # work.
        # We're not using verifyObject here because part of
        # IProcessTransport is a lie - there are no getHost or getPeer
        # methods.  See #1124.
        self.assertTrue(IProcessTransport.providedBy(transport))

        # Let the process run and exit so we don't leave a zombie around.
        ended.addCallback(lambda ignored: reactor.stop())
        self.runReactor(reactor)
|
||||
|
||||
|
||||
    def _writeTest(self, write):
        """
        Helper for testing L{IProcessTransport} write functionality.  This
        method spawns a child process and gives C{write} a chance to write some
        bytes to it.  It then verifies that the bytes were actually written to
        it (by relying on the child process to echo them back).

        @param write: A two-argument callable.  This is invoked with a process
            transport and some bytes to write to it.
        """
        reactor = self.buildReactor()

        ended = Deferred()
        protocol = _ShutdownCallbackProcessProtocol(ended)

        bytes = "hello, world" + os.linesep
        # The child simply echoes one line of stdin back to stdout.
        program = (
            "import sys\n"
            "sys.stdout.write(sys.stdin.readline())\n"
            )

        def startup():
            transport = reactor.spawnProcess(
                protocol, sys.executable, [sys.executable, "-c", program])
            try:
                write(transport, bytes)
            except:
                # Log the failure and kill the child so the reactor still
                # stops and the assertion below reports the real problem.
                err(None, "Unhandled exception while writing")
                transport.signalProcess('KILL')
        reactor.callWhenRunning(startup)

        ended.addCallback(lambda ignored: reactor.stop())

        self.runReactor(reactor)
        # fd 1 is the child's stdout; the echo proves the write arrived.
        self.assertEqual(bytes, "".join(protocol.received[1]))
|
||||
|
||||
|
||||
def test_write(self):
|
||||
"""
|
||||
L{IProcessTransport.write} writes the specified C{str} to the standard
|
||||
input of the child process.
|
||||
"""
|
||||
def write(transport, bytes):
|
||||
transport.write(bytes)
|
||||
self._writeTest(write)
|
||||
|
||||
|
||||
    def test_writeSequence(self):
        """
        L{IProcessTransport.writeSequence} writes the specified C{list} of
        C{str} to the standard input of the child process.
        """
        def write(transport, bytes):
            # list(bytes) yields a list of one-character strings in Python 2,
            # exercising the sequence-of-strings form of the API.
            transport.writeSequence(list(bytes))
        self._writeTest(write)
|
||||
|
||||
|
||||
def test_writeToChild(self):
|
||||
"""
|
||||
L{IProcessTransport.writeToChild} writes the specified C{str} to the
|
||||
specified file descriptor of the child process.
|
||||
"""
|
||||
def write(transport, bytes):
|
||||
transport.writeToChild(0, bytes)
|
||||
self._writeTest(write)
|
||||
|
||||
|
||||
    def test_writeToChildBadFileDescriptor(self):
        """
        L{IProcessTransport.writeToChild} raises L{KeyError} if passed a file
        descriptor which was not set up by L{IReactorProcess.spawnProcess}.
        """
        def write(transport, bytes):
            try:
                # FD 13 was never configured for the child; must raise.
                self.assertRaises(KeyError, transport.writeToChild, 13, bytes)
            finally:
                # Just get the process to exit so the test can complete
                transport.write(bytes)
        self._writeTest(write)
|
||||
|
||||
|
||||
    def test_spawnProcessEarlyIsReaped(self):
        """
        If, before the reactor is started with L{IReactorCore.run}, a
        process is started with L{IReactorProcess.spawnProcess} and
        terminates, the process is reaped once the reactor is started.
        """
        reactor = self.buildReactor()

        # Create the process with no shared file descriptors, so that there
        # are no other events for the reactor to notice and "cheat" with.
        # We want to be sure it's really dealing with the process exiting,
        # not some associated event.
        if self.usePTY:
            childFDs = None
        else:
            childFDs = {}

        # Arrange to notice the SIGCHLD.
        signaled = threading.Event()
        def handler(*args):
            signaled.set()
        signal.signal(signal.SIGCHLD, handler)

        # Start a process - before starting the reactor!
        ended = Deferred()
        reactor.spawnProcess(
            _ShutdownCallbackProcessProtocol(ended), sys.executable,
            [sys.executable, "-c", ""], usePTY=self.usePTY, childFDs=childFDs)

        # Wait for the SIGCHLD (which might have been delivered before we got
        # here, but that's okay because the signal handler was installed above,
        # before we could have gotten it).
        signaled.wait(120)
        if not signaled.isSet():
            self.fail("Timed out waiting for child process to exit.")

        # Capture the processEnded callback.
        result = []
        ended.addCallback(result.append)

        if result:
            # The synchronous path through spawnProcess / Process.__init__ /
            # registerReapProcessHandler was encountered.  There's no reason to
            # start the reactor, because everything is done already.
            return

        # Otherwise, though, start the reactor so it can tell us the process
        # exited.
        ended.addCallback(lambda ignored: reactor.stop())
        self.runReactor(reactor)

        # Make sure the reactor stopped because the Deferred fired.
        self.assertTrue(result)

    # Class-level skip: without SIGCHLD the early-reap mechanism under test
    # cannot exist at all.
    if getattr(signal, 'SIGCHLD', None) is None:
        test_spawnProcessEarlyIsReaped.skip = (
            "Platform lacks SIGCHLD, early-spawnProcess test can't work.")
|
||||
|
||||
|
||||
    def test_processExitedWithSignal(self):
        """
        The C{reason} argument passed to L{IProcessProtocol.processExited} is a
        L{ProcessTerminated} instance if the child process exits with a signal.
        """
        sigName = 'TERM'
        sigNum = getattr(signal, 'SIG' + sigName)
        exited = Deferred()
        source = (
            "import sys\n"
            # Talk so the parent process knows the process is running.  This is
            # necessary because ProcessProtocol.makeConnection may be called
            # before this process is exec'd.  It would be unfortunate if we
            # SIGTERM'd the Twisted process while it was on its way to doing
            # the exec.
            "sys.stdout.write('x')\n"
            "sys.stdout.flush()\n"
            "sys.stdin.read()\n")

        class Exiter(ProcessProtocol):
            def childDataReceived(self, fd, data):
                msg('childDataReceived(%d, %r)' % (fd, data))
                # The child has exec'd; now it is safe to signal it.
                self.transport.signalProcess(sigName)

            def childConnectionLost(self, fd):
                msg('childConnectionLost(%d)' % (fd,))

            def processExited(self, reason):
                msg('processExited(%r)' % (reason,))
                # Protect the Deferred from the failure so that it follows
                # the callback chain.  This doesn't use the errback chain
                # because it wants to make sure reason is a Failure.  An
                # Exception would also make an errback-based test pass, and
                # that would be wrong.
                exited.callback([reason])

            def processEnded(self, reason):
                msg('processEnded(%r)' % (reason,))

        reactor = self.buildReactor()
        reactor.callWhenRunning(
            reactor.spawnProcess, Exiter(), sys.executable,
            [sys.executable, "-c", source], usePTY=self.usePTY)

        def cbExited((failure,)):
            # Trapping implicitly verifies that it's a Failure (rather than
            # an exception) and explicitly makes sure it's the right type.
            failure.trap(ProcessTerminated)
            err = failure.value
            if platform.isWindows():
                # Windows can't really /have/ signals, so it certainly can't
                # report them as the reason for termination.  Maybe there's
                # something better we could be doing here, anyway?  Hard to
                # say.  Anyway, this inconsistency between different platforms
                # is extremely unfortunate and I would remove it if I
                # could. -exarkun
                self.assertIs(err.signal, None)
                self.assertEqual(err.exitCode, 1)
            else:
                self.assertEqual(err.signal, sigNum)
                self.assertIs(err.exitCode, None)

        exited.addCallback(cbExited)
        exited.addErrback(err)
        exited.addCallback(lambda ign: reactor.stop())

        self.runReactor(reactor)
|
||||
|
||||
|
||||
    def test_systemCallUninterruptedByChildExit(self):
        """
        If a child process exits while a system call is in progress, the system
        call should not be interfered with.  In particular, it should not fail
        with EINTR.

        Older versions of Twisted installed a SIGCHLD handler on POSIX without
        using the feature exposed by the SA_RESTART flag to sigaction(2).  The
        most noticable problem this caused was for blocking reads and writes to
        sometimes fail with EINTR.
        """
        reactor = self.buildReactor()
        result = []

        def f():
            try:
                # First child exits quickly, delivering SIGCHLD mid-read below.
                os.popen('%s -c "import time; time.sleep(0.1)"' %
                         (sys.executable,))
                f2 = os.popen('%s -c "import time; time.sleep(0.5); print \'Foo\'"' %
                              (sys.executable,))
                # The read call below will blow up with an EINTR from the
                # SIGCHLD from the first process exiting if we install a
                # SIGCHLD handler without SA_RESTART.  (which we used to do)
                result.append(f2.read())
            finally:
                reactor.stop()

        reactor.callWhenRunning(f)
        self.runReactor(reactor)
        self.assertEqual(result, ["Foo\n"])
|
||||
|
||||
|
||||
    def test_openFileDescriptors(self):
        """
        A spawned process has only stdin, stdout and stderr open
        (file descriptor 3 is also reported as open, because of the call to
        'os.listdir()').
        """
        # Point the child at this checkout so it imports the same Twisted.
        here = FilePath(__file__)
        top = here.parent().parent().parent().parent()
        source = (
            "import sys",
            "sys.path.insert(0, '%s')" % (top.path,),
            "from twisted.internet import process",
            "sys.stdout.write(str(process._listOpenFDs()))",
            "sys.stdout.flush()")

        def checkOutput(output):
            # FD 3 is the descriptor _listOpenFDs itself opens to look.
            self.assertEqual('[0, 1, 2, 3]', output)

        reactor = self.buildReactor()

        class Protocol(ProcessProtocol):
            def __init__(self):
                self.output = []

            def outReceived(self, data):
                self.output.append(data)

            def processEnded(self, reason):
                try:
                    checkOutput("".join(self.output))
                finally:
                    reactor.stop()

        proto = Protocol()
        reactor.callWhenRunning(
            reactor.spawnProcess, proto, sys.executable,
            [sys.executable, "-Wignore", "-c", "\n".join(source)],
            env=os.environ, usePTY=self.usePTY)
        self.runReactor(reactor)

    if platformType != "posix":
        test_openFileDescriptors.skip = "Test only applies to POSIX platforms"
|
||||
|
||||
|
||||
    def test_timelyProcessExited(self):
        """
        If a spawned process exits, C{processExited} will be called in a
        timely manner.
        """
        reactor = self.buildReactor()

        class ExitingProtocol(ProcessProtocol):
            exited = False

            def processExited(protoSelf, reason):
                protoSelf.exited = True
                reactor.stop()
                # Asserted after stop() so a failure still lets the reactor
                # shut down; trial reports it from the callback.
                self.assertEqual(reason.value.exitCode, 0)

        protocol = ExitingProtocol()
        reactor.callWhenRunning(
            reactor.spawnProcess, protocol, sys.executable,
            [sys.executable, "-c", "raise SystemExit(0)"],
            usePTY=self.usePTY)

        # This will timeout if processExited isn't called:
        self.runReactor(reactor, timeout=30)
        self.assertEqual(protocol.exited, True)
|
||||
|
||||
|
||||
    def _changeIDTest(self, which):
        """
        Launch a child process, using either the C{uid} or C{gid} argument to
        L{IReactorProcess.spawnProcess} to change either its UID or GID to a
        different value.  If the child process reports this hasn't happened,
        raise an exception to fail the test.

        @param which: Either C{b"uid"} or C{b"gid"}.
        """
        # Child exits 0 iff os.getuid()/os.getgid() reports the requested id.
        program = [
            b"import os",
            b"raise SystemExit(os.get%s() != 1)" % (which,)]

        container = []
        class CaptureExitStatus(ProcessProtocol):
            def childDataReceived(self, fd, bytes):
                # Python 2 print statement; surfaces unexpected child output
                # in the test log.
                print fd, bytes
            def processEnded(self, reason):
                container.append(reason)
                reactor.stop()

        reactor = self.buildReactor()
        protocol = CaptureExitStatus()
        reactor.callWhenRunning(
            reactor.spawnProcess, protocol, sys.executable,
            [sys.executable, b"-c", b"\n".join(program)],
            **{which: 1})

        self.runReactor(reactor)

        self.assertEqual(0, container[0].value.exitCode)
|
||||
|
||||
|
||||
    def test_changeUID(self):
        """
        If a value is passed for L{IReactorProcess.spawnProcess}'s C{uid}, the
        child process is run with that UID.
        """
        self._changeIDTest(b"uid")
    # Requires root (or Windows would lack the feature entirely); see the
    # module-level _uidgidSkip computation.
    if _uidgidSkip is not None:
        test_changeUID.skip = _uidgidSkip
|
||||
|
||||
|
||||
    def test_changeGID(self):
        """
        If a value is passed for L{IReactorProcess.spawnProcess}'s C{gid}, the
        child process is run with that GID.
        """
        self._changeIDTest(b"gid")
    # Same precondition as test_changeUID.
    if _uidgidSkip is not None:
        test_changeGID.skip = _uidgidSkip
|
||||
|
||||
|
||||
    def test_processExitedRaises(self):
        """
        If L{IProcessProtocol.processExited} raises an exception, it is logged.
        """
        # Ideally we wouldn't need to poke the process module; see
        # https://twistedmatrix.com/trac/ticket/6889
        reactor = self.buildReactor()

        class TestException(Exception):
            pass

        class Protocol(ProcessProtocol):
            def processExited(self, reason):
                reactor.stop()
                raise TestException("processedExited raised")

        protocol = Protocol()
        transport = reactor.spawnProcess(
            protocol, sys.executable, [sys.executable, "-c", ""],
            usePTY=self.usePTY)
        self.runReactor(reactor)

        # Manually clean-up broken process handler.
        # Only required if the test fails on systems that support
        # the process module.
        if process is not None:
            for pid, handler in process.reapProcessHandlers.items():
                if handler is not transport:
                    continue
                process.unregisterReapProcessHandler(pid, handler)
                self.fail("After processExited raised, transport was left in"
                          " reapProcessHandlers")

        # The raised TestException must have been logged exactly once.
        self.assertEqual(1, len(self.flushLoggedErrors(TestException)))
|
||||
|
||||
|
||||
|
||||
class ProcessTestsBuilder(ProcessTestsBuilderBase):
    """
    Builder defining tests relating to L{IReactorProcess} for child processes
    which do not have a PTY.
    """
    usePTY = False

    # Helper script that keeps the child's stdio open until told otherwise;
    # used by the processEnded/processExited tests below.
    keepStdioOpenProgram = FilePath(__file__).sibling('process_helper.py').path
    if platform.isWindows():
        keepStdioOpenArg = "windows"
    else:
        # Just a value that doesn't equal "windows"
        keepStdioOpenArg = ""
|
||||
|
||||
    # Define this test here because PTY-using processes only have stdin and
    # stdout and the test would need to be different for that to work.
    def test_childConnectionLost(self):
        """
        L{IProcessProtocol.childConnectionLost} is called each time a file
        descriptor associated with a child process is closed.
        """
        connected = Deferred()
        # One Deferred per child FD, fired when that FD is reported lost.
        lost = {0: Deferred(), 1: Deferred(), 2: Deferred()}

        class Closer(ProcessProtocol):
            def makeConnection(self, transport):
                connected.callback(transport)

            def childConnectionLost(self, childFD):
                lost[childFD].callback(None)

        # The child closes whichever FD number it is told on stdin; a blank
        # line makes it exit (closing the rest).
        source = (
            "import os, sys\n"
            "while 1:\n"
            "    line = sys.stdin.readline().strip()\n"
            "    if not line:\n"
            "        break\n"
            "    os.close(int(line))\n")

        reactor = self.buildReactor()
        reactor.callWhenRunning(
            reactor.spawnProcess, Closer(), sys.executable,
            [sys.executable, "-c", source], usePTY=self.usePTY)

        def cbConnected(transport):
            transport.write('2\n')
            return lost[2].addCallback(lambda ign: transport)
        connected.addCallback(cbConnected)

        def lostSecond(transport):
            transport.write('1\n')
            return lost[1].addCallback(lambda ign: transport)
        connected.addCallback(lostSecond)

        def lostFirst(transport):
            transport.write('\n')
        connected.addCallback(lostFirst)
        connected.addErrback(err)

        def cbEnded(ignored):
            reactor.stop()
        connected.addCallback(cbEnded)

        self.runReactor(reactor)
|
||||
|
||||
|
||||
    # This test is here because PTYProcess never delivers childConnectionLost.
    def test_processEnded(self):
        """
        L{IProcessProtocol.processEnded} is called after the child process
        exits and L{IProcessProtocol.childConnectionLost} is called for each of
        its file descriptors.
        """
        ended = Deferred()
        lost = []

        class Ender(ProcessProtocol):
            def childDataReceived(self, fd, data):
                msg('childDataReceived(%d, %r)' % (fd, data))
                self.transport.loseConnection()

            def childConnectionLost(self, childFD):
                msg('childConnectionLost(%d)' % (childFD,))
                lost.append(childFD)

            def processExited(self, reason):
                msg('processExited(%r)' % (reason,))

            def processEnded(self, reason):
                msg('processEnded(%r)' % (reason,))
                # Wrapped in a list to keep the Failure on the callback chain;
                # see test_processExitedWithSignal for the rationale.
                ended.callback([reason])

        reactor = self.buildReactor()
        reactor.callWhenRunning(
            reactor.spawnProcess, Ender(), sys.executable,
            [sys.executable, self.keepStdioOpenProgram, "child",
             self.keepStdioOpenArg],
            usePTY=self.usePTY)

        def cbEnded((failure,)):
            failure.trap(ProcessDone)
            # All three stdio descriptors must have been reported lost first.
            self.assertEqual(set(lost), set([0, 1, 2]))
        ended.addCallback(cbEnded)

        ended.addErrback(err)
        ended.addCallback(lambda ign: reactor.stop())

        self.runReactor(reactor)
|
||||
|
||||
|
||||
    # This test is here because PTYProcess.loseConnection does not actually
    # close the file descriptors to the child process.  This test needs to be
    # written fairly differently for PTYProcess.
    def test_processExited(self):
        """
        L{IProcessProtocol.processExited} is called when the child process
        exits, even if file descriptors associated with the child are still
        open.
        """
        exited = Deferred()
        allLost = Deferred()
        lost = []

        class Waiter(ProcessProtocol):
            def childDataReceived(self, fd, data):
                msg('childDataReceived(%d, %r)' % (fd, data))

            def childConnectionLost(self, childFD):
                msg('childConnectionLost(%d)' % (childFD,))
                lost.append(childFD)
                if len(lost) == 3:
                    allLost.callback(None)

            def processExited(self, reason):
                msg('processExited(%r)' % (reason,))
                # See test_processExitedWithSignal
                exited.callback([reason])
                # Only now release the child's descriptors.
                self.transport.loseConnection()

        reactor = self.buildReactor()
        reactor.callWhenRunning(
            reactor.spawnProcess, Waiter(), sys.executable,
            [sys.executable, self.keepStdioOpenProgram, "child",
             self.keepStdioOpenArg],
            usePTY=self.usePTY)

        def cbExited((failure,)):
            failure.trap(ProcessDone)
            msg('cbExited; lost = %s' % (lost,))
            # processExited fired while all descriptors were still open.
            self.assertEqual(lost, [])
            return allLost
        exited.addCallback(cbExited)

        def cbAllLost(ignored):
            self.assertEqual(set(lost), set([0, 1, 2]))
        exited.addCallback(cbAllLost)

        exited.addErrback(err)
        exited.addCallback(lambda ign: reactor.stop())

        self.runReactor(reactor)
|
||||
|
||||
|
||||
def makeSourceFile(self, sourceLines):
|
||||
"""
|
||||
Write the given list of lines to a text file and return the absolute
|
||||
path to it.
|
||||
"""
|
||||
script = self.mktemp()
|
||||
scriptFile = file(script, 'wt')
|
||||
scriptFile.write(os.linesep.join(sourceLines) + os.linesep)
|
||||
scriptFile.close()
|
||||
return os.path.abspath(script)
|
||||
|
||||
|
||||
    def test_shebang(self):
        """
        Spawning a process with an executable which is a script starting
        with an interpreter definition line (#!) uses that interpreter to
        evaluate the script.
        """
        SHEBANG_OUTPUT = 'this is the shebang output'

        scriptFile = self.makeSourceFile([
                "#!%s" % (sys.executable,),
                "import sys",
                "sys.stdout.write('%s')" % (SHEBANG_OUTPUT,),
                "sys.stdout.flush()"])
        # Must be executable for the kernel to honor the shebang line.
        os.chmod(scriptFile, 0700)

        reactor = self.buildReactor()

        def cbProcessExited((out, err, code)):
            msg("cbProcessExited((%r, %r, %d))" % (out, err, code))
            self.assertEqual(out, SHEBANG_OUTPUT)
            self.assertEqual(err, "")
            self.assertEqual(code, 0)

        def shutdown(passthrough):
            reactor.stop()
            return passthrough

        def start():
            d = utils.getProcessOutputAndValue(scriptFile, reactor=reactor)
            # addBoth first so the reactor stops even if the process failed.
            d.addBoth(shutdown)
            d.addCallback(cbProcessExited)
            d.addErrback(err)

        reactor.callWhenRunning(start)
        self.runReactor(reactor)
|
||||
|
||||
|
||||
    def test_processCommandLineArguments(self):
        """
        Arguments given to spawnProcess are passed to the child process as
        originally intended.
        """
        source = (
            # On Windows, stdout is not opened in binary mode by default,
            # so newline characters are munged on writing, interfering with
            # the tests.
            'import sys, os\n'
            'try:\n'
            '  import msvcrt\n'
            '  msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n'
            'except ImportError:\n'
            '  pass\n'
            'for arg in sys.argv[1:]:\n'
            '  sys.stdout.write(arg + chr(0))\n'
            '  sys.stdout.flush()')

        # Deliberately nasty arguments: quoting, whitespace, shell
        # metacharacters, and backslash sequences.
        args = ['hello', '"', ' \t|<>^&', r'"\\"hello\\"', r'"foo\ bar baz\""']
        # Ensure that all non-NUL characters can be passed too.
        args.append(''.join(map(chr, xrange(1, 256))))

        reactor = self.buildReactor()

        def processFinished(output):
            output = output.split('\0')
            # Drop the trailing \0.
            output.pop()
            self.assertEqual(args, output)

        def shutdown(result):
            reactor.stop()
            return result

        def spawnChild():
            d = succeed(None)
            d.addCallback(lambda dummy: utils.getProcessOutput(
                sys.executable, ['-c', source] + args, reactor=reactor))
            d.addCallback(processFinished)
            d.addBoth(shutdown)

        reactor.callWhenRunning(spawnChild)
        self.runReactor(reactor)
|
||||
# Generate one concrete TestCase per installed reactor from the builder.
globals().update(ProcessTestsBuilder.makeTestCaseClasses())
|
||||
|
||||
|
||||
|
||||
class PTYProcessTestsBuilder(ProcessTestsBuilderBase):
    """
    Builder defining tests relating to L{IReactorProcess} for child processes
    which have a PTY.
    """
    usePTY = True

    if platform.isWindows():
        skip = "PTYs are not supported on Windows."
    elif platform.isMacOSX():
        # Skip only the poll-based reactor rather than the whole suite.
        skippedReactors = {
            "twisted.internet.pollreactor.PollReactor":
                "OS X's poll() does not support PTYs"}
# Generate one concrete TestCase per installed reactor from the builder.
globals().update(PTYProcessTestsBuilder.makeTestCaseClasses())
|
||||
|
||||
|
||||
|
||||
class PotentialZombieWarningTests(TestCase):
    """
    Tests for L{twisted.internet.error.PotentialZombieWarning}.
    """
    def test_deprecated(self):
        """
        Accessing L{PotentialZombieWarning} via the
        I{PotentialZombieWarning} attribute of L{twisted.internet.error}
        results in a deprecation warning being emitted.
        """
        from twisted.internet import error
        # The attribute access alone must trigger the deprecation machinery.
        error.PotentialZombieWarning

        warnings = self.flushWarnings([self.test_deprecated])
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            "twisted.internet.error.PotentialZombieWarning was deprecated in "
            "Twisted 10.0.0: There is no longer any potential for zombie "
            "process.")
        self.assertEqual(len(warnings), 1)
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue