update to tornado 4.0 and requests 2.3.0
parent 060f459965
commit f187000dc9
239 changed files with 19071 additions and 20369 deletions
@@ -25,5 +25,5 @@ from __future__ import absolute_import, division, print_function, with_statement
 # is zero for an official release, positive for a development branch,
 # or negative for a release candidate or beta (after the base version
 # number has been incremented)
-version = "3.1.1"
-version_info = (3, 1, 1, 0)
+version = "4.0"
+version_info = (4, 0, 0, 0)
@@ -34,16 +34,29 @@ See the individual service classes below for complete documentation.

 Example usage for Google OpenID::

-    class GoogleLoginHandler(tornado.web.RequestHandler,
-                             tornado.auth.GoogleMixin):
-        @tornado.web.asynchronous
+    class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
+                                   tornado.auth.GoogleOAuth2Mixin):
+        @tornado.gen.coroutine
         def get(self):
-            if self.get_argument("openid.mode", None):
-                user = yield self.get_authenticated_user()
-                # Save the user with e.g. set_secure_cookie()
+            if self.get_argument('code', False):
+                user = yield self.get_authenticated_user(
+                    redirect_uri='http://your.site.com/auth/google',
+                    code=self.get_argument('code'))
+                # Save the user with e.g. set_secure_cookie
             else:
-                yield self.authenticate_redirect()
+                yield self.authorize_redirect(
+                    redirect_uri='http://your.site.com/auth/google',
+                    client_id=self.settings['google_oauth']['key'],
+                    scope=['profile', 'email'],
+                    response_type='code',
+                    extra_params={'approval_prompt': 'auto'})
+
+.. versionchanged:: 4.0
+   All of the callback interfaces in this module are now guaranteed
+   to run their callback with an argument of ``None`` on error.
+   Previously some functions would do this while others would simply
+   terminate the request on their own. This change also ensures that
+   errors are more consistently reported through the ``Future`` interfaces.
 """

 from __future__ import absolute_import, division, print_function, with_statement
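For context, the new ``GoogleOAuth2Mixin`` example above only works once the application is configured with registered client credentials. A minimal sketch of the wiring, assuming a ``/auth/google`` route; the route, cookie secret, and credential values are illustrative placeholders, not part of this diff:

    import tornado.auth
    import tornado.escape
    import tornado.gen
    import tornado.web

    class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
                                   tornado.auth.GoogleOAuth2Mixin):
        @tornado.gen.coroutine
        def get(self):
            if self.get_argument('code', False):
                user = yield self.get_authenticated_user(
                    redirect_uri='http://your.site.com/auth/google',
                    code=self.get_argument('code'))
                # persist the token response in a signed cookie
                self.set_secure_cookie('user', tornado.escape.json_encode(user))
                self.redirect('/')
            else:
                yield self.authorize_redirect(
                    redirect_uri='http://your.site.com/auth/google',
                    client_id=self.settings['google_oauth']['key'],
                    scope=['profile', 'email'],
                    response_type='code',
                    extra_params={'approval_prompt': 'auto'})

    application = tornado.web.Application(
        [(r'/auth/google', GoogleOAuth2LoginHandler)],
        cookie_secret='REPLACE_WITH_A_RANDOM_SECRET',  # placeholder value
        google_oauth={'key': 'CLIENT_ID', 'secret': 'CLIENT_SECRET'})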
@@ -56,12 +69,13 @@ import hmac
 import time
 import uuid

-from tornado.concurrent import Future, chain_future, return_future
+from tornado.concurrent import TracebackFuture, chain_future, return_future
 from tornado import gen
 from tornado import httpclient
 from tornado import escape
 from tornado.httputil import url_concat
 from tornado.log import gen_log
+from tornado.stack_context import ExceptionStackContext
 from tornado.util import bytes_type, u, unicode_type, ArgReplacer

 try:
@@ -74,6 +88,11 @@ try:
 except ImportError:
     import urllib as urllib_parse  # py2

+try:
+    long  # py2
+except NameError:
+    long = int  # py3
+

 class AuthError(Exception):
     pass
@@ -99,12 +118,19 @@ def _auth_return_future(f):

     @functools.wraps(f)
     def wrapper(*args, **kwargs):
-        future = Future()
+        future = TracebackFuture()
         callback, args, kwargs = replacer.replace(future, args, kwargs)
         if callback is not None:
             future.add_done_callback(
                 functools.partial(_auth_future_to_callback, callback))
-        f(*args, **kwargs)
+        def handle_exception(typ, value, tb):
+            if future.done():
+                return False
+            else:
+                future.set_exc_info((typ, value, tb))
+                return True
+        with ExceptionStackContext(handle_exception):
+            f(*args, **kwargs)
         return future
     return wrapper

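In effect, ``_auth_return_future`` turns a callback-taking method into one that also returns a ``Future``: the decorator swaps the caller's ``callback`` argument for the future, and the decorated body completes that future directly. A toy sketch of the same pattern, simplified (no stack contexts) and with made-up function names; it is not the module's actual code:

    import functools
    from tornado.concurrent import TracebackFuture
    from tornado.util import ArgReplacer

    def auth_return_future(f):
        # simplified stand-in for tornado.auth._auth_return_future
        replacer = ArgReplacer(f, 'callback')

        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            future = TracebackFuture()
            # put the future in the 'callback' slot; recover any real callback
            callback, args, kwargs = replacer.replace(future, args, kwargs)
            if callback is not None:
                future.add_done_callback(lambda fut: callback(fut.result()))
            f(*args, **kwargs)
            return future
        return wrapper

    @auth_return_future
    def lookup_user(name, callback):
        # inside the decorated function, ``callback`` is really the Future
        callback.set_result({'name': name})

    def show(user):
        print(user)

    future = lookup_user('alice')        # Future style: yield this in a coroutine
    print(future.result())               # {'name': 'alice'}
    lookup_user('bob', callback=show)    # legacy callback style still works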
@@ -162,7 +188,7 @@ class OpenIdMixin(object):
         url = self._OPENID_ENDPOINT
         if http_client is None:
             http_client = self.get_auth_http_client()
-        http_client.fetch(url, self.async_callback(
+        http_client.fetch(url, functools.partial(
             self._on_authentication_verified, callback),
             method="POST", body=urllib_parse.urlencode(args))

@@ -334,7 +360,7 @@ class OAuthMixin(object):
             http_client.fetch(
                 self._oauth_request_token_url(callback_uri=callback_uri,
                                               extra_params=extra_params),
-                self.async_callback(
+                functools.partial(
                     self._on_request_token,
                     self._OAUTH_AUTHORIZE_URL,
                     callback_uri,
@@ -342,7 +368,7 @@ class OAuthMixin(object):
         else:
             http_client.fetch(
                 self._oauth_request_token_url(),
-                self.async_callback(
+                functools.partial(
                     self._on_request_token, self._OAUTH_AUTHORIZE_URL,
                     callback_uri,
                     callback))

@@ -379,7 +405,7 @@ class OAuthMixin(object):
         if http_client is None:
             http_client = self.get_auth_http_client()
         http_client.fetch(self._oauth_access_token_url(token),
-                          self.async_callback(self._on_access_token, callback))
+                          functools.partial(self._on_access_token, callback))

     def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
         consumer_token = self._oauth_consumer_token()

@@ -456,7 +482,7 @@ class OAuthMixin(object):

         access_token = _oauth_parse_response(response.body)
         self._oauth_get_user_future(access_token).add_done_callback(
-            self.async_callback(self._on_oauth_get_user, access_token, future))
+            functools.partial(self._on_oauth_get_user, access_token, future))

     def _oauth_consumer_token(self):
         """Subclasses must override this to return their OAuth consumer keys.
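A recurring change throughout this file is the swap from ``self.async_callback(...)`` to ``functools.partial(...)``. ``RequestHandler.async_callback`` wrapped a callback so that exceptions raised inside it were routed back to the handler; once Tornado's ``stack_context`` machinery tracked exceptions automatically, the wrapper became redundant and plain partial application suffices. A minimal sketch of the equivalence, with illustrative names:

    import functools

    def on_access_token(user_callback, response):
        # process the response, then hand the result to the caller's callback
        user_callback(response)

    def user_callback(result):
        print('got %r' % result)

    # Before 4.0 (inside a RequestHandler):
    #     cb = self.async_callback(self._on_access_token, user_callback)
    # After 4.0, a plain partial is enough; stack_context routes exceptions:
    cb = functools.partial(on_access_token, user_callback)
    cb('response-object')  # same call shape as the old wrapped callback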
@@ -549,7 +575,7 @@ class OAuth2Mixin(object):
     @return_future
     def authorize_redirect(self, redirect_uri=None, client_id=None,
                            client_secret=None, extra_params=None,
-                           callback=None):
+                           callback=None, scope=None, response_type="code"):
         """Redirects the user to obtain OAuth authorization for this service.

         Some providers require that you register a redirect URL with
@@ -566,10 +592,13 @@
         """
         args = {
             "redirect_uri": redirect_uri,
-            "client_id": client_id
+            "client_id": client_id,
+            "response_type": response_type
         }
         if extra_params:
             args.update(extra_params)
+        if scope:
+            args['scope'] = ' '.join(scope)
         self.redirect(
             url_concat(self._OAUTH_AUTHORIZE_URL, args))
         callback()
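The new ``scope`` parameter takes a list of strings and joins them with spaces, the form OAuth2 authorization endpoints expect, while ``response_type`` defaults to the authorization-code flow. A sketch of a custom provider built on the updated signature; the endpoint URL, settings key, and scope names are placeholders:

    import tornado.auth
    import tornado.gen
    import tornado.web

    class ExampleOAuth2Mixin(tornado.auth.OAuth2Mixin):
        # placeholder endpoint; substitute your provider's authorize URL
        _OAUTH_AUTHORIZE_URL = 'https://provider.example.com/oauth2/authorize'

    class ExampleLoginHandler(tornado.web.RequestHandler, ExampleOAuth2Mixin):
        @tornado.gen.coroutine
        def get(self):
            yield self.authorize_redirect(
                redirect_uri='http://your.site.com/auth/example',
                client_id=self.settings['example_oauth']['key'],
                scope=['read', 'write'],   # joined to "read write" in the URL
                response_type='code')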
@@ -604,7 +633,6 @@ class TwitterMixin(OAuthMixin):

         class TwitterLoginHandler(tornado.web.RequestHandler,
                                   tornado.auth.TwitterMixin):
-            @tornado.web.asynchronous
             @tornado.gen.coroutine
             def get(self):
                 if self.get_argument("oauth_token", None):
@@ -639,7 +667,7 @@ class TwitterMixin(OAuthMixin):
         """
         http = self.get_auth_http_client()
         http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
-                   self.async_callback(
+                   functools.partial(
                        self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
                        None, callback))

@@ -666,7 +694,6 @@ class TwitterMixin(OAuthMixin):
         class MainHandler(tornado.web.RequestHandler,
                           tornado.auth.TwitterMixin):
             @tornado.web.authenticated
-            @tornado.web.asynchronous
             @tornado.gen.coroutine
             def get(self):
                 new_entry = yield self.twitter_request(
@@ -698,7 +725,7 @@ class TwitterMixin(OAuthMixin):
         if args:
             url += "?" + urllib_parse.urlencode(args)
         http = self.get_auth_http_client()
-        http_callback = self.async_callback(self._on_twitter_request, callback)
+        http_callback = functools.partial(self._on_twitter_request, callback)
         if post_args is not None:
             http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
                        callback=http_callback)

@@ -745,7 +772,6 @@ class FriendFeedMixin(OAuthMixin):

         class FriendFeedLoginHandler(tornado.web.RequestHandler,
                                      tornado.auth.FriendFeedMixin):
-            @tornado.web.asynchronous
             @tornado.gen.coroutine
             def get(self):
                 if self.get_argument("oauth_token", None):
@@ -790,7 +816,6 @@ class FriendFeedMixin(OAuthMixin):
         class MainHandler(tornado.web.RequestHandler,
                           tornado.auth.FriendFeedMixin):
             @tornado.web.authenticated
-            @tornado.web.asynchronous
             @tornado.gen.coroutine
             def get(self):
                 new_entry = yield self.friendfeed_request(
@@ -817,7 +842,7 @@ class FriendFeedMixin(OAuthMixin):
         args.update(oauth)
         if args:
             url += "?" + urllib_parse.urlencode(args)
-        callback = self.async_callback(self._on_friendfeed_request, callback)
+        callback = functools.partial(self._on_friendfeed_request, callback)
         http = self.get_auth_http_client()
         if post_args is not None:
             http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
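Dropping ``@tornado.web.asynchronous`` from these doc examples is a 4.0 documentation cleanup rather than a behavior change: a ``@gen.coroutine`` handler method returns a Future, and the web framework keeps the request open until that Future resolves, so the extra decorator adds nothing. A minimal before/after sketch:

    import tornado.gen
    import tornado.web

    class BeforeHandler(tornado.web.RequestHandler):
        @tornado.web.asynchronous   # redundant on a coroutine handler
        @tornado.gen.coroutine
        def get(self):
            self.write('hello')

    class AfterHandler(tornado.web.RequestHandler):
        @tornado.gen.coroutine      # the coroutine alone keeps the request open
        def get(self):
            self.write('hello')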
@@ -858,6 +883,11 @@ class FriendFeedMixin(OAuthMixin):
 class GoogleMixin(OpenIdMixin, OAuthMixin):
     """Google Open ID / OAuth authentication.

+    .. deprecated:: 4.0
+        New applications should use `GoogleOAuth2Mixin`
+        below instead of this class. As of May 19, 2014, Google has stopped
+        supporting registration-free authentication.
+
     No application registration is necessary to use Google for
     authentication or to access Google resources on behalf of a user.

@@ -874,7 +904,6 @@ class GoogleMixin(OpenIdMixin, OAuthMixin):

         class GoogleLoginHandler(tornado.web.RequestHandler,
                                  tornado.auth.GoogleMixin):
-            @tornado.web.asynchronous
             @tornado.gen.coroutine
             def get(self):
                 if self.get_argument("openid.mode", None):
@@ -929,7 +958,7 @@ class GoogleMixin(OpenIdMixin, OAuthMixin):
             http = self.get_auth_http_client()
             token = dict(key=token, secret="")
             http.fetch(self._oauth_access_token_url(token),
-                       self.async_callback(self._on_access_token, callback))
+                       functools.partial(self._on_access_token, callback))
         else:
             chain_future(OpenIdMixin.get_authenticated_user(self),
                          callback)
@@ -945,12 +974,90 @@ class GoogleMixin(OpenIdMixin, OAuthMixin):
         return OpenIdMixin.get_authenticated_user(self)


+class GoogleOAuth2Mixin(OAuth2Mixin):
+    """Google authentication using OAuth2.
+
+    In order to use, register your application with Google and copy the
+    relevant parameters to your application settings.
+
+    * Go to the Google Dev Console at http://console.developers.google.com
+    * Select a project, or create a new one.
+    * In the sidebar on the left, select APIs & Auth.
+    * In the list of APIs, find the Google+ API service and set it to ON.
+    * In the sidebar on the left, select Credentials.
+    * In the OAuth section of the page, select Create New Client ID.
+    * Set the Redirect URI to point to your auth handler
+    * Copy the "Client secret" and "Client ID" to the application settings as
+      {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}
+
+    .. versionadded:: 3.2
+    """
+    _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
+    _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
+    _OAUTH_NO_CALLBACKS = False
+    _OAUTH_SETTINGS_KEY = 'google_oauth'
+
+    @_auth_return_future
+    def get_authenticated_user(self, redirect_uri, code, callback):
+        """Handles the login for the Google user, returning a user object.
+
+        Example usage::
+
+            class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
+                                           tornado.auth.GoogleOAuth2Mixin):
+                @tornado.gen.coroutine
+                def get(self):
+                    if self.get_argument('code', False):
+                        user = yield self.get_authenticated_user(
+                            redirect_uri='http://your.site.com/auth/google',
+                            code=self.get_argument('code'))
+                        # Save the user with e.g. set_secure_cookie
+                    else:
+                        yield self.authorize_redirect(
+                            redirect_uri='http://your.site.com/auth/google',
+                            client_id=self.settings['google_oauth']['key'],
+                            scope=['profile', 'email'],
+                            response_type='code',
+                            extra_params={'approval_prompt': 'auto'})
+        """
+        http = self.get_auth_http_client()
+        body = urllib_parse.urlencode({
+            "redirect_uri": redirect_uri,
+            "code": code,
+            "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
+            "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
+            "grant_type": "authorization_code",
+        })
+
+        http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
+                   functools.partial(self._on_access_token, callback),
+                   method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
+
+    def _on_access_token(self, future, response):
+        """Callback function for the exchange to the access token."""
+        if response.error:
+            future.set_exception(AuthError('Google auth error: %s' % str(response)))
+            return
+
+        args = escape.json_decode(response.body)
+        future.set_result(args)
+
+    def get_auth_http_client(self):
+        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
+
+        May be overridden by subclasses to use an HTTP client other than
+        the default.
+        """
+        return httpclient.AsyncHTTPClient()
+
+
 class FacebookMixin(object):
     """Facebook Connect authentication.

-    *Deprecated:* New applications should use `FacebookGraphMixin`
-    below instead of this class. This class does not support the
-    Future-based interface seen on other classes in this module.
+    .. deprecated:: 1.1
+        New applications should use `FacebookGraphMixin`
+        below instead of this class. This class does not support the
+        Future-based interface seen on other classes in this module.

     To authenticate with Facebook, register your application with
     Facebook at http://www.facebook.com/developers/apps.php. Then
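Note that the ``user`` object resolved by this ``get_authenticated_user`` is simply the decoded token response (``access_token`` and related fields); fetching profile data is left to the application. A hedged sketch of a follow-up call, assuming Google's userinfo endpoint — the URL is an assumption, not part of this diff:

    import tornado.escape
    import tornado.gen
    import tornado.httpclient

    @tornado.gen.coroutine
    def fetch_profile(user):
        # 'user' is the dict resolved by GoogleOAuth2Mixin.get_authenticated_user
        http = tornado.httpclient.AsyncHTTPClient()
        response = yield http.fetch(
            # assumed endpoint; check Google's current API documentation
            'https://www.googleapis.com/oauth2/v1/userinfo?access_token=' +
            user['access_token'])
        raise tornado.gen.Return(tornado.escape.json_decode(response.body))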
@@ -965,7 +1072,7 @@ class FacebookMixin(object):
             @tornado.web.asynchronous
             def get(self):
                 if self.get_argument("session", None):
-                    self.get_authenticated_user(self.async_callback(self._on_auth))
+                    self.get_authenticated_user(self._on_auth)
                     return
                 yield self.authenticate_redirect()

@@ -1051,7 +1158,7 @@ class FacebookMixin(object):
         session = escape.json_decode(self.get_argument("session"))
         self.facebook_request(
             method="facebook.users.getInfo",
-            callback=self.async_callback(
+            callback=functools.partial(
                 self._on_get_user_info, callback, session),
             session_key=session["session_key"],
             uids=session["uid"],
@@ -1077,7 +1184,7 @@ class FacebookMixin(object):
             def get(self):
                 self.facebook_request(
                     method="stream.get",
-                    callback=self.async_callback(self._on_stream),
+                    callback=self._on_stream,
                     session_key=self.current_user["session_key"])

         def _on_stream(self, stream):
@@ -1101,7 +1208,7 @@ class FacebookMixin(object):
         url = "http://api.facebook.com/restserver.php?" + \
             urllib_parse.urlencode(args)
         http = self.get_auth_http_client()
-        http.fetch(url, callback=self.async_callback(
+        http.fetch(url, callback=functools.partial(
             self._parse_response, callback))

     def _on_get_user_info(self, callback, session, users):
@@ -1158,7 +1265,7 @@ class FacebookMixin(object):
 class FacebookGraphMixin(OAuth2Mixin):
     """Facebook authentication using the new Graph API and OAuth2."""
     _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
-    _OAUTH_AUTHORIZE_URL = "https://graph.facebook.com/oauth/authorize?"
+    _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
     _OAUTH_NO_CALLBACKS = False
     _FACEBOOK_BASE_URL = "https://graph.facebook.com"

@@ -1170,7 +1277,6 @@ class FacebookGraphMixin(OAuth2Mixin):
         Example usage::

             class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
-                @tornado.web.asynchronous
                 @tornado.gen.coroutine
                 def get(self):
                     if self.get_argument("code", False):
@@ -1200,7 +1306,7 @@ class FacebookGraphMixin(OAuth2Mixin):
             fields.update(extra_fields)

         http.fetch(self._oauth_request_token_url(**args),
-                   self.async_callback(self._on_access_token, redirect_uri, client_id,
+                   functools.partial(self._on_access_token, redirect_uri, client_id,
                                        client_secret, callback, fields))

     def _on_access_token(self, redirect_uri, client_id, client_secret,
@@ -1217,7 +1323,7 @@ class FacebookGraphMixin(OAuth2Mixin):

         self.facebook_request(
             path="/me",
-            callback=self.async_callback(
+            callback=functools.partial(
                 self._on_get_user_info, future, session, fields),
             access_token=session["access_token"],
             fields=",".join(fields)
@@ -1285,7 +1390,7 @@ class FacebookGraphMixin(OAuth2Mixin):

         if all_args:
             url += "?" + urllib_parse.urlencode(all_args)
-        callback = self.async_callback(self._on_facebook_request, callback)
+        callback = functools.partial(self._on_facebook_request, callback)
         http = self.get_auth_http_client()
         if post_args is not None:
             http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
@@ -14,13 +14,17 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""xAutomatically restart the server when a source file is modified.
+"""Automatically restart the server when a source file is modified.

-Most applications should not access this module directly. Instead, pass the
-keyword argument ``debug=True`` to the `tornado.web.Application` constructor.
-This will enable autoreload mode as well as checking for changes to templates
-and static resources. Note that restarting is a destructive operation
-and any requests in progress will be aborted when the process restarts.
+Most applications should not access this module directly. Instead,
+pass the keyword argument ``autoreload=True`` to the
+`tornado.web.Application` constructor (or ``debug=True``, which
+enables this setting and several others). This will enable autoreload
+mode as well as checking for changes to templates and static
+resources. Note that restarting is a destructive operation and any
+requests in progress will be aborted when the process restarts. (If
+you want to disable autoreload while using other debug-mode features,
+pass both ``debug=True`` and ``autoreload=False``).

 This module can also be used as a command-line wrapper around scripts
 such as unit test runners. See the `main` method for details.

@@ -38,6 +42,7 @@ Reloading loses any Python interpreter command-line arguments (e.g. ``-u``)
 because it re-executes Python using ``sys.executable`` and ``sys.argv``.
 Additionally, modifying these variables will cause reloading to behave
 incorrectly.
+
 """

 from __future__ import absolute_import, division, print_function, with_statement
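A quick sketch of the two entry points the docstring describes; the handler list and script name are placeholders:

    # In-process: enable autoreload (and other debug features) on an Application.
    import tornado.web

    application = tornado.web.Application(
        handlers=[],    # your routes here
        debug=True)     # or autoreload=True to enable only reloading

    # Command-line wrapper mode, e.g. around a test runner:
    #   python -m tornado.autoreload -m tornado.test.runtests
    #   python -m tornado.autoreload path/to/script.py [args...]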
File diff suppressed because it is too large
@@ -40,52 +40,132 @@ class ReturnValueIgnoredError(Exception):
     pass


-class _DummyFuture(object):
+class Future(object):
+    """Placeholder for an asynchronous result.
+
+    A ``Future`` encapsulates the result of an asynchronous
+    operation. In synchronous applications ``Futures`` are used
+    to wait for the result from a thread or process pool; in
+    Tornado they are normally used with `.IOLoop.add_future` or by
+    yielding them in a `.gen.coroutine`.
+
+    `tornado.concurrent.Future` is similar to
+    `concurrent.futures.Future`, but not thread-safe (and therefore
+    faster for use with single-threaded event loops).
+
+    In addition to ``exception`` and ``set_exception``, methods ``exc_info``
+    and ``set_exc_info`` are supported to capture tracebacks in Python 2.
+    The traceback is automatically available in Python 3, but in the
+    Python 2 futures backport this information is discarded.
+    This functionality was previously available in a separate class
+    ``TracebackFuture``, which is now a deprecated alias for this class.
+
+    .. versionchanged:: 4.0
+       `tornado.concurrent.Future` is always a thread-unsafe ``Future``
+       with support for the ``exc_info`` methods. Previously it would
+       be an alias for the thread-safe `concurrent.futures.Future`
+       if that package was available and fall back to the thread-unsafe
+       implementation if it was not.
+
+    """
     def __init__(self):
         self._done = False
         self._result = None
         self._exception = None
+        self._exc_info = None
         self._callbacks = []

     def cancel(self):
+        """Cancel the operation, if possible.
+
+        Tornado ``Futures`` do not support cancellation, so this method always
+        returns False.
+        """
         return False

     def cancelled(self):
+        """Returns True if the operation has been cancelled.
+
+        Tornado ``Futures`` do not support cancellation, so this method
+        always returns False.
+        """
         return False

     def running(self):
+        """Returns True if this operation is currently running."""
         return not self._done

     def done(self):
+        """Returns True if the future has finished running."""
         return self._done

     def result(self, timeout=None):
-        self._check_done()
-        if self._exception:
+        """If the operation succeeded, return its result. If it failed,
+        re-raise its exception.
+        """
+        if self._result is not None:
+            return self._result
+        if self._exc_info is not None:
+            raise_exc_info(self._exc_info)
+        elif self._exception is not None:
             raise self._exception
+        self._check_done()
         return self._result

     def exception(self, timeout=None):
-        self._check_done()
-        if self._exception:
+        """If the operation raised an exception, return the `Exception`
+        object. Otherwise returns None.
+        """
+        if self._exception is not None:
             return self._exception
         else:
+            self._check_done()
             return None

     def add_done_callback(self, fn):
+        """Attaches the given callback to the `Future`.
+
+        It will be invoked with the `Future` as its argument when the Future
+        has finished running and its result is available. In Tornado
+        consider using `.IOLoop.add_future` instead of calling
+        `add_done_callback` directly.
+        """
         if self._done:
             fn(self)
         else:
             self._callbacks.append(fn)

     def set_result(self, result):
+        """Sets the result of a ``Future``.
+
+        It is undefined to call any of the ``set`` methods more than once
+        on the same object.
+        """
         self._result = result
         self._set_done()

     def set_exception(self, exception):
+        """Sets the exception of a ``Future.``"""
         self._exception = exception
         self._set_done()

+    def exc_info(self):
+        """Returns a tuple in the same format as `sys.exc_info` or None.
+
+        .. versionadded:: 4.0
+        """
+        return self._exc_info
+
+    def set_exc_info(self, exc_info):
+        """Sets the exception information of a ``Future.``
+
+        Preserves tracebacks on Python 2.
+
+        .. versionadded:: 4.0
+        """
+        self._exc_info = exc_info
+        self.set_exception(exc_info[1])
+
     def _check_done(self):
         if not self._done:
             raise Exception("DummyFuture does not support blocking for results")
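A short, runnable illustration of the (unchanged) completion protocol plus the new ``exc_info`` methods:

    import sys
    from tornado.concurrent import Future

    def on_done(fut):
        # runs synchronously here because the future is already resolved
        print('result = %r' % fut.result())

    f = Future()
    f.set_result(42)
    f.add_done_callback(on_done)        # fires immediately: result = 42

    g = Future()
    try:
        raise ValueError('boom')
    except ValueError:
        g.set_exc_info(sys.exc_info())  # new in 4.0; keeps the traceback on py2
    print(g.exception())                # ValueError('boom')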
@@ -97,38 +177,16 @@ class _DummyFuture(object):
             cb(self)
         self._callbacks = None

+TracebackFuture = Future

 if futures is None:
-    Future = _DummyFuture
+    FUTURES = Future
 else:
-    Future = futures.Future
+    FUTURES = (futures.Future, Future)


-class TracebackFuture(Future):
-    """Subclass of `Future` which can store a traceback with
-    exceptions.
-
-    The traceback is automatically available in Python 3, but in the
-    Python 2 futures backport this information is discarded.
-    """
-    def __init__(self):
-        super(TracebackFuture, self).__init__()
-        self.__exc_info = None
-
-    def exc_info(self):
-        return self.__exc_info
-
-    def set_exc_info(self, exc_info):
-        """Traceback-aware replacement for
-        `~concurrent.futures.Future.set_exception`.
-        """
-        self.__exc_info = exc_info
-        self.set_exception(exc_info[1])
-
-    def result(self):
-        if self.__exc_info is not None:
-            raise_exc_info(self.__exc_info)
-        else:
-            return super(TracebackFuture, self).result()
+def is_future(x):
+    return isinstance(x, FUTURES)


 class DummyExecutor(object):
@@ -151,6 +209,9 @@ def run_on_executor(fn):

     The decorated method may be called with a ``callback`` keyword
     argument and returns a future.
+
+    This decorator should be used only on methods of objects with attributes
+    ``executor`` and ``io_loop``.
     """
     @functools.wraps(fn)
     def wrapper(self, *args, **kwargs):

@@ -251,10 +312,13 @@ def return_future(f):
 def chain_future(a, b):
     """Chain two futures together so that when one completes, so does the other.

-    The result (success or failure) of ``a`` will be copied to ``b``.
+    The result (success or failure) of ``a`` will be copied to ``b``, unless
+    ``b`` has already been completed or cancelled by the time ``a`` finishes.
     """
     def copy(future):
         assert future is a
+        if b.done():
+            return
         if (isinstance(a, TracebackFuture) and isinstance(b, TracebackFuture)
                 and a.exc_info() is not None):
             b.set_exc_info(a.exc_info())
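A sketch of the ``run_on_executor`` contract described above: the object must expose ``executor`` and ``io_loop`` attributes, and the wrapped method then returns a Future that resolves on the IOLoop; the class and method names are illustrative:

    from concurrent.futures import ThreadPoolExecutor
    from tornado.concurrent import run_on_executor
    from tornado.ioloop import IOLoop

    class Worker(object):
        def __init__(self):
            self.executor = ThreadPoolExecutor(max_workers=4)
            self.io_loop = IOLoop.current()

        @run_on_executor
        def compute(self, x):
            # runs on the thread pool; the caller receives a Future
            return x * x

    # e.g. inside a coroutine:  result = yield Worker().compute(6)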
@@ -51,18 +51,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
         self._fds = {}
         self._timeout = None

-        try:
-            self._socket_action = self._multi.socket_action
-        except AttributeError:
-            # socket_action is found in pycurl since 7.18.2 (it's been
-            # in libcurl longer than that but wasn't accessible to
-            # python).
-            gen_log.warning("socket_action method missing from pycurl; "
-                            "falling back to socket_all. Upgrading "
-                            "libcurl and pycurl will improve performance")
-            self._socket_action = \
-                lambda fd, action: self._multi.socket_all()
-
         # libcurl has bugs that sometimes cause it to not report all
         # relevant file descriptors and timeouts to TIMERFUNCTION/
         # SOCKETFUNCTION. Mitigate the effects of such bugs by

@@ -87,7 +75,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
         for curl in self._curls:
             curl.close()
         self._multi.close()
-        self._closed = True
         super(CurlAsyncHTTPClient, self).close()

     def fetch_impl(self, request, callback):

@@ -143,7 +130,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
             action |= pycurl.CSELECT_OUT
         while True:
             try:
-                ret, num_handles = self._socket_action(fd, action)
+                ret, num_handles = self._multi.socket_action(fd, action)
             except pycurl.error as e:
                 ret = e.args[0]
             if ret != pycurl.E_CALL_MULTI_PERFORM:

@@ -156,7 +143,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
         self._timeout = None
         while True:
             try:
-                ret, num_handles = self._socket_action(
+                ret, num_handles = self._multi.socket_action(
                     pycurl.SOCKET_TIMEOUT, 0)
             except pycurl.error as e:
                 ret = e.args[0]

@@ -224,11 +211,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
                 "callback": callback,
                 "curl_start_time": time.time(),
             }
-            # Disable IPv6 to mitigate the effects of this bug
-            # on curl versions <= 7.21.0
-            # http://sourceforge.net/tracker/?func=detail&aid=3017819&group_id=976&atid=100976
-            if pycurl.version_info()[2] <= 0x71500:  # 7.21.0
-                curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
             _curl_setup_request(curl, request, curl.info["buffer"],
                                 curl.info["headers"])
             self._multi.add_handle(curl)

@@ -268,6 +250,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
             info["callback"](HTTPResponse(
                 request=info["request"], code=code, headers=info["headers"],
                 buffer=buffer, effective_url=effective_url, error=error,
+                reason=info['headers'].get("X-Http-Reason", None),
                 request_time=time.time() - info["curl_start_time"],
                 time_info=time_info))
         except Exception:
@@ -318,10 +301,12 @@ def _curl_setup_request(curl, request, buffer, headers):
         [native_str("%s: %s" % i) for i in request.headers.items()])

     if request.header_callback:
-        curl.setopt(pycurl.HEADERFUNCTION, request.header_callback)
+        curl.setopt(pycurl.HEADERFUNCTION,
+                    lambda line: request.header_callback(native_str(line)))
     else:
         curl.setopt(pycurl.HEADERFUNCTION,
-                    lambda line: _curl_header_callback(headers, line))
+                    lambda line: _curl_header_callback(headers,
+                                                       native_str(line)))
     if request.streaming_callback:
         write_function = request.streaming_callback
     else:

@@ -347,7 +332,7 @@ def _curl_setup_request(curl, request, buffer, headers):
         curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
     if request.network_interface:
         curl.setopt(pycurl.INTERFACE, request.network_interface)
-    if request.use_gzip:
+    if request.decompress_response:
         curl.setopt(pycurl.ENCODING, "gzip,deflate")
     else:
         curl.setopt(pycurl.ENCODING, "none")

@@ -360,6 +345,7 @@ def _curl_setup_request(curl, request, buffer, headers):
             curl.setopt(pycurl.PROXYUSERPWD, credentials)
     else:
         curl.setopt(pycurl.PROXY, '')
+        curl.unsetopt(pycurl.PROXYUSERPWD)
     if request.validate_cert:
         curl.setopt(pycurl.SSL_VERIFYPEER, 1)
         curl.setopt(pycurl.SSL_VERIFYHOST, 2)

@@ -380,8 +366,9 @@ def _curl_setup_request(curl, request, buffer, headers):
     if request.allow_ipv6 is False:
         # Curl behaves reasonably when DNS resolution gives an ipv6 address
         # that we can't reach, so allow ipv6 unless the user asks to disable.
-        # (but see version check in _process_queue above)
         curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
+    else:
+        curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)

     # Set the request method through curl's irritating interface which makes
     # up names for almost every single method

@@ -404,6 +391,11 @@ def _curl_setup_request(curl, request, buffer, headers):

     # Handle curl's cryptic options for every individual HTTP method
     if request.method in ("POST", "PUT"):
+        if request.body is None:
+            raise AssertionError(
+                'Body must not be empty for "%s" request'
+                % request.method)
+
         request_buffer = BytesIO(utf8(request.body))
         curl.setopt(pycurl.READFUNCTION, request_buffer.read)
         if request.method == "POST":

@@ -414,6 +406,9 @@ def _curl_setup_request(curl, request, buffer, headers):
             curl.setopt(pycurl.POSTFIELDSIZE, len(request.body))
         else:
             curl.setopt(pycurl.INFILESIZE, len(request.body))
+    elif request.method == "GET":
+        if request.body is not None:
+            raise AssertionError('Body must be empty for GET request')

     if request.auth_username is not None:
         userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')

@@ -457,7 +452,11 @@ def _curl_header_callback(headers, header_line):
     header_line = header_line.strip()
     if header_line.startswith("HTTP/"):
         headers.clear()
-        return
+        try:
+            (__, __, reason) = httputil.parse_response_start_line(header_line)
+            header_line = "X-Http-Reason: %s" % reason
+        except httputil.HTTPInputError:
+            return
     if not header_line:
         return
     headers.parse_line(header_line)
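The ``use_gzip`` to ``decompress_response`` rename above shows up throughout the 4.0 HTTP client. A usage sketch with the async client; the URL is a placeholder:

    from tornado.httpclient import AsyncHTTPClient, HTTPRequest
    from tornado.ioloop import IOLoop

    def main():
        request = HTTPRequest('http://example.com/',
                              decompress_response=True)  # was use_gzip before 4.0
        return AsyncHTTPClient().fetch(request)

    response = IOLoop.current().run_sync(main)
    print(response.code, len(response.body))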
@@ -49,12 +49,22 @@ try:
 except NameError:
     unichr = chr

-_XHTML_ESCAPE_RE = re.compile('[&<>"]')
-_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;'}
+_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
+_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;',
+                      '\'': '&#39;'}


 def xhtml_escape(value):
-    """Escapes a string so it is valid within HTML or XML."""
+    """Escapes a string so it is valid within HTML or XML.
+
+    Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
+    When used in attribute values the escaped strings must be enclosed
+    in quotes.
+
+    .. versionchanged:: 3.2
+
+       Added the single quote to the list of escaped characters.
+    """
     return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],
                                 to_basestring(value))

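With the single quote added, attribute values quoted with either character are safe. For instance:

    from tornado.escape import xhtml_escape

    print(xhtml_escape('<a href="x">O\'Reilly & Co</a>'))
    # &lt;a href=&quot;x&quot;&gt;O&#39;Reilly &amp; Co&lt;/a&gt;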
@@ -65,7 +75,7 @@ def xhtml_unescape(value):


 # The fact that json_encode wraps json.dumps is an implementation detail.
-# Please see https://github.com/facebook/tornado/pull/706
+# Please see https://github.com/tornadoweb/tornado/pull/706
 # before sending a pull request that adds **kwargs to this function.
 def json_encode(value):
     """JSON-encodes the given Python object."""

@@ -188,8 +198,10 @@ def utf8(value):
     """
     if isinstance(value, _UTF8_TYPES):
         return value
-    assert isinstance(value, unicode_type), \
-        "Expected bytes, unicode, or None; got %r" % type(value)
+    if not isinstance(value, unicode_type):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
     return value.encode("utf-8")

 _TO_UNICODE_TYPES = (unicode_type, type(None))

@@ -203,8 +215,10 @@ def to_unicode(value):
     """
     if isinstance(value, _TO_UNICODE_TYPES):
         return value
-    assert isinstance(value, bytes_type), \
-        "Expected bytes, unicode, or None; got %r" % type(value)
+    if not isinstance(value, bytes_type):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
     return value.decode("utf-8")

 # to_unicode was previously named _unicode not because it was private,

@@ -232,8 +246,10 @@ def to_basestring(value):
     """
     if isinstance(value, _BASESTRING_TYPES):
         return value
-    assert isinstance(value, bytes_type), \
-        "Expected bytes, unicode, or None; got %r" % type(value)
+    if not isinstance(value, bytes_type):
+        raise TypeError(
+            "Expected bytes, unicode, or None; got %r" % type(value)
+        )
     return value.decode("utf-8")
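The switch from ``assert`` to ``TypeError`` matters because assertions vanish under ``python -O``; the type check now always runs. Behavior sketch:

    from tornado.escape import utf8, to_unicode

    assert utf8(u'caf\u00e9') == b'caf\xc3\xa9'
    assert to_unicode(b'caf\xc3\xa9') == u'caf\u00e9'
    try:
        utf8(12345)      # neither bytes nor unicode
    except TypeError as e:
        print(e)         # Expected bytes, unicode, or None; got <... 'int'>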
@@ -29,17 +29,8 @@ could be written with ``gen`` as::
 Most asynchronous functions in Tornado return a `.Future`;
 yielding this object returns its `~.Future.result`.

-For functions that do not return ``Futures``, `Task` works with any
-function that takes a ``callback`` keyword argument (most Tornado functions
-can be used in either style, although the ``Future`` style is preferred
-since it is both shorter and provides better exception handling)::
-
-    @gen.coroutine
-    def get(self):
-        yield gen.Task(AsyncHTTPClient().fetch, "http://example.com")
-
-You can also yield a list of ``Futures`` and/or ``Tasks``, which will be
-started at the same time and run in parallel; a list of results will
+You can also yield a list or dict of ``Futures``, which will be
+started at the same time and run in parallel; a list or dict of results will
 be returned when they are all finished::

     @gen.coroutine

@@ -47,31 +38,13 @@ be returned when they are all finished::
         http_client = AsyncHTTPClient()
         response1, response2 = yield [http_client.fetch(url1),
                                       http_client.fetch(url2)]
+        response_dict = yield dict(response3=http_client.fetch(url3),
+                                   response4=http_client.fetch(url4))
+        response3 = response_dict['response3']
+        response4 = response_dict['response4']

-For more complicated interfaces, `Task` can be split into two parts:
-`Callback` and `Wait`::
-
-    class GenAsyncHandler2(RequestHandler):
-        @asynchronous
-        @gen.coroutine
-        def get(self):
-            http_client = AsyncHTTPClient()
-            http_client.fetch("http://example.com",
-                              callback=(yield gen.Callback("key")))
-            response = yield gen.Wait("key")
-            do_something_with_response(response)
-            self.render("template.html")
-
-The ``key`` argument to `Callback` and `Wait` allows for multiple
-asynchronous operations to be started at different times and proceed
-in parallel: yield several callbacks with different keys, then wait
-for them once all the async operations have started.
-
-The result of a `Wait` or `Task` yield expression depends on how the callback
-was run. If it was called with no arguments, the result is ``None``. If
-it was called with one argument, the result is that argument. If it was
-called with more than one argument or any keyword arguments, the result
-is an `Arguments` object, which is a named tuple ``(args, kwargs)``.
+.. versionchanged:: 3.2
+   Dict support added.
 """
 from __future__ import absolute_import, division, print_function, with_statement
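A self-contained version of the parallel-yield pattern from the docstring, runnable outside a RequestHandler; the URLs are placeholders:

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def crawl():
        client = AsyncHTTPClient()
        # list form: results arrive as a list, in input order
        r1, r2 = yield [client.fetch('http://example.com/'),
                        client.fetch('http://example.org/')]
        # dict form (3.2+): results arrive keyed like the input dict
        pages = yield dict(com=client.fetch('http://example.com/'),
                           org=client.fetch('http://example.org/'))
        raise gen.Return((len(r1.body), len(r2.body), sorted(pages)))

    print(IOLoop.current().run_sync(crawl))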
@@ -81,9 +54,9 @@ import itertools
 import sys
 import types

-from tornado.concurrent import Future, TracebackFuture
+from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
 from tornado.ioloop import IOLoop
-from tornado.stack_context import ExceptionStackContext, wrap
+from tornado import stack_context


 class KeyReuseError(Exception):

@@ -106,6 +79,10 @@ class ReturnValueIgnoredError(Exception):
     pass


+class TimeoutError(Exception):
+    """Exception raised by ``with_timeout``."""
+
+
 def engine(func):
     """Callback-oriented decorator for asynchronous generators.

@@ -123,45 +100,20 @@ def engine(func):
     `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
     which use ``self.finish()`` in place of a callback argument.
     """
+    func = _make_coroutine_wrapper(func, replace_callback=False)
     @functools.wraps(func)
     def wrapper(*args, **kwargs):
-        runner = None
-
-        def handle_exception(typ, value, tb):
-            # if the function throws an exception before its first "yield"
-            # (or is not a generator at all), the Runner won't exist yet.
-            # However, in that case we haven't reached anything asynchronous
-            # yet, so we can just let the exception propagate.
-            if runner is not None:
-                return runner.handle_exception(typ, value, tb)
-            return False
-        with ExceptionStackContext(handle_exception) as deactivate:
-            try:
-                result = func(*args, **kwargs)
-            except (Return, StopIteration) as e:
-                result = getattr(e, 'value', None)
-            else:
-                if isinstance(result, types.GeneratorType):
-                    def final_callback(value):
-                        if value is not None:
-                            raise ReturnValueIgnoredError(
-                                "@gen.engine functions cannot return values: "
-                                "%r" % (value,))
-                        assert value is None
-                        deactivate()
-                    runner = Runner(result, final_callback)
-                    runner.run()
-                    return
-            if result is not None:
+        future = func(*args, **kwargs)
+        def final_callback(future):
+            if future.result() is not None:
                 raise ReturnValueIgnoredError(
                     "@gen.engine functions cannot return values: %r" %
-                    (result,))
-            deactivate()
-            # no yield, so we're done
+                    (future.result(),))
+        future.add_done_callback(final_callback)
     return wrapper
@@ -185,43 +137,56 @@ def coroutine(func):
     From the caller's perspective, ``@gen.coroutine`` is similar to
     the combination of ``@return_future`` and ``@gen.engine``.
     """
+    return _make_coroutine_wrapper(func, replace_callback=True)
+
+
+def _make_coroutine_wrapper(func, replace_callback):
+    """The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
+
+    The two decorators differ in their treatment of the ``callback``
+    argument, so we cannot simply implement ``@engine`` in terms of
+    ``@coroutine``.
+    """
     @functools.wraps(func)
     def wrapper(*args, **kwargs):
-        runner = None
         future = TracebackFuture()

-        if 'callback' in kwargs:
+        if replace_callback and 'callback' in kwargs:
             callback = kwargs.pop('callback')
             IOLoop.current().add_future(
                 future, lambda future: callback(future.result()))

-        def handle_exception(typ, value, tb):
-            try:
-                if runner is not None and runner.handle_exception(typ, value, tb):
-                    return True
-            except Exception:
-                typ, value, tb = sys.exc_info()
-            future.set_exc_info((typ, value, tb))
-            return True
-        with ExceptionStackContext(handle_exception) as deactivate:
-            try:
-                result = func(*args, **kwargs)
-            except (Return, StopIteration) as e:
-                result = getattr(e, 'value', None)
-            except Exception:
-                deactivate()
-                future.set_exc_info(sys.exc_info())
+        try:
+            result = func(*args, **kwargs)
+        except (Return, StopIteration) as e:
+            result = getattr(e, 'value', None)
+        except Exception:
+            future.set_exc_info(sys.exc_info())
+            return future
+        else:
+            if isinstance(result, types.GeneratorType):
+                # Inline the first iteration of Runner.run. This lets us
+                # avoid the cost of creating a Runner when the coroutine
+                # never actually yields, which in turn allows us to
+                # use "optional" coroutines in critical path code without
+                # performance penalty for the synchronous case.
+                try:
+                    orig_stack_contexts = stack_context._state.contexts
+                    yielded = next(result)
+                    if stack_context._state.contexts is not orig_stack_contexts:
+                        yielded = TracebackFuture()
+                        yielded.set_exception(
+                            stack_context.StackContextInconsistentError(
+                                'stack_context inconsistency (probably caused '
+                                'by yield within a "with StackContext" block)'))
+                except (StopIteration, Return) as e:
+                    future.set_result(getattr(e, 'value', None))
+                except Exception:
+                    future.set_exc_info(sys.exc_info())
+                else:
+                    Runner(result, future, yielded)
                 return future
-            else:
-                if isinstance(result, types.GeneratorType):
-                    def final_callback(value):
-                        deactivate()
-                        future.set_result(value)
-                    runner = Runner(result, final_callback)
-                    runner.run()
-                    return future
-            deactivate()
-            future.set_result(result)
+        future.set_result(result)
         return future
     return wrapper
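The inlined first iteration above means a coroutine that never yields completes synchronously and cheaply. Minimal usage of the decorator itself:

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def add_one(x):
        # no yield here: resolved immediately via the inlined fast path
        raise gen.Return(x + 1)

    @gen.coroutine
    def main():
        y = yield add_one(41)
        raise gen.Return(y)

    print(IOLoop.current().run_sync(main))  # 42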
@@ -254,8 +219,8 @@ class Return(Exception):
 class YieldPoint(object):
     """Base class for objects that may be yielded from the generator.

-    Applications do not normally need to use this class, but it may be
-    subclassed to provide additional yielding behavior.
+    .. deprecated:: 4.0
+       Use `Futures <.Future>` instead.
     """
     def start(self, runner):
         """Called by the runner after the generator has yielded.

@@ -291,6 +256,9 @@ class Callback(YieldPoint):

     The callback may be called with zero or one arguments; if an argument
     is given it will be returned by `Wait`.
+
+    .. deprecated:: 4.0
+       Use `Futures <.Future>` instead.
     """
     def __init__(self, key):
         self.key = key

@@ -307,7 +275,11 @@ class Callback(YieldPoint):


 class Wait(YieldPoint):
-    """Returns the argument passed to the result of a previous `Callback`."""
+    """Returns the argument passed to the result of a previous `Callback`.
+
+    .. deprecated:: 4.0
+       Use `Futures <.Future>` instead.
+    """
     def __init__(self, key):
         self.key = key


@@ -328,6 +300,9 @@ class WaitAll(YieldPoint):
     a list of results in the same order.

     `WaitAll` is equivalent to yielding a list of `Wait` objects.
+
+    .. deprecated:: 4.0
+       Use `Futures <.Future>` instead.
     """
     def __init__(self, keys):
         self.keys = keys
@@ -342,39 +317,31 @@ class WaitAll(YieldPoint):
         return [self.runner.pop_result(key) for key in self.keys]


-class Task(YieldPoint):
-    """Runs a single asynchronous operation.
+def Task(func, *args, **kwargs):
+    """Adapts a callback-based asynchronous function for use in coroutines.

     Takes a function (and optional additional arguments) and runs it with
     those arguments plus a ``callback`` keyword argument. The argument passed
     to the callback is returned as the result of the yield expression.

-    A `Task` is equivalent to a `Callback`/`Wait` pair (with a unique
-    key generated automatically)::
-
-        result = yield gen.Task(func, args)
-
-        func(args, callback=(yield gen.Callback(key)))
-        result = yield gen.Wait(key)
+    .. versionchanged:: 4.0
+       ``gen.Task`` is now a function that returns a `.Future`, instead of
+       a subclass of `YieldPoint`. It still behaves the same way when
+       yielded.
     """
-    def __init__(self, func, *args, **kwargs):
-        assert "callback" not in kwargs
-        self.args = args
-        self.kwargs = kwargs
-        self.func = func
-
-    def start(self, runner):
-        self.runner = runner
-        self.key = object()
-        runner.register_callback(self.key)
-        self.kwargs["callback"] = runner.result_callback(self.key)
-        self.func(*self.args, **self.kwargs)
-
-    def is_ready(self):
-        return self.runner.is_ready(self.key)
-
-    def get_result(self):
-        return self.runner.pop_result(self.key)
+    future = Future()
+    def handle_exception(typ, value, tb):
+        if future.done():
+            return False
+        future.set_exc_info((typ, value, tb))
+        return True
+    def set_result(result):
+        if future.done():
+            return
+        future.set_result(result)
+    with stack_context.ExceptionStackContext(handle_exception):
+        func(*args, callback=_argument_adapter(set_result), **kwargs)
+    return future


 class YieldFuture(YieldPoint):
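``gen.Task`` remains the bridge for callback-only APIs. A runnable sketch wrapping a callback-style function; the function itself is invented for illustration:

    from tornado import gen
    from tornado.ioloop import IOLoop

    def slow_double(x, callback):
        # a callback-style API: delivers its result via `callback`
        IOLoop.current().add_callback(callback, 2 * x)

    @gen.coroutine
    def main():
        result = yield gen.Task(slow_double, 21)  # callback= injected by Task
        raise gen.Return(result)

    print(IOLoop.current().run_sync(main))  # 42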
@@ -383,30 +350,48 @@ class YieldFuture(YieldPoint):
         self.io_loop = io_loop or IOLoop.current()

     def start(self, runner):
-        self.runner = runner
-        self.key = object()
-        runner.register_callback(self.key)
-        self.io_loop.add_future(self.future, runner.result_callback(self.key))
+        if not self.future.done():
+            self.runner = runner
+            self.key = object()
+            runner.register_callback(self.key)
+            self.io_loop.add_future(self.future, runner.result_callback(self.key))
+        else:
+            self.runner = None
+            self.result = self.future.result()

     def is_ready(self):
-        return self.runner.is_ready(self.key)
+        if self.runner is not None:
+            return self.runner.is_ready(self.key)
+        else:
+            return True

     def get_result(self):
-        return self.runner.pop_result(self.key).result()
+        if self.runner is not None:
+            return self.runner.pop_result(self.key).result()
+        else:
+            return self.result


 class Multi(YieldPoint):
     """Runs multiple asynchronous operations in parallel.

-    Takes a list of ``Tasks`` or other ``YieldPoints`` and returns a list of
+    Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
     their responses. It is not necessary to call `Multi` explicitly,
     since the engine will do so automatically when the generator yields
-    a list of ``YieldPoints``.
+    a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
+
+    Instead of a list, the argument may also be a dictionary whose values are
+    Futures, in which case a parallel dictionary is returned mapping the same
+    keys to their results.
     """
     def __init__(self, children):
+        self.keys = None
+        if isinstance(children, dict):
+            self.keys = list(children.keys())
+            children = children.values()
         self.children = []
         for i in children:
-            if isinstance(i, Future):
+            if is_future(i):
                 i = YieldFuture(i)
             self.children.append(i)
         assert all(isinstance(i, YieldPoint) for i in self.children)
@@ -423,21 +408,134 @@ class Multi(YieldPoint):
         return not self.unfinished_children

     def get_result(self):
-        return [i.get_result() for i in self.children]
+        result = (i.get_result() for i in self.children)
+        if self.keys is not None:
+            return dict(zip(self.keys, result))
+        else:
+            return list(result)


-class _NullYieldPoint(YieldPoint):
-    def start(self, runner):
-        pass
+def multi_future(children):
+    """Wait for multiple asynchronous futures in parallel.

-    def is_ready(self):
-        return True
+    Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
+    a new Future that resolves when all the other Futures are done.
+    If all the ``Futures`` succeeded, the returned Future's result is a list
+    of their results. If any failed, the returned Future raises the exception
+    of the first one to fail.

-    def get_result(self):
-        return None
+    Instead of a list, the argument may also be a dictionary whose values are
+    Futures, in which case a parallel dictionary is returned mapping the same
+    keys to their results.
+
+    It is not necessary to call `multi_future` explicitly, since the engine will
+    do so automatically when the generator yields a list of ``Futures``.
+    This function is faster than the `Multi` `YieldPoint` because it does not
+    require the creation of a stack context.
+
+    .. versionadded:: 4.0
+    """
+    if isinstance(children, dict):
+        keys = list(children.keys())
+        children = children.values()
+    else:
+        keys = None
+    assert all(is_future(i) for i in children)
+    unfinished_children = set(children)
+
+    future = Future()
+    if not children:
+        future.set_result({} if keys is not None else [])
+    def callback(f):
+        unfinished_children.remove(f)
+        if not unfinished_children:
+            try:
+                result_list = [i.result() for i in children]
+            except Exception:
+                future.set_exc_info(sys.exc_info())
+            else:
+                if keys is not None:
+                    future.set_result(dict(zip(keys, result_list)))
+                else:
+                    future.set_result(result_list)
+    for f in children:
+        f.add_done_callback(callback)
+    return future


-_null_yield_point = _NullYieldPoint()
+def maybe_future(x):
+    """Converts ``x`` into a `.Future`.
+
+    If ``x`` is already a `.Future`, it is simply returned; otherwise
+    it is wrapped in a new `.Future`. This is suitable for use as
+    ``result = yield gen.maybe_future(f())`` when you don't know whether
+    ``f()`` returns a `.Future` or not.
+    """
+    if is_future(x):
+        return x
+    else:
+        fut = Future()
+        fut.set_result(x)
+        return fut
+
+
+def with_timeout(timeout, future, io_loop=None):
+    """Wraps a `.Future` in a timeout.
+
+    Raises `TimeoutError` if the input future does not complete before
+    ``timeout``, which may be specified in any form allowed by
+    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
+    relative to `.IOLoop.time`)
+
+    Currently only supports Futures, not other `YieldPoint` classes.
+
+    .. versionadded:: 4.0
+    """
+    # TODO: allow yield points in addition to futures?
+    # Tricky to do with stack_context semantics.
+    #
+    # It's tempting to optimize this by cancelling the input future on timeout
+    # instead of creating a new one, but A) we can't know if we are the only
+    # one waiting on the input future, so cancelling it might disrupt other
+    # callers and B) concurrent futures can only be cancelled while they are
+    # in the queue, so cancellation cannot reliably bound our waiting time.
+    result = Future()
+    chain_future(future, result)
+    if io_loop is None:
+        io_loop = IOLoop.current()
+    timeout_handle = io_loop.add_timeout(
+        timeout,
+        lambda: result.set_exception(TimeoutError("Timeout")))
+    if isinstance(future, Future):
+        # We know this future will resolve on the IOLoop, so we don't
+        # need the extra thread-safety of IOLoop.add_future (and we also
+        # don't care about StackContext here).
+        future.add_done_callback(
+            lambda future: io_loop.remove_timeout(timeout_handle))
+    else:
+        # concurrent.futures.Futures may resolve on any thread, so we
+        # need to route them back to the IOLoop.
+        io_loop.add_future(
+            future, lambda future: io_loop.remove_timeout(timeout_handle))
+    return result
+
+
+_null_future = Future()
+_null_future.set_result(None)
+
+moment = Future()
+moment.__doc__ = \
+    """A special object which may be yielded to allow the IOLoop to run for
+one iteration.
+
+This is not needed in normal use but it can be helpful in long-running
+coroutines that are likely to yield Futures that are ready instantly.
+
+Usage: ``yield gen.moment``
+
+.. versionadded:: 4.0
+"""
+moment.set_result(None)


 class Runner(object):
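A usage sketch for the new ``with_timeout``; the URL and deadline are placeholders:

    import datetime
    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def fetch_with_deadline(url):
        try:
            response = yield gen.with_timeout(
                datetime.timedelta(seconds=5),
                AsyncHTTPClient().fetch(url))
            raise gen.Return(response.code)
        except gen.TimeoutError:
            raise gen.Return(None)  # deadline expired before the fetch finished

    print(IOLoop.current().run_sync(
        lambda: fetch_with_deadline('http://example.com/')))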
@@ -445,35 +543,55 @@ class Runner(object):

     Maintains information about pending callbacks and their results.

-    ``final_callback`` is run after the generator exits.
+    The results of the generator are stored in ``result_future`` (a
+    `.TracebackFuture`)
     """
-    def __init__(self, gen, final_callback):
+    def __init__(self, gen, result_future, first_yielded):
         self.gen = gen
-        self.final_callback = final_callback
-        self.yield_point = _null_yield_point
-        self.pending_callbacks = set()
-        self.results = {}
+        self.result_future = result_future
+        self.future = _null_future
+        self.yield_point = None
+        self.pending_callbacks = None
+        self.results = None
         self.running = False
         self.finished = False
-        self.exc_info = None
         self.had_exception = False
+        self.io_loop = IOLoop.current()
+        # For efficiency, we do not create a stack context until we
+        # reach a YieldPoint (stack contexts are required for the historical
+        # semantics of YieldPoints, but not for Futures). When we have
+        # done so, this field will be set and must be called at the end
+        # of the coroutine.
+        self.stack_context_deactivate = None
+        if self.handle_yield(first_yielded):
+            self.run()

     def register_callback(self, key):
         """Adds ``key`` to the list of callbacks."""
+        if self.pending_callbacks is None:
+            # Lazily initialize the old-style YieldPoint data structures.
+            self.pending_callbacks = set()
+            self.results = {}
         if key in self.pending_callbacks:
             raise KeyReuseError("key %r is already pending" % (key,))
         self.pending_callbacks.add(key)

     def is_ready(self, key):
         """Returns true if a result is available for ``key``."""
-        if key not in self.pending_callbacks:
+        if self.pending_callbacks is None or key not in self.pending_callbacks:
             raise UnknownKeyError("key %r is not pending" % (key,))
         return key in self.results

     def set_result(self, key, result):
         """Sets the result for ``key`` and attempts to resume the generator."""
         self.results[key] = result
-        self.run()
+        if self.yield_point is not None and self.yield_point.is_ready():
+            try:
+                self.future.set_result(self.yield_point.get_result())
+            except:
+                self.future.set_exc_info(sys.exc_info())
+            self.yield_point = None
+            self.run()

     def pop_result(self, key):
         """Returns the result for ``key`` and unregisters it."""
@ -489,25 +607,27 @@ class Runner(object):
|
|||
try:
|
||||
self.running = True
|
||||
while True:
|
||||
if self.exc_info is None:
|
||||
try:
|
||||
if not self.yield_point.is_ready():
|
||||
return
|
||||
next = self.yield_point.get_result()
|
||||
self.yield_point = None
|
||||
except Exception:
|
||||
self.exc_info = sys.exc_info()
|
||||
future = self.future
|
||||
if not future.done():
|
||||
return
|
||||
self.future = None
|
||||
try:
|
||||
if self.exc_info is not None:
|
||||
orig_stack_contexts = stack_context._state.contexts
|
||||
try:
|
||||
value = future.result()
|
||||
except Exception:
|
||||
self.had_exception = True
|
||||
exc_info = self.exc_info
|
||||
self.exc_info = None
|
||||
yielded = self.gen.throw(*exc_info)
|
||||
yielded = self.gen.throw(*sys.exc_info())
|
||||
else:
|
||||
yielded = self.gen.send(next)
|
||||
yielded = self.gen.send(value)
|
||||
if stack_context._state.contexts is not orig_stack_contexts:
|
||||
self.gen.throw(
|
||||
stack_context.StackContextInconsistentError(
|
||||
'stack_context inconsistency (probably caused '
|
||||
'by yield within a "with StackContext" block)'))
|
||||
except (StopIteration, Return) as e:
|
||||
self.finished = True
|
||||
self.yield_point = _null_yield_point
|
||||
self.future = _null_future
|
||||
if self.pending_callbacks and not self.had_exception:
|
||||
# If we ran cleanly without waiting on all callbacks
|
||||
# raise an error (really more of a warning). If we
|
||||
|
|
@ -516,46 +636,105 @@ class Runner(object):
|
|||
raise LeakedCallbackError(
|
||||
"finished without waiting for callbacks %r" %
|
||||
self.pending_callbacks)
|
||||
self.final_callback(getattr(e, 'value', None))
|
||||
self.final_callback = None
|
||||
self.result_future.set_result(getattr(e, 'value', None))
|
||||
self.result_future = None
|
||||
self._deactivate_stack_context()
|
||||
return
|
||||
except Exception:
|
||||
self.finished = True
|
||||
self.yield_point = _null_yield_point
|
||||
raise
|
||||
if isinstance(yielded, list):
|
||||
yielded = Multi(yielded)
|
||||
elif isinstance(yielded, Future):
|
||||
yielded = YieldFuture(yielded)
|
||||
if isinstance(yielded, YieldPoint):
|
||||
self.yield_point = yielded
|
||||
try:
|
||||
self.yield_point.start(self)
|
||||
except Exception:
|
||||
self.exc_info = sys.exc_info()
|
||||
else:
|
||||
self.exc_info = (BadYieldError(
|
||||
"yielded unknown object %r" % (yielded,)),)
|
||||
self.future = _null_future
|
||||
self.result_future.set_exc_info(sys.exc_info())
|
||||
self.result_future = None
|
||||
self._deactivate_stack_context()
|
||||
return
|
||||
if not self.handle_yield(yielded):
|
||||
return
|
||||
finally:
|
||||
self.running = False
|
||||
|
||||
def result_callback(self, key):
|
||||
def inner(*args, **kwargs):
|
||||
if kwargs or len(args) > 1:
|
||||
result = Arguments(args, kwargs)
|
||||
elif args:
|
||||
result = args[0]
|
||||
def handle_yield(self, yielded):
|
||||
if isinstance(yielded, list):
|
||||
if all(is_future(f) for f in yielded):
|
||||
yielded = multi_future(yielded)
|
||||
else:
|
||||
result = None
|
||||
self.set_result(key, result)
|
||||
return wrap(inner)
|
||||
yielded = Multi(yielded)
|
||||
elif isinstance(yielded, dict):
|
||||
if all(is_future(f) for f in yielded.values()):
|
||||
yielded = multi_future(yielded)
|
||||
else:
|
||||
yielded = Multi(yielded)
|
||||
|
||||
if isinstance(yielded, YieldPoint):
|
||||
self.future = TracebackFuture()
|
||||
def start_yield_point():
|
||||
try:
|
||||
yielded.start(self)
|
||||
if yielded.is_ready():
|
||||
self.future.set_result(
|
||||
yielded.get_result())
|
||||
else:
|
||||
self.yield_point = yielded
|
||||
except Exception:
|
||||
self.future = TracebackFuture()
|
||||
self.future.set_exc_info(sys.exc_info())
|
||||
if self.stack_context_deactivate is None:
|
||||
# Start a stack context if this is the first
|
||||
# YieldPoint we've seen.
|
||||
with stack_context.ExceptionStackContext(
|
||||
self.handle_exception) as deactivate:
|
||||
self.stack_context_deactivate = deactivate
|
||||
def cb():
|
||||
start_yield_point()
|
||||
self.run()
|
||||
self.io_loop.add_callback(cb)
|
||||
return False
|
||||
else:
|
||||
start_yield_point()
|
||||
elif is_future(yielded):
|
||||
self.future = yielded
|
||||
if not self.future.done() or self.future is moment:
|
||||
self.io_loop.add_future(
|
||||
self.future, lambda f: self.run())
|
||||
return False
|
||||
else:
|
||||
self.future = TracebackFuture()
|
||||
self.future.set_exception(BadYieldError(
|
||||
"yielded unknown object %r" % (yielded,)))
|
||||
return True
|
||||
|
||||
def result_callback(self, key):
|
||||
return stack_context.wrap(_argument_adapter(
|
||||
functools.partial(self.set_result, key)))
|
||||
|
||||
def handle_exception(self, typ, value, tb):
|
||||
if not self.running and not self.finished:
|
||||
self.exc_info = (typ, value, tb)
|
||||
self.future = TracebackFuture()
|
||||
self.future.set_exc_info((typ, value, tb))
|
||||
self.run()
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _deactivate_stack_context(self):
|
||||
if self.stack_context_deactivate is not None:
|
||||
self.stack_context_deactivate()
|
||||
self.stack_context_deactivate = None
|
||||
|
||||
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
|
||||
|
||||
|
||||
def _argument_adapter(callback):
|
||||
"""Returns a function that when invoked runs ``callback`` with one arg.
|
||||
|
||||
If the function returned by this function is called with exactly
|
||||
one argument, that argument is passed to ``callback``. Otherwise
|
||||
the args tuple and kwargs dict are wrapped in an `Arguments` object.
|
||||
"""
|
||||
def wrapper(*args, **kwargs):
|
||||
if kwargs or len(args) > 1:
|
||||
callback(Arguments(args, kwargs))
|
||||
elif args:
|
||||
callback(args[0])
|
||||
else:
|
||||
callback(None)
|
||||
return wrapper
|
||||
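The adapter's calling conventions, sketched (illustrative only; assumes ``print`` is a function, as it is under this module's ``print_function`` import)::

    adapter = _argument_adapter(print)
    adapter()        # print(None)
    adapter(1)       # print(1)
    adapter(1, 2)    # print(Arguments(args=(1, 2), kwargs={}))
    adapter(a=1)     # print(Arguments(args=(), kwargs={'a': 1}))
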
651 Shared/lib/python2.7/site-packages/tornado/http1connection.py Normal file

@ -0,0 +1,651 @@
#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Client and server implementations of HTTP/1.x.

.. versionadded:: 4.0
"""

from __future__ import absolute_import, division, print_function, with_statement

from tornado.concurrent import Future
from tornado.escape import native_str, utf8
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log, app_log
from tornado import stack_context
from tornado.util import GzipDecompressor


class _QuietException(Exception):
    def __init__(self):
        pass

class _ExceptionLoggingContext(object):
    """Used with the ``with`` statement when calling delegate methods to
    log any exceptions with the given logger.  Any exceptions caught are
    converted to _QuietException.
    """
    def __init__(self, logger):
        self.logger = logger

    def __enter__(self):
        pass

    def __exit__(self, typ, value, tb):
        if value is not None:
            self.logger.error("Uncaught exception", exc_info=(typ, value, tb))
            raise _QuietException

class HTTP1ConnectionParameters(object):
    """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`.
    """
    def __init__(self, no_keep_alive=False, chunk_size=None,
                 max_header_size=None, header_timeout=None, max_body_size=None,
                 body_timeout=None, decompress=False):
        """
        :arg bool no_keep_alive: If true, always close the connection after
            one request.
        :arg int chunk_size: how much data to read into memory at once
        :arg int max_header_size: maximum amount of data for HTTP headers
        :arg float header_timeout: how long to wait for all headers (seconds)
        :arg int max_body_size: maximum amount of data for body
        :arg float body_timeout: how long to wait while reading body (seconds)
        :arg bool decompress: if true, decode incoming
            ``Content-Encoding: gzip``
        """
        self.no_keep_alive = no_keep_alive
        self.chunk_size = chunk_size or 65536
        self.max_header_size = max_header_size or 65536
        self.header_timeout = header_timeout
        self.max_body_size = max_body_size
        self.body_timeout = body_timeout
        self.decompress = decompress
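A sketch of constructing explicit parameters (the values are illustrative, not defaults from the diff)::

    params = HTTP1ConnectionParameters(
        no_keep_alive=False,
        chunk_size=64 * 1024,         # read at most 64KB at a time
        max_header_size=16 * 1024,
        header_timeout=30.0,          # seconds
        max_body_size=1024 * 1024,
        body_timeout=60.0,
        decompress=True)              # decode Content-Encoding: gzip
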

class HTTP1Connection(httputil.HTTPConnection):
    """Implements the HTTP/1.x protocol.

    This class can be used on its own for clients, or via `HTTP1ServerConnection`
    for servers.
    """
    def __init__(self, stream, is_client, params=None, context=None):
        """
        :arg stream: an `.IOStream`
        :arg bool is_client: client or server
        :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
        :arg context: an opaque application-defined object that can be accessed
            as ``connection.context``.
        """
        self.is_client = is_client
        self.stream = stream
        if params is None:
            params = HTTP1ConnectionParameters()
        self.params = params
        self.context = context
        self.no_keep_alive = params.no_keep_alive
        # The body limits can be altered by the delegate, so save them
        # here instead of just referencing self.params later.
        self._max_body_size = (self.params.max_body_size or
                               self.stream.max_buffer_size)
        self._body_timeout = self.params.body_timeout
        # _write_finished is set to True when finish() has been called,
        # i.e. there will be no more data sent.  Data may still be in the
        # stream's write buffer.
        self._write_finished = False
        # True when we have read the entire incoming body.
        self._read_finished = False
        # _finish_future resolves when all data has been written and flushed
        # to the IOStream.
        self._finish_future = Future()
        # If true, the connection should be closed after this request
        # (after the response has been written on the server side,
        # and after it has been read in the client)
        self._disconnect_on_finish = False
        self._clear_callbacks()
        # Save the start lines after we read or write them; they
        # affect later processing (e.g. 304 responses and HEAD methods
        # have content-length but no bodies)
        self._request_start_line = None
        self._response_start_line = None
        self._request_headers = None
        # True if we are writing output with chunked encoding.
        self._chunking_output = None
        # While reading a body with a content-length, this is the
        # amount left to read.
        self._expected_content_remaining = None
        # A Future for our outgoing writes, returned by IOStream.write.
        self._pending_write = None

    def read_response(self, delegate):
        """Read a single HTTP response.

        Typical client-mode usage is to write a request using `write_headers`,
        `write`, and `finish`, and then call ``read_response``.

        :arg delegate: a `.HTTPMessageDelegate`

        Returns a `.Future` that resolves to None after the full response has
        been read.
        """
        if self.params.decompress:
            delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
        return self._read_message(delegate)
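A hedged sketch of the client-mode sequence the docstring describes (``stream`` is a connected `.IOStream` and ``delegate`` a hypothetical `.HTTPMessageDelegate` implementation)::

    from tornado import gen, httputil

    @gen.coroutine
    def fetch_root(stream, delegate):
        conn = HTTP1Connection(stream, is_client=True)
        conn.write_headers(
            httputil.RequestStartLine('GET', '/', 'HTTP/1.1'),
            httputil.HTTPHeaders({'Host': 'example.com'}))
        conn.finish()
        yield conn.read_response(delegate)
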
    @gen.coroutine
    def _read_message(self, delegate):
        need_delegate_close = False
        try:
            header_future = self.stream.read_until_regex(
                b"\r?\n\r?\n",
                max_bytes=self.params.max_header_size)
            if self.params.header_timeout is None:
                header_data = yield header_future
            else:
                try:
                    header_data = yield gen.with_timeout(
                        self.stream.io_loop.time() + self.params.header_timeout,
                        header_future,
                        io_loop=self.stream.io_loop)
                except gen.TimeoutError:
                    self.close()
                    raise gen.Return(False)
            start_line, headers = self._parse_headers(header_data)
            if self.is_client:
                start_line = httputil.parse_response_start_line(start_line)
                self._response_start_line = start_line
            else:
                start_line = httputil.parse_request_start_line(start_line)
                self._request_start_line = start_line
                self._request_headers = headers

            self._disconnect_on_finish = not self._can_keep_alive(
                start_line, headers)
            need_delegate_close = True
            with _ExceptionLoggingContext(app_log):
                header_future = delegate.headers_received(start_line, headers)
                if header_future is not None:
                    yield header_future
            if self.stream is None:
                # We've been detached.
                need_delegate_close = False
                raise gen.Return(False)
            skip_body = False
            if self.is_client:
                if (self._request_start_line is not None and
                        self._request_start_line.method == 'HEAD'):
                    skip_body = True
                code = start_line.code
                if code == 304:
                    skip_body = True
                if code >= 100 and code < 200:
                    # TODO: client delegates will get headers_received twice
                    # in the case of a 100-continue.  Document or change?
                    yield self._read_message(delegate)
            else:
                if (headers.get("Expect") == "100-continue" and
                        not self._write_finished):
                    self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
            if not skip_body:
                body_future = self._read_body(headers, delegate)
                if body_future is not None:
                    if self._body_timeout is None:
                        yield body_future
                    else:
                        try:
                            yield gen.with_timeout(
                                self.stream.io_loop.time() + self._body_timeout,
                                body_future, self.stream.io_loop)
                        except gen.TimeoutError:
                            gen_log.info("Timeout reading body from %s",
                                         self.context)
                            self.stream.close()
                            raise gen.Return(False)
            self._read_finished = True
            if not self._write_finished or self.is_client:
                need_delegate_close = False
                with _ExceptionLoggingContext(app_log):
                    delegate.finish()
            # If we're waiting for the application to produce an asynchronous
            # response, and we're not detached, register a close callback
            # on the stream (we didn't need one while we were reading)
            if (not self._finish_future.done() and
                    self.stream is not None and
                    not self.stream.closed()):
                self.stream.set_close_callback(self._on_connection_close)
                yield self._finish_future
            if self.is_client and self._disconnect_on_finish:
                self.close()
            if self.stream is None:
                raise gen.Return(False)
        except httputil.HTTPInputError as e:
            gen_log.info("Malformed HTTP message from %s: %s",
                         self.context, e)
            self.close()
            raise gen.Return(False)
        finally:
            if need_delegate_close:
                with _ExceptionLoggingContext(app_log):
                    delegate.on_connection_close()
            self._clear_callbacks()
        raise gen.Return(True)

    def _clear_callbacks(self):
        """Clears the callback attributes.

        This allows the request handler to be garbage collected more
        quickly in CPython by breaking up reference cycles.
        """
        self._write_callback = None
        self._write_future = None
        self._close_callback = None
        if self.stream is not None:
            self.stream.set_close_callback(None)

    def set_close_callback(self, callback):
        """Sets a callback that will be run when the connection is closed.

        .. deprecated:: 4.0
            Use `.HTTPMessageDelegate.on_connection_close` instead.
        """
        self._close_callback = stack_context.wrap(callback)

    def _on_connection_close(self):
        # Note that this callback is only registered on the IOStream
        # when we have finished reading the request and are waiting for
        # the application to produce its response.
        if self._close_callback is not None:
            callback = self._close_callback
            self._close_callback = None
            callback()
        if not self._finish_future.done():
            self._finish_future.set_result(None)
        self._clear_callbacks()

    def close(self):
        if self.stream is not None:
            self.stream.close()
        self._clear_callbacks()
        if not self._finish_future.done():
            self._finish_future.set_result(None)

    def detach(self):
        """Take control of the underlying stream.

        Returns the underlying `.IOStream` object and stops all further
        HTTP processing.  May only be called during
        `.HTTPMessageDelegate.headers_received`.  Intended for implementing
        protocols like websockets that tunnel over an HTTP handshake.
        """
        self._clear_callbacks()
        stream = self.stream
        self.stream = None
        return stream

    def set_body_timeout(self, timeout):
        """Sets the body timeout for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._body_timeout = timeout

    def set_max_body_size(self, max_body_size):
        """Sets the body size limit for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._max_body_size = max_body_size

    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Implements `.HTTPConnection.write_headers`."""
        if self.is_client:
            self._request_start_line = start_line
            # Client requests with a non-empty body must have either a
            # Content-Length or a Transfer-Encoding.
            self._chunking_output = (
                start_line.method in ('POST', 'PUT', 'PATCH') and
                'Content-Length' not in headers and
                'Transfer-Encoding' not in headers)
        else:
            self._response_start_line = start_line
            self._chunking_output = (
                # TODO: should this use
                # self._request_start_line.version or
                # start_line.version?
                self._request_start_line.version == 'HTTP/1.1' and
                # 304 responses have no body (not even a zero-length body), and so
                # should not have either Content-Length or Transfer-Encoding
                # headers.
                start_line.code != 304 and
                # No need to chunk the output if a Content-Length is specified.
                'Content-Length' not in headers and
                # Applications are discouraged from touching Transfer-Encoding,
                # but if they do, leave it alone.
                'Transfer-Encoding' not in headers)
            # If a 1.0 client asked for keep-alive, add the header.
            if (self._request_start_line.version == 'HTTP/1.0' and
                (self._request_headers.get('Connection', '').lower()
                 == 'keep-alive')):
                headers['Connection'] = 'Keep-Alive'
        if self._chunking_output:
            headers['Transfer-Encoding'] = 'chunked'
        if (not self.is_client and
            (self._request_start_line.method == 'HEAD' or
             start_line.code == 304)):
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        lines = [utf8("%s %s %s" % start_line)]
        lines.extend([utf8(n) + b": " + utf8(v) for n, v in headers.get_all()])
        for line in lines:
            if b'\n' in line:
                raise ValueError('Newline in header: ' + repr(line))
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            future.set_exception(iostream.StreamClosedError())
        else:
            if callback is not None:
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            data = b"\r\n".join(lines) + b"\r\n\r\n"
            if chunk:
                data += self._format_chunk(chunk)
            self._pending_write = self.stream.write(data)
            self._pending_write.add_done_callback(self._on_write_complete)
        return future

    def _format_chunk(self, chunk):
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                # Close the stream now to stop further framing errors.
                self.stream.close()
                raise httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
        if self._chunking_output and chunk:
            # Don't write out empty chunks because that means END-OF-STREAM
            # with chunked encoding
            return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
        else:
            return chunk

    def write(self, chunk, callback=None):
        """Implements `.HTTPConnection.write`.

        For backwards compatibility it is allowed but deprecated to
        skip `write_headers` and instead call `write()` with a
        pre-encoded header block.
        """
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            self._write_future.set_exception(iostream.StreamClosedError())
        else:
            if callback is not None:
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            self._pending_write = self.stream.write(self._format_chunk(chunk))
            self._pending_write.add_done_callback(self._on_write_complete)
        return future

    def finish(self):
        """Implements `.HTTPConnection.finish`."""
        if (self._expected_content_remaining is not None and
                self._expected_content_remaining != 0 and
                not self.stream.closed()):
            self.stream.close()
            raise httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
        if self._chunking_output:
            if not self.stream.closed():
                self._pending_write = self.stream.write(b"0\r\n\r\n")
                self._pending_write.add_done_callback(self._on_write_complete)
        self._write_finished = True
        # If the app finished the request while we're still reading,
        # divert any remaining data away from the delegate and
        # close the connection when we're done sending our response.
        # Closing the connection is the only way to avoid reading the
        # whole input body.
        if not self._read_finished:
            self._disconnect_on_finish = True
        # No more data is coming, so instruct TCP to send any remaining
        # data immediately instead of waiting for a full packet or ack.
        self.stream.set_nodelay(True)
        if self._pending_write is None:
            self._finish_request(None)
        else:
            self._pending_write.add_done_callback(self._finish_request)

    def _on_write_complete(self, future):
        if self._write_callback is not None:
            callback = self._write_callback
            self._write_callback = None
            self.stream.io_loop.add_callback(callback)
        if self._write_future is not None:
            future = self._write_future
            self._write_future = None
            future.set_result(None)

    def _can_keep_alive(self, start_line, headers):
        if self.params.no_keep_alive:
            return False
        connection_header = headers.get("Connection")
        if connection_header is not None:
            connection_header = connection_header.lower()
        if start_line.version == "HTTP/1.1":
            return connection_header != "close"
        elif ("Content-Length" in headers
              or start_line.method in ("HEAD", "GET")):
            return connection_header == "keep-alive"
        return False

    def _finish_request(self, future):
        self._clear_callbacks()
        if not self.is_client and self._disconnect_on_finish:
            self.close()
            return
        # Turn Nagle's algorithm back on, leaving the stream in its
        # default state for the next request.
        self.stream.set_nodelay(False)
        if not self._finish_future.done():
            self._finish_future.set_result(None)

    def _parse_headers(self, data):
        data = native_str(data.decode('latin1'))
        eol = data.find("\r\n")
        start_line = data[:eol]
        try:
            headers = httputil.HTTPHeaders.parse(data[eol:])
        except ValueError:
            # probably from split() if there was no ':' in the line
            raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
                                          data[eol:100])
        return start_line, headers

    def _read_body(self, headers, delegate):
        content_length = headers.get("Content-Length")
        if content_length:
            content_length = int(content_length)
            if content_length > self._max_body_size:
                raise httputil.HTTPInputError("Content-Length too long")
            return self._read_fixed_body(content_length, delegate)
        if headers.get("Transfer-Encoding") == "chunked":
            return self._read_chunked_body(delegate)
        if self.is_client:
            return self._read_body_until_close(delegate)
        return None

    @gen.coroutine
    def _read_fixed_body(self, content_length, delegate):
        while content_length > 0:
            body = yield self.stream.read_bytes(
                min(self.params.chunk_size, content_length), partial=True)
            content_length -= len(body)
            if not self._write_finished or self.is_client:
                with _ExceptionLoggingContext(app_log):
                    yield gen.maybe_future(delegate.data_received(body))

    @gen.coroutine
    def _read_chunked_body(self, delegate):
        # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
        total_size = 0
        while True:
            chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
            chunk_len = int(chunk_len.strip(), 16)
            if chunk_len == 0:
                return
            total_size += chunk_len
            if total_size > self._max_body_size:
                raise httputil.HTTPInputError("chunked body too large")
            bytes_to_read = chunk_len
            while bytes_to_read:
                chunk = yield self.stream.read_bytes(
                    min(bytes_to_read, self.params.chunk_size), partial=True)
                bytes_to_read -= len(chunk)
                if not self._write_finished or self.is_client:
                    with _ExceptionLoggingContext(app_log):
                        yield gen.maybe_future(delegate.data_received(chunk))
            # chunk ends with \r\n
            crlf = yield self.stream.read_bytes(2)
            assert crlf == b"\r\n"

    @gen.coroutine
    def _read_body_until_close(self, delegate):
        body = yield self.stream.read_until_close()
        if not self._write_finished or self.is_client:
            with _ExceptionLoggingContext(app_log):
                delegate.data_received(body)


class _GzipMessageDelegate(httputil.HTTPMessageDelegate):
    """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``.
    """
    def __init__(self, delegate, chunk_size):
        self._delegate = delegate
        self._chunk_size = chunk_size
        self._decompressor = None

    def headers_received(self, start_line, headers):
        if headers.get("Content-Encoding") == "gzip":
            self._decompressor = GzipDecompressor()
            # Downstream delegates will only see uncompressed data,
            # so rename the content-encoding header.
            # (but note that curl_httpclient doesn't do this).
            headers.add("X-Consumed-Content-Encoding",
                        headers["Content-Encoding"])
            del headers["Content-Encoding"]
        return self._delegate.headers_received(start_line, headers)

    @gen.coroutine
    def data_received(self, chunk):
        if self._decompressor:
            compressed_data = chunk
            while compressed_data:
                decompressed = self._decompressor.decompress(
                    compressed_data, self._chunk_size)
                if decompressed:
                    yield gen.maybe_future(
                        self._delegate.data_received(decompressed))
                compressed_data = self._decompressor.unconsumed_tail
        else:
            yield gen.maybe_future(self._delegate.data_received(chunk))

    def finish(self):
        if self._decompressor is not None:
            tail = self._decompressor.flush()
            if tail:
                # I believe the tail will always be empty (i.e.
                # decompress will return all it can).  The purpose
                # of the flush call is to detect errors such
                # as truncated input.  But in case it ever returns
                # anything, treat it as an extra chunk
                self._delegate.data_received(tail)
        return self._delegate.finish()


class HTTP1ServerConnection(object):
    """An HTTP/1.x server."""
    def __init__(self, stream, params=None, context=None):
        """
        :arg stream: an `.IOStream`
        :arg params: a `.HTTP1ConnectionParameters` or None
        :arg context: an opaque application-defined object that is accessible
            as ``connection.context``
        """
        self.stream = stream
        if params is None:
            params = HTTP1ConnectionParameters()
        self.params = params
        self.context = context
        self._serving_future = None

    @gen.coroutine
    def close(self):
        """Closes the connection.

        Returns a `.Future` that resolves after the serving loop has exited.
        """
        self.stream.close()
        # Block until the serving loop is done, but ignore any exceptions
        # (start_serving is already responsible for logging them).
        try:
            yield self._serving_future
        except Exception:
            pass

    def start_serving(self, delegate):
        """Starts serving requests on this connection.

        :arg delegate: a `.HTTPServerConnectionDelegate`
        """
        assert isinstance(delegate, httputil.HTTPServerConnectionDelegate)
        self._serving_future = self._server_request_loop(delegate)
        # Register the future on the IOLoop so its errors get logged.
        self.stream.io_loop.add_future(self._serving_future,
                                       lambda f: f.result())

    @gen.coroutine
    def _server_request_loop(self, delegate):
        try:
            while True:
                conn = HTTP1Connection(self.stream, False,
                                       self.params, self.context)
                request_delegate = delegate.start_request(self, conn)
                try:
                    ret = yield conn.read_response(request_delegate)
                except (iostream.StreamClosedError,
                        iostream.UnsatisfiableReadError):
                    return
                except _QuietException:
                    # This exception was already logged.
                    conn.close()
                    return
                except Exception:
                    gen_log.error("Uncaught exception", exc_info=True)
                    conn.close()
                    return
                if not ret:
                    return
                yield gen.moment
        finally:
            delegate.on_close(self)

@ -22,9 +22,20 @@ to switch to ``curl_httpclient`` for reasons such as the following:

* ``curl_httpclient`` was the default prior to Tornado 2.0.

Note that if you are using ``curl_httpclient``, it is highly recommended that
you use a recent version of ``libcurl`` and ``pycurl``.  Currently the minimum
supported version is 7.18.2, and the recommended version is 7.21.1 or newer.
Note that if you are using ``curl_httpclient``, it is highly
recommended that you use a recent version of ``libcurl`` and
``pycurl``.  Currently the minimum supported version of libcurl is
7.21.1, and the minimum version of pycurl is 7.18.2.  It is highly
recommended that your ``libcurl`` installation is built with an
asynchronous DNS resolver (threaded or c-ares), otherwise you may
encounter various problems with request timeouts (for more
information, see
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
and comments in curl_httpclient.py).

To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::

    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""

from __future__ import absolute_import, division, print_function, with_statement

@ -33,8 +44,8 @@ import functools
import time
import weakref

from tornado.concurrent import Future
from tornado.escape import utf8
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str
from tornado import httputil, stack_context
from tornado.ioloop import IOLoop
from tornado.util import Configurable

@ -105,10 +116,21 @@ class AsyncHTTPClient(Configurable):
    actually creates an instance of an implementation-specific
    subclass, and instances are reused as a kind of pseudo-singleton
    (one per `.IOLoop`).  The keyword argument ``force_instance=True``
    can be used to suppress this singleton behavior.  Constructor
    arguments other than ``io_loop`` and ``force_instance`` are
    deprecated.  The implementation subclass as well as arguments to
    its constructor can be set with the static method `configure()`
    can be used to suppress this singleton behavior.  Unless
    ``force_instance=True`` is used, no arguments other than
    ``io_loop`` should be passed to the `AsyncHTTPClient` constructor.
    The implementation subclass as well as arguments to its
    constructor can be set with the static method `configure()`

    All `AsyncHTTPClient` implementations support a ``defaults``
    keyword argument, which can be used to set default values for
    `HTTPRequest` attributes.  For example::

        AsyncHTTPClient.configure(
            None, defaults=dict(user_agent="MyUserAgent"))
        # or with force_instance:
        client = AsyncHTTPClient(force_instance=True,
                                 defaults=dict(user_agent="MyUserAgent"))
    """
    @classmethod
    def configurable_base(cls):

@ -128,12 +150,21 @@ class AsyncHTTPClient(Configurable):

    def __new__(cls, io_loop=None, force_instance=False, **kwargs):
        io_loop = io_loop or IOLoop.current()
        if io_loop in cls._async_clients() and not force_instance:
            return cls._async_clients()[io_loop]
        if force_instance:
            instance_cache = None
        else:
            instance_cache = cls._async_clients()
        if instance_cache is not None and io_loop in instance_cache:
            return instance_cache[io_loop]
        instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
                                                       **kwargs)
        if not force_instance:
            cls._async_clients()[io_loop] = instance
        # Make sure the instance knows which cache to remove itself from.
        # It can't simply call _async_clients() because we may be in
        # __new__(AsyncHTTPClient) but instance.__class__ may be
        # SimpleAsyncHTTPClient.
        instance._instance_cache = instance_cache
        if instance_cache is not None:
            instance_cache[instance.io_loop] = instance
        return instance

    def initialize(self, io_loop, defaults=None):

@ -141,15 +172,28 @@ class AsyncHTTPClient(Configurable):
        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)
        self._closed = False

    def close(self):
        """Destroys this HTTP client, freeing any file descriptors used.
        Not needed in normal use, but may be helpful in unittests that
        create and destroy http clients.  No other methods may be called
        on the `AsyncHTTPClient` after ``close()``.

        This method is **not needed in normal use** due to the way
        that `AsyncHTTPClient` objects are transparently reused.
        ``close()`` is generally only necessary when either the
        `.IOLoop` is also being closed, or the ``force_instance=True``
        argument was used when creating the `AsyncHTTPClient`.

        No other methods may be called on the `AsyncHTTPClient` after
        ``close()``.

        """
        if self._async_clients().get(self.io_loop) is self:
            del self._async_clients()[self.io_loop]
        if self._closed:
            return
        self._closed = True
        if self._instance_cache is not None:
            if self._instance_cache.get(self.io_loop) is not self:
                raise RuntimeError("inconsistent AsyncHTTPClient cache")
            del self._instance_cache[self.io_loop]

    def fetch(self, request, callback=None, **kwargs):
        """Executes a request, asynchronously returning an `HTTPResponse`.

@ -159,7 +203,7 @@ class AsyncHTTPClient(Configurable):
        kwargs: ``HTTPRequest(request, **kwargs)``

        This method returns a `.Future` whose result is an
        `HTTPResponse`.  The ``Future`` wil raise an `HTTPError` if
        `HTTPResponse`.  The ``Future`` will raise an `HTTPError` if
        the request returned a non-200 response code.

        If a ``callback`` is given, it will be invoked with the `HTTPResponse`.

@ -167,6 +211,8 @@ class AsyncHTTPClient(Configurable):
        Instead, you must check the response's ``error`` attribute or
        call its `~HTTPResponse.rethrow` method.
        """
        if self._closed:
            raise RuntimeError("fetch() called on closed AsyncHTTPClient")
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)
        # We may modify this (to add Host, Accept-Encoding, etc),

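A sketch of coroutine-style usage of ``fetch`` (assumes a running IOLoop; the URL is illustrative)::

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient, HTTPError

    @gen.coroutine
    def get_body(url):
        client = AsyncHTTPClient()
        try:
            response = yield client.fetch(url)
        except HTTPError:
            raise gen.Return(None)  # non-200 responses raise by default
        raise gen.Return(response.body)
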
@ -174,7 +220,7 @@ class AsyncHTTPClient(Configurable):
        # where normal dicts get converted to HTTPHeaders objects.
        request.headers = httputil.HTTPHeaders(request.headers)
        request = _RequestProxy(request, self.defaults)
        future = Future()
        future = TracebackFuture()
        if callback is not None:
            callback = stack_context.wrap(callback)

@ -236,7 +282,7 @@ class HTTPRequest(object):
        request_timeout=20.0,
        follow_redirects=True,
        max_redirects=5,
        use_gzip=True,
        decompress_response=True,
        proxy_password='',
        allow_nonstandard_methods=False,
        validate_cert=True)

@ -252,14 +298,27 @@ class HTTPRequest(object):
                 proxy_password=None, allow_nonstandard_methods=None,
                 validate_cert=None, ca_certs=None,
                 allow_ipv6=None,
                 client_key=None, client_cert=None):
                 client_key=None, client_cert=None, body_producer=None,
                 expect_100_continue=False, decompress_response=None):
        r"""All parameters except ``url`` are optional.

        :arg string url: URL to fetch
        :arg string method: HTTP method, e.g. "GET" or "POST"
        :arg headers: Additional HTTP headers to pass on the request
        :arg body: HTTP body to pass on the request
        :type headers: `~tornado.httputil.HTTPHeaders` or `dict`
        :arg body: HTTP request body as a string (byte or unicode; if unicode
           the utf-8 encoding will be used)
        :arg body_producer: Callable used for lazy/asynchronous request bodies.
           It is called with one argument, a ``write`` function, and should
           return a `.Future`.  It should call the write function with new
           data as it becomes available.  The write function returns a
           `.Future` which can be used for flow control.
           Only one of ``body`` and ``body_producer`` may
           be specified.  ``body_producer`` is not supported on
           ``curl_httpclient``.  When using ``body_producer`` it is recommended
           to pass a ``Content-Length`` in the headers as otherwise chunked
           encoding will be used, and many servers do not support chunked
           encoding on requests.  New in Tornado 4.0 (see the sketch after
           this hunk).
        :arg string auth_username: Username for HTTP authentication
        :arg string auth_password: Password for HTTP authentication
        :arg string auth_mode: Authentication mode; default is "basic".

@ -274,8 +333,13 @@ class HTTPRequest(object):
           or return the 3xx response?
        :arg int max_redirects: Limit for ``follow_redirects``
        :arg string user_agent: String to send as ``User-Agent`` header
        :arg bool use_gzip: Request gzip encoding from the server
        :arg string network_interface: Network interface to use for request
        :arg bool decompress_response: Request a compressed response from
           the server and decompress it after downloading.  Default is True.
           New in Tornado 4.0.
        :arg bool use_gzip: Deprecated alias for ``decompress_response``
           since Tornado 4.0.
        :arg string network_interface: Network interface to use for request.
           ``curl_httpclient`` only; see note below.
        :arg callable streaming_callback: If set, ``streaming_callback`` will
           be run with each chunk of data as it is received, and
           ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in

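A hedged sketch of a ``body_producer`` (the names, URL, and values are illustrative)::

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient, HTTPRequest

    @gen.coroutine
    def produce(write):
        for chunk in [b"part1", b"part2"]:
            yield write(chunk)  # the returned Future provides flow control

    request = HTTPRequest(
        url="http://example.com/upload", method="POST",
        headers={"Content-Length": "10"},  # len(b"part1part2")
        body_producer=produce)
    # inside a coroutine:
    response = yield AsyncHTTPClient().fetch(request)
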
@ -303,22 +367,42 @@ class HTTPRequest(object):
        :arg bool validate_cert: For HTTPS requests, validate the server's
           certificate?
        :arg string ca_certs: filename of CA certificates in PEM format,
           or None to use defaults.  Note that in ``curl_httpclient``, if
           any request uses a custom ``ca_certs`` file, they all must (they
           don't have to all use the same ``ca_certs``, but it's not possible
           to mix requests with ``ca_certs`` and requests that use the defaults.
           or None to use defaults.  See note below when used with
           ``curl_httpclient``.
        :arg bool allow_ipv6: Use IPv6 when available?  Default is false in
           ``simple_httpclient`` and true in ``curl_httpclient``
        :arg string client_key: Filename for client SSL key, if any
        :arg string client_cert: Filename for client SSL certificate, if any
        :arg string client_key: Filename for client SSL key, if any.  See
           note below when used with ``curl_httpclient``.
        :arg string client_cert: Filename for client SSL certificate, if any.
           See note below when used with ``curl_httpclient``.
        :arg bool expect_100_continue: If true, send the
           ``Expect: 100-continue`` header and wait for a continue response
           before sending the request body.  Only supported with
           simple_httpclient.

        .. note::

            When using ``curl_httpclient`` certain options may be
            inherited by subsequent fetches because ``pycurl`` does
            not allow them to be cleanly reset.  This applies to the
            ``ca_certs``, ``client_key``, ``client_cert``, and
            ``network_interface`` arguments.  If you use these
            options, you should pass them on every request (you don't
            have to always use the same values, but it's not possible
            to mix requests that specify these options with ones that
            use the defaults).

        .. versionadded:: 3.1
           The ``auth_mode`` argument.

        .. versionadded:: 4.0
           The ``body_producer`` and ``expect_100_continue`` arguments.
        """
        if headers is None:
            headers = httputil.HTTPHeaders()
        # Note that some of these attributes go through property setters
        # defined below.
        self.headers = headers
        if if_modified_since:
            headers["If-Modified-Since"] = httputil.format_timestamp(
            self.headers["If-Modified-Since"] = httputil.format_timestamp(
                if_modified_since)
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port

@ -326,8 +410,8 @@ class HTTPRequest(object):
        self.proxy_password = proxy_password
        self.url = url
        self.method = method
        self.headers = headers
        self.body = utf8(body)
        self.body = body
        self.body_producer = body_producer
        self.auth_username = auth_username
        self.auth_password = auth_password
        self.auth_mode = auth_mode

@ -336,19 +420,74 @@ class HTTPRequest(object):
        self.follow_redirects = follow_redirects
        self.max_redirects = max_redirects
        self.user_agent = user_agent
        self.use_gzip = use_gzip
        if decompress_response is not None:
            self.decompress_response = decompress_response
        else:
            self.decompress_response = use_gzip
        self.network_interface = network_interface
        self.streaming_callback = stack_context.wrap(streaming_callback)
        self.header_callback = stack_context.wrap(header_callback)
        self.prepare_curl_callback = stack_context.wrap(prepare_curl_callback)
        self.streaming_callback = streaming_callback
        self.header_callback = header_callback
        self.prepare_curl_callback = prepare_curl_callback
        self.allow_nonstandard_methods = allow_nonstandard_methods
        self.validate_cert = validate_cert
        self.ca_certs = ca_certs
        self.allow_ipv6 = allow_ipv6
        self.client_key = client_key
        self.client_cert = client_cert
        self.expect_100_continue = expect_100_continue
        self.start_time = time.time()

    @property
    def headers(self):
        return self._headers

    @headers.setter
    def headers(self, value):
        if value is None:
            self._headers = httputil.HTTPHeaders()
        else:
            self._headers = value

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, value):
        self._body = utf8(value)

    @property
    def body_producer(self):
        return self._body_producer

    @body_producer.setter
    def body_producer(self, value):
        self._body_producer = stack_context.wrap(value)

    @property
    def streaming_callback(self):
        return self._streaming_callback

    @streaming_callback.setter
    def streaming_callback(self, value):
        self._streaming_callback = stack_context.wrap(value)

    @property
    def header_callback(self):
        return self._header_callback

    @header_callback.setter
    def header_callback(self, value):
        self._header_callback = stack_context.wrap(value)

    @property
    def prepare_curl_callback(self):
        return self._prepare_curl_callback

    @prepare_curl_callback.setter
    def prepare_curl_callback(self, value):
        self._prepare_curl_callback = stack_context.wrap(value)


class HTTPResponse(object):
    """HTTP Response object.

@ -360,11 +499,12 @@ class HTTPResponse(object):
    * code: numeric HTTP status code, e.g. 200 or 404

    * reason: human-readable reason phrase describing the status code
      (with curl_httpclient, this is a default value rather than the
      server's actual response)

    * headers: `tornado.httputil.HTTPHeaders` object

    * effective_url: final location of the resource after following any
      redirects

    * buffer: ``cStringIO`` object for response body

    * body: response body as string (created on demand from ``self.buffer``)

@ -400,7 +540,8 @@ class HTTPResponse(object):
        self.effective_url = effective_url
        if error is None:
            if self.code < 200 or self.code >= 300:
                self.error = HTTPError(self.code, response=self)
                self.error = HTTPError(self.code, message=self.reason,
                                       response=self)
            else:
                self.error = None
        else:

@ -490,7 +631,7 @@ def main():
        if options.print_headers:
            print(response.headers)
        if options.print_body:
            print(response.body)
            print(native_str(response.body))
    client.close()

if __name__ == "__main__":

@ -20,69 +20,55 @@ Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).

This module also defines the `HTTPRequest` class which is exposed via
`tornado.web.RequestHandler.request`.
.. versionchanged:: 4.0

   The ``HTTPRequest`` class that used to live in this module has been moved
   to `tornado.httputil.HTTPServerRequest`.  The old name remains as an alias.
"""

from __future__ import absolute_import, division, print_function, with_statement

import socket
import ssl
import time

from tornado.escape import native_str, parse_qs_bytes
from tornado.escape import native_str
from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
from tornado import gen
from tornado import httputil
from tornado import iostream
from tornado.log import gen_log
from tornado import netutil
from tornado.tcpserver import TCPServer
from tornado import stack_context
from tornado.util import bytes_type

try:
    import Cookie  # py2
except ImportError:
    import http.cookies as Cookie  # py3


class HTTPServer(TCPServer):
class HTTPServer(TCPServer, httputil.HTTPServerConnectionDelegate):
    r"""A non-blocking, single-threaded HTTP server.

    A server is defined by a request callback that takes an HTTPRequest
    instance as an argument and writes a valid HTTP response with
    `HTTPRequest.write`. `HTTPRequest.finish` finishes the request (but does
    not necessarily close the connection in the case of HTTP/1.1 keep-alive
    requests). A simple example server that echoes back the URI you
    requested::
    A server is defined by either a request callback that takes a
    `.HTTPServerRequest` as an argument or a `.HTTPServerConnectionDelegate`
    instance.

    A simple example server that echoes back the URI you requested::

        import tornado.httpserver
        import tornado.ioloop

        def handle_request(request):
            message = "You requested %s\n" % request.uri
            request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % (
                len(message), message))
            request.finish()
            request.connection.write_headers(
                httputil.ResponseStartLine('HTTP/1.1', 200, 'OK'),
                {"Content-Length": str(len(message))})
            request.connection.write(message)
            request.connection.finish()

        http_server = tornado.httpserver.HTTPServer(handle_request)
        http_server.listen(8888)
        tornado.ioloop.IOLoop.instance().start()

    `HTTPServer` is a very basic connection handler.  It parses the request
    headers and body, but the request callback is responsible for producing
    the response exactly as it will appear on the wire.  This affords
    maximum flexibility for applications to implement whatever parts
    of HTTP responses are required.
    Applications should use the methods of `.HTTPConnection` to write
    their response.

    `HTTPServer` supports keep-alive connections by default
    (automatically for HTTP/1.1, or for HTTP/1.0 when the client
    requests ``Connection: keep-alive``).  This means that the request
    callback must generate a properly-framed response, using either
    the ``Content-Length`` header or ``Transfer-Encoding: chunked``.
    Applications that are unable to frame their responses properly
    should instead return a ``Connection: close`` header in each
    response and pass ``no_keep_alive=True`` to the `HTTPServer`
    constructor.
    requests ``Connection: keep-alive``).

    If ``xheaders`` is ``True``, we support the
    ``X-Real-Ip``/``X-Forwarded-For`` and

@ -142,388 +128,170 @@ class HTTPServer(TCPServer):
    servers if you want to create your listening sockets in some
    way other than `tornado.netutil.bind_sockets`.

    .. versionchanged:: 4.0
       Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
       ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
       arguments.  Added support for `.HTTPServerConnectionDelegate`
       instances as ``request_callback``.
    """
    def __init__(self, request_callback, no_keep_alive=False, io_loop=None,
                 xheaders=False, ssl_options=None, protocol=None, **kwargs):
                 xheaders=False, ssl_options=None, protocol=None,
                 decompress_request=False,
                 chunk_size=None, max_header_size=None,
                 idle_connection_timeout=None, body_timeout=None,
                 max_body_size=None, max_buffer_size=None):
        self.request_callback = request_callback
        self.no_keep_alive = no_keep_alive
        self.xheaders = xheaders
        self.protocol = protocol
        self.conn_params = HTTP1ConnectionParameters(
            decompress=decompress_request,
            chunk_size=chunk_size,
            max_header_size=max_header_size,
            header_timeout=idle_connection_timeout or 3600,
            max_body_size=max_body_size,
            body_timeout=body_timeout)
        TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options,
                           **kwargs)
                           max_buffer_size=max_buffer_size,
                           read_chunk_size=chunk_size)
        self._connections = set()

    @gen.coroutine
    def close_all_connections(self):
        while self._connections:
            # Peek at an arbitrary element of the set
            conn = next(iter(self._connections))
            yield conn.close()

    def handle_stream(self, stream, address):
        HTTPConnection(stream, address, self.request_callback,
                       self.no_keep_alive, self.xheaders, self.protocol)
        context = _HTTPRequestContext(stream, address,
                                      self.protocol)
        conn = HTTP1ServerConnection(
            stream, self.conn_params, context)
        self._connections.add(conn)
        conn.start_serving(self)

    def start_request(self, server_conn, request_conn):
        return _ServerRequestAdapter(self, request_conn)

    def on_close(self, server_conn):
        self._connections.remove(server_conn)

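A sketch of the new 4.0 constructor arguments in use (``app`` is an illustrative `tornado.web.Application`)::

    server = HTTPServer(app,
                        decompress_request=True,         # decode gzipped bodies
                        idle_connection_timeout=60,      # seconds
                        max_body_size=10 * 1024 * 1024)  # 10 MB
    server.listen(8888)
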
class _BadRequestException(Exception):
|
||||
"""Exception class for malformed HTTP requests."""
|
||||
pass
|
||||
|
||||
|
||||
class HTTPConnection(object):
|
||||
"""Handles a connection to an HTTP client, executing HTTP requests.
|
||||
|
||||
We parse HTTP headers and bodies, and execute the request callback
|
||||
until the HTTP conection is closed.
|
||||
"""
|
||||
    def __init__(self, stream, address, request_callback, no_keep_alive=False,
                 xheaders=False, protocol=None):
        self.stream = stream
class _HTTPRequestContext(object):
    def __init__(self, stream, address, protocol):
        self.address = address
        self.protocol = protocol
        # Save the socket's address family now so we know how to
        # interpret self.address even after the stream is closed
        # and its socket attribute replaced with None.
        self.address_family = stream.socket.family
        self.request_callback = request_callback
        self.no_keep_alive = no_keep_alive
        self.xheaders = xheaders
        self.protocol = protocol
        self._clear_request_state()
        # Save stack context here, outside of any request.  This keeps
        # contexts from one request from leaking into the next.
        self._header_callback = stack_context.wrap(self._on_headers)
        self.stream.set_close_callback(self._on_connection_close)
        self.stream.read_until(b"\r\n\r\n", self._header_callback)

    def _clear_request_state(self):
        """Clears the per-request state.

        This is run in between requests to allow the previous handler
        to be garbage collected (and prevent spurious close callbacks),
        and when the connection is closed (to break up cycles and
        facilitate garbage collection in cpython).
        """
        self._request = None
        self._request_finished = False
        self._write_callback = None
        self._close_callback = None

    def set_close_callback(self, callback):
        """Sets a callback that will be run when the connection is closed.

        Use this instead of accessing
        `HTTPConnection.stream.set_close_callback
        <.BaseIOStream.set_close_callback>` directly (which was the
        recommended approach prior to Tornado 3.0).
        """
        self._close_callback = stack_context.wrap(callback)

    def _on_connection_close(self):
        if self._close_callback is not None:
            callback = self._close_callback
            self._close_callback = None
            callback()
        # Delete any unfinished callbacks to break up reference cycles.
        self._header_callback = None
        self._clear_request_state()

    def close(self):
        self.stream.close()
        # Remove this reference to self, which would otherwise cause a
        # cycle and delay garbage collection of this connection.
        self._header_callback = None
        self._clear_request_state()

    def write(self, chunk, callback=None):
        """Writes a chunk of output to the stream."""
        if not self.stream.closed():
            self._write_callback = stack_context.wrap(callback)
            self.stream.write(chunk, self._on_write_complete)

    def finish(self):
        """Finishes the request."""
        self._request_finished = True
        # No more data is coming, so instruct TCP to send any remaining
        # data immediately instead of waiting for a full packet or ack.
        self.stream.set_nodelay(True)
        if not self.stream.writing():
            self._finish_request()

    def _on_write_complete(self):
        if self._write_callback is not None:
            callback = self._write_callback
            self._write_callback = None
            callback()
        # _on_write_complete is enqueued on the IOLoop whenever the
        # IOStream's write buffer becomes empty, but it's possible for
        # another callback that runs on the IOLoop before it to
        # simultaneously write more data and finish the request.  If
        # there is still data in the IOStream, a future
        # _on_write_complete will be responsible for calling
        # _finish_request.
        if self._request_finished and not self.stream.writing():
            self._finish_request()

    def _finish_request(self):
        if self.no_keep_alive or self._request is None:
            disconnect = True
        if stream.socket is not None:
            self.address_family = stream.socket.family
        else:
            connection_header = self._request.headers.get("Connection")
            if connection_header is not None:
                connection_header = connection_header.lower()
            if self._request.supports_http_1_1():
                disconnect = connection_header == "close"
            elif ("Content-Length" in self._request.headers
                    or self._request.method in ("HEAD", "GET")):
                disconnect = connection_header != "keep-alive"
            else:
                disconnect = True
        self._clear_request_state()
        if disconnect:
            self.close()
            return
        try:
            # Use a try/except instead of checking stream.closed()
            # directly, because in some cases the stream doesn't discover
            # that it's closed until you try to read from it.
            self.stream.read_until(b"\r\n\r\n", self._header_callback)

            # Turn Nagle's algorithm back on, leaving the stream in its
            # default state for the next request.
            self.stream.set_nodelay(False)
        except iostream.StreamClosedError:
            self.close()

    def _on_headers(self, data):
        try:
            data = native_str(data.decode('latin1'))
            eol = data.find("\r\n")
            start_line = data[:eol]
            try:
                method, uri, version = start_line.split(" ")
            except ValueError:
                raise _BadRequestException("Malformed HTTP request line")
            if not version.startswith("HTTP/"):
                raise _BadRequestException("Malformed HTTP version in HTTP Request-Line")
            try:
                headers = httputil.HTTPHeaders.parse(data[eol:])
            except ValueError:
                # Probably from split() if there was no ':' in the line
                raise _BadRequestException("Malformed HTTP headers")

            # HTTPRequest wants an IP, not a full socket address
            if self.address_family in (socket.AF_INET, socket.AF_INET6):
                remote_ip = self.address[0]
            else:
                # Unix (or other) socket; fake the remote address
                remote_ip = '0.0.0.0'

            self._request = HTTPRequest(
                connection=self, method=method, uri=uri, version=version,
                headers=headers, remote_ip=remote_ip, protocol=self.protocol)

            content_length = headers.get("Content-Length")
            if content_length:
                content_length = int(content_length)
                if content_length > self.stream.max_buffer_size:
                    raise _BadRequestException("Content-Length too long")
                if headers.get("Expect") == "100-continue":
                    self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
                self.stream.read_bytes(content_length, self._on_request_body)
                return

            self.request_callback(self._request)
        except _BadRequestException as e:
            gen_log.info("Malformed HTTP request from %s: %s",
                         self.address[0], e)
            self.close()
            return

    def _on_request_body(self, data):
        self._request.body = data
        if self._request.method in ("POST", "PATCH", "PUT"):
            httputil.parse_body_arguments(
                self._request.headers.get("Content-Type", ""), data,
                self._request.arguments, self._request.files)
        self.request_callback(self._request)

class HTTPRequest(object):
    """A single HTTP request.

    All attributes are type `str` unless otherwise noted.

    .. attribute:: method

       HTTP request method, e.g. "GET" or "POST"

    .. attribute:: uri

       The requested uri.

    .. attribute:: path

       The path portion of `uri`

    .. attribute:: query

       The query portion of `uri`

    .. attribute:: version

       HTTP version specified in request, e.g. "HTTP/1.1"

    .. attribute:: headers

       `.HTTPHeaders` dictionary-like object for request headers.  Acts like
       a case-insensitive dictionary with additional methods for repeated
       headers.

    .. attribute:: body

       Request body, if present, as a byte string.

    .. attribute:: remote_ip

       Client's IP address as a string.  If ``HTTPServer.xheaders`` is set,
       will pass along the real IP address provided by a load balancer
       in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.

       .. versionchanged:: 3.1
          The list format of ``X-Forwarded-For`` is now supported.

    .. attribute:: protocol

       The protocol used, either "http" or "https".  If ``HTTPServer.xheaders``
       is set, will pass along the protocol used by a load balancer if
       reported via an ``X-Scheme`` header.

    .. attribute:: host

       The requested hostname, usually taken from the ``Host`` header.

    .. attribute:: arguments

       GET/POST arguments are available in the arguments property, which
       maps arguments names to lists of values (to support multiple values
       for individual names). Names are of type `str`, while arguments
       are byte strings.  Note that this is different from
       `.RequestHandler.get_argument`, which returns argument values as
       unicode strings.

    .. attribute:: files

       File uploads are available in the files property, which maps file
       names to lists of `.HTTPFile`.

    .. attribute:: connection

       An HTTP request is attached to a single HTTP connection, which can
       be accessed through the "connection" attribute.  Since connections
       are typically kept open in HTTP/1.1, multiple requests can be handled
       sequentially on a single connection.
    """
    def __init__(self, method, uri, version="HTTP/1.0", headers=None,
                 body=None, remote_ip=None, protocol=None, host=None,
                 files=None, connection=None):
        self.method = method
        self.uri = uri
        self.version = version
        self.headers = headers or httputil.HTTPHeaders()
        self.body = body or ""

        # set remote IP and protocol
        self.remote_ip = remote_ip
        self.address_family = None
        # In HTTPServerRequest we want an IP, not a full socket address.
        if (self.address_family in (socket.AF_INET, socket.AF_INET6) and
                address is not None):
            self.remote_ip = address[0]
        else:
            # Unix (or other) socket; fake the remote address.
            self.remote_ip = '0.0.0.0'
        if protocol:
            self.protocol = protocol
        elif connection and isinstance(connection.stream,
                                       iostream.SSLIOStream):
        elif isinstance(stream, iostream.SSLIOStream):
            self.protocol = "https"
        else:
            self.protocol = "http"
        self._orig_remote_ip = self.remote_ip
        self._orig_protocol = self.protocol

        # xheaders can override the defaults
        if connection and connection.xheaders:
            # Squid uses X-Forwarded-For, others use X-Real-Ip
            ip = self.headers.get("X-Forwarded-For", self.remote_ip)
            ip = ip.split(',')[-1].strip()
            ip = self.headers.get(
                "X-Real-Ip", ip)
            if netutil.is_valid_ip(ip):
                self.remote_ip = ip
            # AWS uses X-Forwarded-Proto
            proto = self.headers.get(
                "X-Scheme", self.headers.get("X-Forwarded-Proto", self.protocol))
            if proto in ("http", "https"):
                self.protocol = proto

    def __str__(self):
        if self.address_family in (socket.AF_INET, socket.AF_INET6):
            return self.remote_ip
        elif isinstance(self.address, bytes):
            # Python 3 with the -bb option warns about str(bytes),
            # so convert it explicitly.
            # Unix socket addresses are str on mac but bytes on linux.
            return native_str(self.address)
        else:
            return str(self.address)

        self.host = host or self.headers.get("Host") or "127.0.0.1"
        self.files = files or {}
    def _apply_xheaders(self, headers):
        """Rewrite the ``remote_ip`` and ``protocol`` fields."""
        # Squid uses X-Forwarded-For, others use X-Real-Ip
        ip = headers.get("X-Forwarded-For", self.remote_ip)
        ip = ip.split(',')[-1].strip()
        ip = headers.get("X-Real-Ip", ip)
        if netutil.is_valid_ip(ip):
            self.remote_ip = ip
        # AWS uses X-Forwarded-Proto
        proto_header = headers.get(
            "X-Scheme", headers.get("X-Forwarded-Proto",
                                    self.protocol))
        if proto_header in ("http", "https"):
            self.protocol = proto_header

    def _unapply_xheaders(self):
        """Undo changes from `_apply_xheaders`.

        Xheaders are per-request so they should not leak to the next
        request on the same connection.
        """
        self.remote_ip = self._orig_remote_ip
        self.protocol = self._orig_protocol

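As a usage sketch of the xheaders behavior implemented above
(addresses and port invented), a server run with ``xheaders=True``
behind a proxy will see the proxy-reported values::

    server = HTTPServer(app, xheaders=True)
    server.listen(8080)

    # Simulated proxied request from a shell:
    #   curl -H 'X-Forwarded-For: 203.0.113.7' \
    #        -H 'X-Forwarded-Proto: https' http://localhost:8080/
    # Inside a handler, self.request.remote_ip is then '203.0.113.7'
    # and self.request.protocol is 'https'; both revert when the
    # request finishes, since xheaders are applied per-request.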
class _ServerRequestAdapter(httputil.HTTPMessageDelegate):
    """Adapts the `HTTPMessageDelegate` interface to the interface expected
    by our clients.
    """
    def __init__(self, server, connection):
        self.server = server
        self.connection = connection
        self._start_time = time.time()
        self._finish_time = None
        self.request = None
        if isinstance(server.request_callback,
                      httputil.HTTPServerConnectionDelegate):
            self.delegate = server.request_callback.start_request(connection)
            self._chunks = None
        else:
            self.delegate = None
            self._chunks = []

        self.path, sep, self.query = uri.partition('?')
        self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
    def headers_received(self, start_line, headers):
        if self.server.xheaders:
            self.connection.context._apply_xheaders(headers)
        if self.delegate is None:
            self.request = httputil.HTTPServerRequest(
                connection=self.connection, start_line=start_line,
                headers=headers)
        else:
            return self.delegate.headers_received(start_line, headers)

    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics"""
        return self.version == "HTTP/1.1"

    @property
    def cookies(self):
        """A dictionary of Cookie.Morsel objects."""
        if not hasattr(self, "_cookies"):
            self._cookies = Cookie.SimpleCookie()
            if "Cookie" in self.headers:
                try:
                    self._cookies.load(
                        native_str(self.headers["Cookie"]))
                except Exception:
                    self._cookies = {}
        return self._cookies

    def write(self, chunk, callback=None):
        """Writes the given chunk to the response stream."""
        assert isinstance(chunk, bytes_type)
        self.connection.write(chunk, callback=callback)
    def data_received(self, chunk):
        if self.delegate is None:
            self._chunks.append(chunk)
        else:
            return self.delegate.data_received(chunk)

    def finish(self):
        """Finishes this HTTP request on the open connection."""
        self.connection.finish()
        self._finish_time = time.time()

    def full_url(self):
        """Reconstructs the full URL for this request."""
        return self.protocol + "://" + self.host + self.uri

    def request_time(self):
        """Returns the amount of time it took for this request to execute."""
        if self._finish_time is None:
            return time.time() - self._start_time
        if self.delegate is None:
            self.request.body = b''.join(self._chunks)
            self.request._parse_body()
            self.server.request_callback(self.request)
        else:
            return self._finish_time - self._start_time
            self.delegate.finish()
        self._cleanup()

    def get_ssl_certificate(self, binary_form=False):
        """Returns the client's SSL certificate, if any.
    def on_connection_close(self):
        if self.delegate is None:
            self._chunks = None
        else:
            self.delegate.on_connection_close()
        self._cleanup()

        To use client certificates, the HTTPServer must have been constructed
        with cert_reqs set in ssl_options, e.g.::
    def _cleanup(self):
        if self.server.xheaders:
            self.connection.context._unapply_xheaders()

            server = HTTPServer(app,
                ssl_options=dict(
                    certfile="foo.crt",
                    keyfile="foo.key",
                    cert_reqs=ssl.CERT_REQUIRED,
                    ca_certs="cacert.crt"))

        By default, the return value is a dictionary (or None, if no
        client certificate is present).  If ``binary_form`` is true, a
        DER-encoded form of the certificate is returned instead.  See
        SSLSocket.getpeercert() in the standard library for more
        details.
        http://docs.python.org/library/ssl.html#sslsocket-objects
        """
        try:
            return self.connection.stream.socket.getpeercert(
                binary_form=binary_form)
        except ssl.SSLError:
            return None

    def __repr__(self):
        attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
        args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
        return "%s(%s, headers=%s)" % (
            self.__class__.__name__, args, dict(self.headers))
HTTPRequest = httputil.HTTPServerRequest

@@ -14,20 +14,31 @@
# License for the specific language governing permissions and limitations
# under the License.

"""HTTP utility code shared by clients and servers."""
"""HTTP utility code shared by clients and servers.

This module also defines the `HTTPServerRequest` class which is exposed
via `tornado.web.RequestHandler.request`.
"""

from __future__ import absolute_import, division, print_function, with_statement

import calendar
import collections
import copy
import datetime
import email.utils
import numbers
import re
import time

from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
from tornado.util import ObjectDict
from tornado.util import ObjectDict, bytes_type

try:
    import Cookie  # py2
except ImportError:
    import http.cookies as Cookie  # py3

try:
    from httplib import responses  # py2
@@ -43,6 +54,13 @@ try:
except ImportError:
    from urllib.parse import urlencode  # py3

try:
    from ssl import SSLError
except ImportError:
    # ssl is unavailable on app engine.
    class SSLError(Exception):
        pass


class _NormalizedHeaderCache(dict):
    """Dynamic cached mapping of header names to Http-Header-Case.

@@ -212,6 +230,337 @@ class HTTPHeaders(dict):
        return HTTPHeaders(self)


class HTTPServerRequest(object):
    """A single HTTP request.

    All attributes are type `str` unless otherwise noted.

    .. attribute:: method

       HTTP request method, e.g. "GET" or "POST"

    .. attribute:: uri

       The requested uri.

    .. attribute:: path

       The path portion of `uri`

    .. attribute:: query

       The query portion of `uri`

    .. attribute:: version

       HTTP version specified in request, e.g. "HTTP/1.1"

    .. attribute:: headers

       `.HTTPHeaders` dictionary-like object for request headers.  Acts like
       a case-insensitive dictionary with additional methods for repeated
       headers.

    .. attribute:: body

       Request body, if present, as a byte string.

    .. attribute:: remote_ip

       Client's IP address as a string.  If ``HTTPServer.xheaders`` is set,
       will pass along the real IP address provided by a load balancer
       in the ``X-Real-Ip`` or ``X-Forwarded-For`` header.

       .. versionchanged:: 3.1
          The list format of ``X-Forwarded-For`` is now supported.

    .. attribute:: protocol

       The protocol used, either "http" or "https".  If ``HTTPServer.xheaders``
       is set, will pass along the protocol used by a load balancer if
       reported via an ``X-Scheme`` header.

    .. attribute:: host

       The requested hostname, usually taken from the ``Host`` header.

    .. attribute:: arguments

       GET/POST arguments are available in the arguments property, which
       maps arguments names to lists of values (to support multiple values
       for individual names). Names are of type `str`, while arguments
       are byte strings.  Note that this is different from
       `.RequestHandler.get_argument`, which returns argument values as
       unicode strings.

    .. attribute:: query_arguments

       Same format as ``arguments``, but contains only arguments extracted
       from the query string.

       .. versionadded:: 3.2

    .. attribute:: body_arguments

       Same format as ``arguments``, but contains only arguments extracted
       from the request body.

       .. versionadded:: 3.2

    .. attribute:: files

       File uploads are available in the files property, which maps file
       names to lists of `.HTTPFile`.

    .. attribute:: connection

       An HTTP request is attached to a single HTTP connection, which can
       be accessed through the "connection" attribute.  Since connections
       are typically kept open in HTTP/1.1, multiple requests can be handled
       sequentially on a single connection.

    .. versionchanged:: 4.0
       Moved from ``tornado.httpserver.HTTPRequest``.
    """
    def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None,
                 body=None, host=None, files=None, connection=None,
                 start_line=None):
        if start_line is not None:
            method, uri, version = start_line
        self.method = method
        self.uri = uri
        self.version = version
        self.headers = headers or HTTPHeaders()
        self.body = body or ""

        # set remote IP and protocol
        context = getattr(connection, 'context', None)
        self.remote_ip = getattr(context, 'remote_ip', None)
        self.protocol = getattr(context, 'protocol', "http")

        self.host = host or self.headers.get("Host") or "127.0.0.1"
        self.files = files or {}
        self.connection = connection
        self._start_time = time.time()
        self._finish_time = None

        self.path, sep, self.query = uri.partition('?')
        self.arguments = parse_qs_bytes(self.query, keep_blank_values=True)
        self.query_arguments = copy.deepcopy(self.arguments)
        self.body_arguments = {}

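The relationship between ``path``, ``query``, and the argument
dictionaries initialized above can be seen by constructing a request
by hand; a small sketch with invented values::

    from tornado.httputil import HTTPServerRequest

    req = HTTPServerRequest(method="GET", uri="/search?q=tornado&q=web")
    print(req.path)             # '/search'
    print(req.query)            # 'q=tornado&q=web'
    print(req.arguments)        # {'q': [b'tornado', b'web']}
    print(req.query_arguments)  # same as arguments; body_arguments stays {}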
    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics.

        .. deprecated:: 4.0
           Applications are less likely to need this information with the
           introduction of `.HTTPConnection`.  If you still need it, access
           the ``version`` attribute directly.
        """
        return self.version == "HTTP/1.1"

    @property
    def cookies(self):
        """A dictionary of Cookie.Morsel objects."""
        if not hasattr(self, "_cookies"):
            self._cookies = Cookie.SimpleCookie()
            if "Cookie" in self.headers:
                try:
                    self._cookies.load(
                        native_str(self.headers["Cookie"]))
                except Exception:
                    self._cookies = {}
        return self._cookies

    def write(self, chunk, callback=None):
        """Writes the given chunk to the response stream.

        .. deprecated:: 4.0
           Use ``request.connection`` and the `.HTTPConnection` methods
           to write the response.
        """
        assert isinstance(chunk, bytes_type)
        self.connection.write(chunk, callback=callback)

    def finish(self):
        """Finishes this HTTP request on the open connection.

        .. deprecated:: 4.0
           Use ``request.connection`` and the `.HTTPConnection` methods
           to write the response.
        """
        self.connection.finish()
        self._finish_time = time.time()

    def full_url(self):
        """Reconstructs the full URL for this request."""
        return self.protocol + "://" + self.host + self.uri

    def request_time(self):
        """Returns the amount of time it took for this request to execute."""
        if self._finish_time is None:
            return time.time() - self._start_time
        else:
            return self._finish_time - self._start_time

    def get_ssl_certificate(self, binary_form=False):
        """Returns the client's SSL certificate, if any.

        To use client certificates, the HTTPServer must have been constructed
        with cert_reqs set in ssl_options, e.g.::

            server = HTTPServer(app,
                ssl_options=dict(
                    certfile="foo.crt",
                    keyfile="foo.key",
                    cert_reqs=ssl.CERT_REQUIRED,
                    ca_certs="cacert.crt"))

        By default, the return value is a dictionary (or None, if no
        client certificate is present).  If ``binary_form`` is true, a
        DER-encoded form of the certificate is returned instead.  See
        SSLSocket.getpeercert() in the standard library for more
        details.
        http://docs.python.org/library/ssl.html#sslsocket-objects
        """
        try:
            return self.connection.stream.socket.getpeercert(
                binary_form=binary_form)
        except SSLError:
            return None

    def _parse_body(self):
        parse_body_arguments(
            self.headers.get("Content-Type", ""), self.body,
            self.body_arguments, self.files,
            self.headers)

        for k, v in self.body_arguments.items():
            self.arguments.setdefault(k, []).extend(v)

    def __repr__(self):
        attrs = ("protocol", "host", "method", "uri", "version", "remote_ip")
        args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs])
        return "%s(%s, headers=%s)" % (
            self.__class__.__name__, args, dict(self.headers))

class HTTPInputError(Exception):
    """Exception class for malformed HTTP requests or responses
    from remote sources.

    .. versionadded:: 4.0
    """
    pass


class HTTPOutputError(Exception):
    """Exception class for errors in HTTP output.

    .. versionadded:: 4.0
    """
    pass


class HTTPServerConnectionDelegate(object):
    """Implement this interface to handle requests from `.HTTPServer`.

    .. versionadded:: 4.0
    """
    def start_request(self, server_conn, request_conn):
        """This method is called by the server when a new request has started.

        :arg server_conn: is an opaque object representing the long-lived
            (e.g. tcp-level) connection.
        :arg request_conn: is a `.HTTPConnection` object for a single
            request/response exchange.

        This method should return a `.HTTPMessageDelegate`.
        """
        raise NotImplementedError()

    def on_close(self, server_conn):
        """This method is called when a connection has been closed.

        :arg server_conn: is a server connection that has previously been
            passed to ``start_request``.
        """
        pass


class HTTPMessageDelegate(object):
    """Implement this interface to handle an HTTP request or response.

    .. versionadded:: 4.0
    """
    def headers_received(self, start_line, headers):
        """Called when the HTTP headers have been received and parsed.

        :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`
            depending on whether this is a client or server message.
        :arg headers: a `.HTTPHeaders` instance.

        Some `.HTTPConnection` methods can only be called during
        ``headers_received``.

        May return a `.Future`; if it does the body will not be read
        until it is done.
        """
        pass

    def data_received(self, chunk):
        """Called when a chunk of data has been received.

        May return a `.Future` for flow control.
        """
        pass

    def finish(self):
        """Called after the last chunk of data has been received."""
        pass

    def on_connection_close(self):
        """Called if the connection is closed without finishing the request.

        If ``headers_received`` is called, either ``finish`` or
        ``on_connection_close`` will be called, but not both.
        """
        pass


class HTTPConnection(object):
    """Applications use this interface to write their responses.

    .. versionadded:: 4.0
    """
    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Write an HTTP header block.

        :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`.
        :arg headers: a `.HTTPHeaders` instance.
        :arg chunk: the first (optional) chunk of data.  This is an optimization
            so that small responses can be written in the same call as their
            headers.
        :arg callback: a callback to be run when the write is complete.

        Returns a `.Future` if no callback is given.
        """
        raise NotImplementedError()

    def write(self, chunk, callback=None):
        """Writes a chunk of body data.

        The callback will be run when the write is complete.  If no callback
        is given, returns a Future.
        """
        raise NotImplementedError()

    def finish(self):
        """Indicates that the last body data has been written.
        """
        raise NotImplementedError()

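Tying the three interfaces above together, a minimal echo-style
server delegate might look like the following sketch (class names
are invented)::

    from tornado import httputil

    class EchoDelegate(httputil.HTTPMessageDelegate):
        def __init__(self, request_conn):
            self.connection = request_conn
            self.chunks = []

        def headers_received(self, start_line, headers):
            pass  # the request line and headers could be inspected here

        def data_received(self, chunk):
            self.chunks.append(chunk)

        def finish(self):
            # Echo the request body back, framed with Content-Length.
            body = b''.join(self.chunks)
            self.connection.write_headers(
                httputil.ResponseStartLine('HTTP/1.1', 200, 'OK'),
                httputil.HTTPHeaders({'Content-Length': str(len(body))}))
            self.connection.write(body)
            self.connection.finish()

    class EchoServerDelegate(httputil.HTTPServerConnectionDelegate):
        def start_request(self, server_conn, request_conn):
            return EchoDelegate(request_conn)

An ``EchoServerDelegate`` instance can then be passed to `HTTPServer`
as the ``request_callback``, per the 4.0 constructor change noted earlier.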
def url_concat(url, args):
    """Concatenate url and argument dictionary regardless of whether
    url has existing query parameters.

@@ -310,7 +659,7 @@ def _int_or_none(val):
        return int(val)


def parse_body_arguments(content_type, body, arguments, files):
def parse_body_arguments(content_type, body, arguments, files, headers=None):
    """Parses a form request body.

    Supports ``application/x-www-form-urlencoded`` and

@@ -319,8 +668,16 @@ def parse_body_arguments(content_type, body, arguments, files):
    and ``files`` parameters are dictionaries that will be updated
    with the parsed contents.
    """
    if headers and 'Content-Encoding' in headers:
        gen_log.warning("Unsupported Content-Encoding: %s",
                        headers['Content-Encoding'])
        return
    if content_type.startswith("application/x-www-form-urlencoded"):
        uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
        try:
            uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True)
        except Exception as e:
            gen_log.warning('Invalid x-www-form-urlencoded body: %s', e)
            uri_arguments = {}
        for name, values in uri_arguments.items():
            if values:
                arguments.setdefault(name, []).extend(values)
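A quick sketch of calling ``parse_body_arguments`` directly, with
invented form data::

    args, files = {}, {}
    parse_body_arguments(
        "application/x-www-form-urlencoded",
        b"name=alice&tag=a&tag=b",
        args, files)
    print(args)  # {'name': [b'alice'], 'tag': [b'a', b'b']}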
@@ -401,6 +758,48 @@ def format_timestamp(ts):
        raise TypeError("unknown timestamp type: %r" % ts)
    return email.utils.formatdate(ts, usegmt=True)


RequestStartLine = collections.namedtuple(
    'RequestStartLine', ['method', 'path', 'version'])


def parse_request_start_line(line):
    """Returns a (method, path, version) tuple for an HTTP 1.x request line.

    The response is a `collections.namedtuple`.

    >>> parse_request_start_line("GET /foo HTTP/1.1")
    RequestStartLine(method='GET', path='/foo', version='HTTP/1.1')
    """
    try:
        method, path, version = line.split(" ")
    except ValueError:
        raise HTTPInputError("Malformed HTTP request line")
    if not version.startswith("HTTP/"):
        raise HTTPInputError(
            "Malformed HTTP version in HTTP Request-Line: %r" % version)
    return RequestStartLine(method, path, version)


ResponseStartLine = collections.namedtuple(
    'ResponseStartLine', ['version', 'code', 'reason'])


def parse_response_start_line(line):
    """Returns a (version, code, reason) tuple for an HTTP 1.x response line.

    The response is a `collections.namedtuple`.

    >>> parse_response_start_line("HTTP/1.1 200 OK")
    ResponseStartLine(version='HTTP/1.1', code=200, reason='OK')
    """
    line = native_str(line)
    match = re.match("(HTTP/1.[01]) ([0-9]+) ([^\r]*)", line)
    if not match:
        raise HTTPInputError("Error parsing response start line")
    return ResponseStartLine(match.group(1), int(match.group(2)),
                             match.group(3))

# _parseparam and _parse_header are copied and modified from python2.7's cgi.py
# The original 2.7 version of this code did not correctly support some
# combinations of semicolons and double quotes.
@@ -32,6 +32,7 @@ import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os

@@ -41,10 +42,10 @@ import threading
import time
import traceback

from tornado.concurrent import Future, TracebackFuture
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds

try:
    import signal

@@ -59,6 +60,9 @@ except ImportError:
from tornado.platform.auto import set_close_exec, Waker


_POLL_TIMEOUT = 3600.0


class TimeoutError(Exception):
    pass
@@ -153,6 +157,15 @@ class IOLoop(Configurable):
        assert not IOLoop.initialized()
        IOLoop._instance = self

    @staticmethod
    def clear_instance():
        """Clear the global `IOLoop` instance.

        .. versionadded:: 4.0
        """
        if hasattr(IOLoop, "_instance"):
            del IOLoop._instance

    @staticmethod
    def current():
        """Returns the current thread's `IOLoop`.

@@ -241,21 +254,40 @@ class IOLoop(Configurable):
        raise NotImplementedError()

    def add_handler(self, fd, handler, events):
        """Registers the given handler to receive the given events for fd.
        """Registers the given handler to receive the given events for ``fd``.

        The ``fd`` argument may either be an integer file descriptor or
        a file-like object with a ``fileno()`` method (and optionally a
        ``close()`` method, which may be called when the `IOLoop` is shut
        down).

        The ``events`` argument is a bitwise or of the constants
        ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.

        When an event occurs, ``handler(fd, events)`` will be run.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def update_handler(self, fd, events):
        """Changes the events we listen for fd."""
        """Changes the events we listen for ``fd``.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()

    def remove_handler(self, fd):
        """Stop listening for events on fd."""
        """Stop listening for events on ``fd``.

        .. versionchanged:: 4.0
           Added the ability to pass file-like objects in addition to
           raw file descriptors.
        """
        raise NotImplementedError()
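A sketch of the ``add_handler`` pattern documented above, passing a
socket object directly (the address and backlog are invented); the
handler receives the object itself back as its first argument::

    import functools
    import socket

    from tornado import ioloop

    def accept_handler(sock, fd, events):
        # Called by the IOLoop whenever the listening socket is readable.
        while True:
            try:
                connection, address = sock.accept()
            except socket.error:
                return  # no more pending connections
            connection.close()  # a real server would hand this off

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(0)
    sock.bind(("127.0.0.1", 8000))  # invented address
    sock.listen(128)

    io_loop = ioloop.IOLoop.current()
    io_loop.add_handler(sock, functools.partial(accept_handler, sock),
                        io_loop.READ)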
    def set_blocking_signal_threshold(self, seconds, action):

@@ -298,6 +330,22 @@ class IOLoop(Configurable):
        """
        raise NotImplementedError()

    def _setup_logging(self):
        """The IOLoop catches and logs exceptions, so it's
        important that log output be visible.  However, python's
        default behavior for non-root loggers (prior to python
        3.2) is to print an unhelpful "no handlers could be
        found" message rather than the actual log entry, so we
        must explicitly configure logging if we've made it this
        far without anything.

        This method should be called from start() in subclasses.
        """
        if not any([logging.getLogger().handlers,
                    logging.getLogger('tornado').handlers,
                    logging.getLogger('tornado.application').handlers]):
            logging.basicConfig()

    def stop(self):
        """Stop the I/O loop.

@@ -353,10 +401,10 @@ class IOLoop(Configurable):
                    future_cell[0] = TracebackFuture()
                    future_cell[0].set_exc_info(sys.exc_info())
                else:
                    if isinstance(result, Future):
                    if is_future(result):
                        future_cell[0] = result
                    else:
                        future_cell[0] = Future()
                        future_cell[0] = TracebackFuture()
                        future_cell[0].set_result(result)
            self.add_future(future_cell[0], lambda future: self.stop())
        self.add_callback(run)
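Because ``run_sync`` now checks ``is_future(result)`` rather than a
concrete `Future` class, any coroutine works as its argument; a short
usage sketch::

    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def fetch_answer():
        yield gen.moment  # stand-in for real asynchronous work
        raise gen.Return(42)

    result = IOLoop.instance().run_sync(fetch_answer)
    print(result)  # 42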
@@ -384,7 +432,7 @@ class IOLoop(Configurable):
        """
        return time.time()

    def add_timeout(self, deadline, callback):
    def add_timeout(self, deadline, callback, *args, **kwargs):
        """Runs the ``callback`` at the time ``deadline`` from the I/O loop.

        Returns an opaque handle that may be passed to

@@ -393,13 +441,59 @@ class IOLoop(Configurable):
        ``deadline`` may be a number denoting a time (on the same
        scale as `IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.
        current time.  Since Tornado 4.0, `call_later` is a more
        convenient alternative for the relative case since it does not
        require a timedelta object.

        Note that it is not safe to call `add_timeout` from other threads.
        Instead, you must use `add_callback` to transfer control to the
        `IOLoop`'s thread, and then call `add_timeout` from there.

        Subclasses of IOLoop must implement either `add_timeout` or
        `call_at`; the default implementations of each will call
        the other.  `call_at` is usually easier to implement, but
        subclasses that wish to maintain compatibility with Tornado
        versions prior to 4.0 must use `add_timeout` instead.

        .. versionchanged:: 4.0
           Now passes through ``*args`` and ``**kwargs`` to the callback.
        """
        raise NotImplementedError()
        if isinstance(deadline, numbers.Real):
            return self.call_at(deadline, callback, *args, **kwargs)
        elif isinstance(deadline, datetime.timedelta):
            return self.call_at(self.time() + timedelta_to_seconds(deadline),
                                callback, *args, **kwargs)
        else:
            raise TypeError("Unsupported deadline %r" % deadline)

    def call_later(self, delay, callback, *args, **kwargs):
        """Runs the ``callback`` after ``delay`` seconds have passed.

        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel.  Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.

        See `add_timeout` for comments on thread-safety and subclassing.

        .. versionadded:: 4.0
        """
        return self.call_at(self.time() + delay, callback, *args, **kwargs)

    def call_at(self, when, callback, *args, **kwargs):
        """Runs the ``callback`` at the absolute time designated by ``when``.

        ``when`` must be a number using the same reference point as
        `IOLoop.time`.

        Returns an opaque handle that may be passed to `remove_timeout`
        to cancel.  Note that unlike the `asyncio` method of the same
        name, the returned object does not have a ``cancel()`` method.

        See `add_timeout` for comments on thread-safety and subclassing.

        .. versionadded:: 4.0
        """
        return self.add_timeout(when, callback, *args, **kwargs)
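The three scheduling methods above differ only in how the deadline is
expressed; a sketch (the five-second delay is arbitrary, and the
returned handles assume the docstrings' promise that each method
returns one)::

    import datetime

    from tornado.ioloop import IOLoop

    io_loop = IOLoop.current()

    def hello(name):
        print("hello,", name)

    # Equivalent ways to run `hello` roughly five seconds from now:
    io_loop.add_timeout(datetime.timedelta(seconds=5), hello, "timedelta")
    io_loop.add_timeout(io_loop.time() + 5, hello, "absolute")
    io_loop.call_at(io_loop.time() + 5, hello, "call_at")
    handle = io_loop.call_later(5, hello, "relative")

    io_loop.remove_timeout(handle)  # any returned handle can cancel its timeout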
    def remove_timeout(self, timeout):
        """Cancels a pending timeout.

@@ -437,6 +531,19 @@ class IOLoop(Configurable):
        """
        raise NotImplementedError()

    def spawn_callback(self, callback, *args, **kwargs):
        """Calls the given callback on the next IOLoop iteration.

        Unlike all other callback-related methods on IOLoop,
        ``spawn_callback`` does not associate the callback with its caller's
        ``stack_context``, so it is suitable for fire-and-forget callbacks
        that should not interfere with the caller.

        .. versionadded:: 4.0
        """
        with stack_context.NullContext():
            self.add_callback(callback, *args, **kwargs)
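``spawn_callback`` is meant for fire-and-forget work; a sketch
contrasting it with ``add_callback``::

    from tornado.ioloop import IOLoop

    def audit_log(message):
        print("audit:", message)

    io_loop = IOLoop.current()
    # Runs under the caller's stack_context, so exceptions are
    # associated with the caller:
    io_loop.add_callback(audit_log, "tracked")
    # Detached from the caller's stack_context; suitable for
    # background work that should not interfere with the caller:
    io_loop.spawn_callback(audit_log, "fire-and-forget")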
    def add_future(self, future, callback):
        """Schedules a callback on the ``IOLoop`` when the given
        `.Future` is finished.

@@ -444,7 +551,7 @@ class IOLoop(Configurable):
        The callback is invoked with one argument, the
        `.Future`.
        """
        assert isinstance(future, Future)
        assert is_future(future)
        callback = stack_context.wrap(callback)
        future.add_done_callback(
            lambda future: self.add_callback(callback, future))

@@ -455,7 +562,13 @@ class IOLoop(Configurable):
        For use in subclasses.
        """
        try:
            callback()
            ret = callback()
            if ret is not None and is_future(ret):
                # Functions that return Futures typically swallow all
                # exceptions and store them in the Future.  If a Future
                # makes it out to the IOLoop, ensure its exception (if any)
                # gets logged too.
                self.add_future(ret, lambda f: f.result())
        except Exception:
            self.handle_callback_exception(callback)

@@ -471,6 +584,47 @@ class IOLoop(Configurable):
        """
        app_log.error("Exception in callback %r", callback, exc_info=True)

    def split_fd(self, fd):
        """Returns an (fd, obj) pair from an ``fd`` parameter.

        We accept both raw file descriptors and file-like objects as
        input to `add_handler` and related methods.  When a file-like
        object is passed, we must retain the object itself so we can
        close it correctly when the `IOLoop` shuts down, but the
        poller interfaces favor file descriptors (they will accept
        file-like objects and call ``fileno()`` for you, but they
        always return the descriptor itself).

        This method is provided for use by `IOLoop` subclasses and should
        not generally be used by application code.

        .. versionadded:: 4.0
        """
        try:
            return fd.fileno(), fd
        except AttributeError:
            return fd, fd

    def close_fd(self, fd):
        """Utility method to close an ``fd``.

        If ``fd`` is a file-like object, we close it directly; otherwise
        we use `os.close`.

        This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)``) and should
        not generally be used by application code.

        .. versionadded:: 4.0
        """
        try:
            try:
                fd.close()
            except AttributeError:
                os.close(fd)
        except OSError:
            pass


class PollIOLoop(IOLoop):
    """Base class for IOLoops built around a select-like function.

@@ -496,6 +650,7 @@ class PollIOLoop(IOLoop):
        self._closing = False
        self._thread_ident = None
        self._blocking_signal_threshold = None
        self._timeout_counter = itertools.count()

        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle

@@ -509,26 +664,24 @@ class PollIOLoop(IOLoop):
        self._closing = True
        self.remove_handler(self._waker.fileno())
        if all_fds:
            for fd in self._handlers.keys():
                try:
                    close_method = getattr(fd, 'close', None)
                    if close_method is not None:
                        close_method()
                    else:
                        os.close(fd)
                except Exception:
                    gen_log.debug("error closing fd %s", fd, exc_info=True)
            for fd, handler in self._handlers.values():
                self.close_fd(fd)
        self._waker.close()
        self._impl.close()
        self._callbacks = None
        self._timeouts = None

    def add_handler(self, fd, handler, events):
        self._handlers[fd] = stack_context.wrap(handler)
        fd, obj = self.split_fd(fd)
        self._handlers[fd] = (obj, stack_context.wrap(handler))
        self._impl.register(fd, events | self.ERROR)

    def update_handler(self, fd, events):
        fd, obj = self.split_fd(fd)
        self._impl.modify(fd, events | self.ERROR)

    def remove_handler(self, fd):
        fd, obj = self.split_fd(fd)
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        try:

@@ -547,15 +700,9 @@ class PollIOLoop(IOLoop):
                                 action if action is not None else signal.SIG_DFL)

    def start(self):
        if not logging.getLogger().handlers:
            # The IOLoop catches and logs exceptions, so it's
            # important that log output be visible.  However, python's
            # default behavior for non-root loggers (prior to python
            # 3.2) is to print an unhelpful "no handlers could be
            # found" message rather than the actual log entry, so we
            # must explicitly configure logging if we've made it this
            # far without anything.
            logging.basicConfig()
        if self._running:
            raise RuntimeError("IOLoop is already running")
        self._setup_logging()
        if self._stopped:
            self._stopped = False
            return
@@ -595,98 +742,113 @@ class PollIOLoop(IOLoop):
        except ValueError:  # non-main thread
            pass

        while True:
            poll_timeout = 3600.0
        try:
            while True:
                # Prevent IO event starvation by delaying new callbacks
                # to the next iteration of the event loop.
                with self._callback_lock:
                    callbacks = self._callbacks
                    self._callbacks = []

            # Prevent IO event starvation by delaying new callbacks
            # to the next iteration of the event loop.
            with self._callback_lock:
                callbacks = self._callbacks
                self._callbacks = []
            for callback in callbacks:
                self._run_callback(callback)
                # Add any timeouts that have come due to the callback list.
                # Do not run anything until we have determined which ones
                # are ready, so timeouts that call add_timeout cannot
                # schedule anything in this iteration.
                if self._timeouts:
                    now = self.time()
                    while self._timeouts:
                        if self._timeouts[0].callback is None:
                            # the timeout was cancelled
                            heapq.heappop(self._timeouts)
                            self._cancellations -= 1
                        elif self._timeouts[0].deadline <= now:
                            timeout = heapq.heappop(self._timeouts)
                            callbacks.append(timeout.callback)
                            del timeout
                        else:
                            break
                    if (self._cancellations > 512
                            and self._cancellations > (len(self._timeouts) >> 1)):
                        # Clean up the timeout queue when it gets large and it's
                        # more than half cancellations.
                        self._cancellations = 0
                        self._timeouts = [x for x in self._timeouts
                                          if x.callback is not None]
                        heapq.heapify(self._timeouts)

            if self._timeouts:
                now = self.time()
                while self._timeouts:
                    if self._timeouts[0].callback is None:
                        # the timeout was cancelled
                        heapq.heappop(self._timeouts)
                        self._cancellations -= 1
                    elif self._timeouts[0].deadline <= now:
                        timeout = heapq.heappop(self._timeouts)
                        self._run_callback(timeout.callback)
                    else:
                        seconds = self._timeouts[0].deadline - now
                        poll_timeout = min(seconds, poll_timeout)
                        break
                if (self._cancellations > 512
                        and self._cancellations > (len(self._timeouts) >> 1)):
                    # Clean up the timeout queue when it gets large and it's
                    # more than half cancellations.
                    self._cancellations = 0
                    self._timeouts = [x for x in self._timeouts
                                      if x.callback is not None]
                    heapq.heapify(self._timeouts)
                for callback in callbacks:
                    self._run_callback(callback)
                # Closures may be holding on to a lot of memory, so allow
                # them to be freed before we go into our poll wait.
                callbacks = callback = None

            if self._callbacks:
                # If any callbacks or timeouts called add_callback,
                # we don't want to wait in poll() before we run them.
                poll_timeout = 0.0

            if not self._running:
                break

            if self._blocking_signal_threshold is not None:
                # clear alarm so it doesn't fire while poll is waiting for
                # events.
                signal.setitimer(signal.ITIMER_REAL, 0, 0)

            try:
                event_pairs = self._impl.poll(poll_timeout)
            except Exception as e:
                # Depending on python version and IOLoop implementation,
                # different exception types may be thrown and there are
                # two ways EINTR might be signaled:
                # * e.errno == errno.EINTR
                # * e.args is like (errno.EINTR, 'Interrupted system call')
                if (getattr(e, 'errno', None) == errno.EINTR or
                        (isinstance(getattr(e, 'args', None), tuple) and
                         len(e.args) == 2 and e.args[0] == errno.EINTR)):
                    continue
                if self._callbacks:
                    # If any callbacks or timeouts called add_callback,
                    # we don't want to wait in poll() before we run them.
                    poll_timeout = 0.0
                elif self._timeouts:
                    # If there are any timeouts, schedule the first one.
                    # Use self.time() instead of 'now' to account for time
                    # spent running callbacks.
                    poll_timeout = self._timeouts[0].deadline - self.time()
                    poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
                else:
                    raise
                    # No timeouts and no callbacks, so use the default.
                    poll_timeout = _POLL_TIMEOUT

            if self._blocking_signal_threshold is not None:
                signal.setitimer(signal.ITIMER_REAL,
                                 self._blocking_signal_threshold, 0)
                if not self._running:
                    break

                if self._blocking_signal_threshold is not None:
                    # clear alarm so it doesn't fire while poll is waiting for
                    # events.
                    signal.setitimer(signal.ITIMER_REAL, 0, 0)

            # Pop one fd at a time from the set of pending fds and run
            # its handler.  Since that handler may perform actions on
            # other file descriptors, there may be reentrant calls to
            # this IOLoop that update self._events
            self._events.update(event_pairs)
            while self._events:
                fd, events = self._events.popitem()
                try:
                    self._handlers[fd](fd, events)
                except (OSError, IOError) as e:
                    if e.args[0] == errno.EPIPE:
                        # Happens when the client closes the connection
                        pass
                try:
                    event_pairs = self._impl.poll(poll_timeout)
                except Exception as e:
                    # Depending on python version and IOLoop implementation,
                    # different exception types may be thrown and there are
                    # two ways EINTR might be signaled:
                    # * e.errno == errno.EINTR
                    # * e.args is like (errno.EINTR, 'Interrupted system call')
                    if errno_from_exception(e) == errno.EINTR:
                        continue
                    else:
                        app_log.error("Exception in I/O handler for fd %s",
                                      fd, exc_info=True)
                except Exception:
                    app_log.error("Exception in I/O handler for fd %s",
                                  fd, exc_info=True)
        # reset the stopped flag so another start/stop pair can be issued
        self._stopped = False
        if self._blocking_signal_threshold is not None:
            signal.setitimer(signal.ITIMER_REAL, 0, 0)
        IOLoop._current.instance = old_current
        if old_wakeup_fd is not None:
            signal.set_wakeup_fd(old_wakeup_fd)
                        raise

                if self._blocking_signal_threshold is not None:
                    signal.setitimer(signal.ITIMER_REAL,
                                     self._blocking_signal_threshold, 0)

                # Pop one fd at a time from the set of pending fds and run
                # its handler.  Since that handler may perform actions on
                # other file descriptors, there may be reentrant calls to
                # this IOLoop that update self._events
                self._events.update(event_pairs)
                while self._events:
                    fd, events = self._events.popitem()
                    try:
                        fd_obj, handler_func = self._handlers[fd]
                        handler_func(fd_obj, events)
                    except (OSError, IOError) as e:
                        if errno_from_exception(e) == errno.EPIPE:
                            # Happens when the client closes the connection
                            pass
                        else:
                            self.handle_callback_exception(self._handlers.get(fd))
                    except Exception:
                        self.handle_callback_exception(self._handlers.get(fd))
                fd_obj = handler_func = None

        finally:
            # reset the stopped flag so another start/stop pair can be issued
            self._stopped = False
            if self._blocking_signal_threshold is not None:
                signal.setitimer(signal.ITIMER_REAL, 0, 0)
            IOLoop._current.instance = old_current
            if old_wakeup_fd is not None:
                signal.set_wakeup_fd(old_wakeup_fd)

    def stop(self):
        self._running = False
@@ -696,8 +858,11 @@ class PollIOLoop(IOLoop):
    def time(self):
        return self.time_func()

    def add_timeout(self, deadline, callback):
        timeout = _Timeout(deadline, stack_context.wrap(callback), self)
    def call_at(self, deadline, callback, *args, **kwargs):
        timeout = _Timeout(
            deadline,
            functools.partial(stack_context.wrap(callback), *args, **kwargs),
            self)
        heapq.heappush(self._timeouts, timeout)
        return timeout

@@ -717,14 +882,14 @@ class PollIOLoop(IOLoop):
            list_empty = not self._callbacks
            self._callbacks.append(functools.partial(
                stack_context.wrap(callback), *args, **kwargs))
        if list_empty and thread.get_ident() != self._thread_ident:
            # If we're in the IOLoop's thread, we know it's not currently
            # polling.  If we're not, and we added the first callback to an
            # empty list, we may need to wake it up (it may wake up on its
            # own, but an occasional extra wake is harmless).  Waking
            # up a polling IOLoop is relatively expensive, so we try to
            # avoid it when we can.
            self._waker.wake()
        if list_empty and thread.get_ident() != self._thread_ident:
            # If we're in the IOLoop's thread, we know it's not currently
            # polling.  If we're not, and we added the first callback to an
            # empty list, we may need to wake it up (it may wake up on its
            # own, but an occasional extra wake is harmless).  Waking
            # up a polling IOLoop is relatively expensive, so we try to
            # avoid it when we can.
            self._waker.wake()

    def add_callback_from_signal(self, callback, *args, **kwargs):
        with stack_context.NullContext():
@ -749,33 +914,26 @@ class _Timeout(object):
    """An IOLoop timeout, a UNIX timestamp and a callback"""

    # Reduce memory overhead when there are lots of pending callbacks
    __slots__ = ['deadline', 'callback']
    __slots__ = ['deadline', 'callback', 'tiebreaker']

    def __init__(self, deadline, callback, io_loop):
        if isinstance(deadline, numbers.Real):
            self.deadline = deadline
        elif isinstance(deadline, datetime.timedelta):
            self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline)
        else:
        if not isinstance(deadline, numbers.Real):
            raise TypeError("Unsupported deadline %r" % deadline)
        self.deadline = deadline
        self.callback = callback

    @staticmethod
    def timedelta_to_seconds(td):
        """Equivalent to td.total_seconds() (introduced in python 2.7)."""
        return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
        self.tiebreaker = next(io_loop._timeout_counter)

    # Comparison methods to sort by deadline, with object id as a tiebreaker
    # to guarantee a consistent ordering.  The heapq module uses __le__
    # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
    # use __lt__).
    def __lt__(self, other):
        return ((self.deadline, id(self)) <
                (other.deadline, id(other)))
        return ((self.deadline, self.tiebreaker) <
                (other.deadline, other.tiebreaker))

    def __le__(self, other):
        return ((self.deadline, id(self)) <=
                (other.deadline, id(other)))
        return ((self.deadline, self.tiebreaker) <=
                (other.deadline, other.tiebreaker))


class PeriodicCallback(object):
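The hunk above replaces the ``id(self)`` tiebreaker with a per-loop counter (``io_loop._timeout_counter``; presumably an ``itertools.count()`` on the 4.0 IOLoop). A heap needs a total order, and a monotonically increasing counter additionally guarantees FIFO ordering for timeouts with equal deadlines, which object ids cannot, since they are arbitrary and may be recycled. A standalone sketch of the idea; everything except the attribute name mentioned above is illustrative::

    import heapq
    import itertools

    counter = itertools.count()
    heap = []
    for name in ["first", "second", "third"]:
        # All three share a deadline; the counter breaks the tie.
        heapq.heappush(heap, (1.0, next(counter), name))
    while heap:
        deadline, tiebreaker, name = heapq.heappop(heap)
        print(name)  # pops in FIFO order: first, second, third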
@ -813,7 +971,7 @@ class PeriodicCallback(object):
        try:
            self.callback()
        except Exception:
            app_log.error("Error in periodic callback", exc_info=True)
            self.io_loop.handle_callback_exception(self.callback)
        self._schedule_next()

    def _schedule_next(self):
(File diff suppressed because it is too large.)
@ -286,8 +286,6 @@ class Locale(object):
        This method is primarily intended for dates in the past.
        For dates in the future, we fall back to full format.
        """
        if self.code.startswith("ru"):
            relative = False
        if isinstance(date, numbers.Real):
            date = datetime.datetime.utcfromtimestamp(date)
        now = datetime.datetime.utcnow()
@ -33,7 +33,6 @@ from __future__ import absolute_import, division, print_function, with_statement
import logging
import logging.handlers
import sys
import time

from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type
@ -51,7 +50,7 @@ gen_log = logging.getLogger("tornado.general")

def _stderr_supports_color():
    color = False
    if curses and sys.stderr.isatty():
    if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
        try:
            curses.setupterm()
            if curses.tigetnum("colors") > 0:
@ -61,6 +60,13 @@ def _stderr_supports_color():
    return color


def _safe_unicode(s):
    try:
        return _unicode(s)
    except UnicodeDecodeError:
        return repr(s)


class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado.

@ -74,10 +80,37 @@ class LogFormatter(logging.Formatter):
    `tornado.options.parse_command_line` (unless ``--logging=none`` is
    used).
    """
    def __init__(self, color=True, *args, **kwargs):
        logging.Formatter.__init__(self, *args, **kwargs)
        self._color = color and _stderr_supports_color()
        if self._color:
    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    DEFAULT_COLORS = {
        logging.DEBUG: 4,  # Blue
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
    }

    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
        r"""
        :arg bool color: Enables color support.
        :arg string fmt: Log message format.
            It will be applied to the attributes dict of log records.  The
            text between ``%(color)s`` and ``%(end_color)s`` will be colored
            depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
            code
        :arg string datefmt: Datetime format.
            Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.

        .. versionchanged:: 3.2

           Added ``fmt`` and ``datefmt`` arguments.
        """
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt

        self._colors = {}
        if color and _stderr_supports_color():
            # The curses module has some str/bytes confusion in
            # python3.  Until version 3.2.3, most methods return
            # bytes, but only accept strings.  In addition, we want to
@ -89,64 +122,56 @@ class LogFormatter(logging.Formatter):
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode_type(fg_color, "ascii")
            self._colors = {
                logging.DEBUG: unicode_type(curses.tparm(fg_color, 4),  # Blue
                                            "ascii"),
                logging.INFO: unicode_type(curses.tparm(fg_color, 2),  # Green
                                           "ascii"),
                logging.WARNING: unicode_type(curses.tparm(fg_color, 3),  # Yellow
                                              "ascii"),
                logging.ERROR: unicode_type(curses.tparm(fg_color, 1),  # Red
                                            "ascii"),
            }

            for levelno, code in colors.items():
                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
            self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
        else:
            self._normal = ''

    def format(self, record):
        try:
            record.message = record.getMessage()
            message = record.getMessage()
            assert isinstance(message, basestring_type)  # guaranteed by logging
            # Encoding notes:  The logging module prefers to work with character
            # strings, but only enforces that log messages are instances of
            # basestring.  In python 2, non-ascii bytestrings will make
            # their way through the logging framework until they blow up with
            # an unhelpful decoding error (with this formatter it happens
            # when we attach the prefix, but there are other opportunities for
            # exceptions further along in the framework).
            #
            # If a byte string makes it this far, convert it to unicode to
            # ensure it will make it out to the logs.  Use repr() as a fallback
            # to ensure that all byte strings can be converted successfully,
            # but don't do it by default so we don't add extra quotes to ascii
            # bytestrings.  This is a bit of a hacky place to do this, but
            # it's worth it since the encoding errors that would otherwise
            # result are so useless (and tornado is fond of using utf8-encoded
            # byte strings wherever possible).
            record.message = _safe_unicode(message)
        except Exception as e:
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        assert isinstance(record.message, basestring_type)  # guaranteed by logging
        record.asctime = time.strftime(
            "%y%m%d %H:%M:%S", self.converter(record.created))
        prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
            record.__dict__
        if self._color:
            prefix = (self._colors.get(record.levelno, self._normal) +
                      prefix + self._normal)

        # Encoding notes:  The logging module prefers to work with character
        # strings, but only enforces that log messages are instances of
        # basestring.  In python 2, non-ascii bytestrings will make
        # their way through the logging framework until they blow up with
        # an unhelpful decoding error (with this formatter it happens
        # when we attach the prefix, but there are other opportunities for
        # exceptions further along in the framework).
        #
        # If a byte string makes it this far, convert it to unicode to
        # ensure it will make it out to the logs.  Use repr() as a fallback
        # to ensure that all byte strings can be converted successfully,
        # but don't do it by default so we don't add extra quotes to ascii
        # bytestrings.  This is a bit of a hacky place to do this, but
        # it's worth it since the encoding errors that would otherwise
        # result are so useless (and tornado is fond of using utf8-encoded
        # byte strings wherever possible).
        def safe_unicode(s):
            try:
                return _unicode(s)
            except UnicodeDecodeError:
                return repr(s)
        record.asctime = self.formatTime(record, self.datefmt)

        if record.levelno in self._colors:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''

        formatted = self._fmt % record.__dict__

        formatted = prefix + " " + safe_unicode(record.message)
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # exc_text contains multiple lines.  We need to safe_unicode
            # exc_text contains multiple lines.  We need to _safe_unicode
            # each line separately so that non-utf8 bytes don't cause
            # all the newlines to turn into '\n'.
            lines = [formatted.rstrip()]
            lines.extend(safe_unicode(ln) for ln in record.exc_text.split('\n'))
            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        return formatted.replace("\n", "\n    ")
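With the formatter now driven by ``DEFAULT_FORMAT``/``DEFAULT_COLORS`` and the ``%(color)s``/``%(end_color)s`` placeholders, callers can customize the prefix without subclassing. A minimal sketch (the custom format string is illustrative)::

    import logging
    from tornado.log import LogFormatter

    handler = logging.StreamHandler()
    # Same as the default format, but with the logger name added.
    handler.setFormatter(LogFormatter(
        fmt='%(color)s[%(levelname)1.1s %(asctime)s '
            '%(name)s]%(end_color)s %(message)s'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.INFO)
    root.info("formatted hello")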
@ -154,12 +179,12 @@ class LogFormatter(logging.Formatter):
def enable_pretty_logging(options=None, logger=None):
    """Turns on formatted logging output as configured.

    This is called automaticaly by `tornado.options.parse_command_line`
    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging == 'none':
    if options.logging is None or options.logging.lower() == 'none':
        return
    if logger is None:
        logger = logging.getLogger()
@ -20,15 +20,45 @@ from __future__ import absolute_import, division, print_function, with_statement

import errno
import os
import re
import platform
import socket
import ssl
import stat

from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import Configurable
from tornado.util import u, Configurable, errno_from_exception

try:
    import ssl
except ImportError:
    # ssl is not available on Google App Engine
    ssl = None

if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'):  # python 3.2+
    ssl_match_hostname = ssl.match_hostname
    SSLCertificateError = ssl.CertificateError
elif ssl is None:
    ssl_match_hostname = SSLCertificateError = None
else:
    import backports.ssl_match_hostname
    ssl_match_hostname = backports.ssl_match_hostname.match_hostname
    SSLCertificateError = backports.ssl_match_hostname.CertificateError

# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
# getaddrinfo attempts to import encodings.idna. If this is done at
# module-import time, the import lock is already held by the main thread,
# leading to deadlock. Avoid it by caching the idna encoder on the main
# thread now.
u('foo').encode('idna')

# These errnos indicate that a non-blocking operation must be retried
# at a later time.  On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)

if hasattr(errno, "WSAEWOULDBLOCK"):
    _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)


def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None):
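Several hunks in this file (and in process.py further down) replace direct ``e.args[0]``/``e.errno`` checks with the new ``tornado.util.errno_from_exception`` helper, which normalizes the two ways an errno can be attached to an exception. Roughly what such a helper has to do (illustrative; the real implementation lives in tornado/util.py)::

    def errno_from_exception(e):
        # socket.error / OSError carry the errno either as an
        # ``errno`` attribute or as the first positional argument.
        if hasattr(e, 'errno'):
            return e.errno
        elif e.args:
            return e.args[0]
        return None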
@ -63,13 +93,23 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags
        family = socket.AF_INET
    if flags is None:
        flags = socket.AI_PASSIVE
    bound_port = None
    for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
                                      0, flags)):
        af, socktype, proto, canonname, sockaddr = res
        if (platform.system() == 'Darwin' and address == 'localhost' and
                af == socket.AF_INET6 and sockaddr[3] != 0):
            # Mac OS X includes a link-local address fe80::1%lo0 in the
            # getaddrinfo results for 'localhost'.  However, the firewall
            # doesn't understand that this is a local address and will
            # prompt for access (often repeatedly, due to an apparent
            # bug in its ability to remember granting access to an
            # application).  Skip these addresses.
            continue
        try:
            sock = socket.socket(af, socktype, proto)
        except socket.error as e:
            if e.args[0] == errno.EAFNOSUPPORT:
            if errno_from_exception(e) == errno.EAFNOSUPPORT:
                continue
            raise
        set_close_exec(sock.fileno())
@ -86,8 +126,16 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags
        # Python 2.x on windows doesn't have IPPROTO_IPV6.
        if hasattr(socket, "IPPROTO_IPV6"):
            sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

        # automatic port allocation with port=None
        # should bind on the same port on IPv4 and IPv6
        host, requested_port = sockaddr[:2]
        if requested_port == 0 and bound_port is not None:
            sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))

        sock.setblocking(0)
        sock.bind(sockaddr)
        bound_port = sock.getsockname()[1]
        sock.listen(backlog)
        sockets.append(sock)
    return sockets
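The ``bound_port`` bookkeeping above means automatic port allocation now produces one OS-assigned port shared across address families, rather than a different port per socket. A quick illustration (addresses and the allocated port will vary by machine)::

    from tornado.netutil import bind_sockets

    # Ask the OS for a free port; IPv4 and IPv6 listeners share it.
    socks = bind_sockets(None, address='localhost')
    print(sorted(s.getsockname()[:2] for s in socks))
    # e.g. [('127.0.0.1', 52731), ('::1', 52731)]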
@ -110,7 +158,7 @@ if hasattr(socket, 'AF_UNIX'):
        try:
            st = os.stat(file)
        except OSError as err:
            if err.errno != errno.ENOENT:
            if errno_from_exception(err) != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
@ -140,18 +188,18 @@ def add_accept_handler(sock, callback, io_loop=None):
        try:
            connection, address = sock.accept()
        except socket.error as e:
            # EWOULDBLOCK and EAGAIN indicate we have accepted every
            # _ERRNO_WOULDBLOCK indicate we have accepted every
            # connection that is available.
            if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
            if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                return
            # ECONNABORTED indicates that there was a connection
            # but it was closed while still in the accept queue.
            # (observed on FreeBSD).
            if e.args[0] == errno.ECONNABORTED:
            if errno_from_exception(e) == errno.ECONNABORTED:
                continue
            raise
        callback(connection, address)
    io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)
    io_loop.add_handler(sock, accept_handler, IOLoop.READ)


def is_valid_ip(ip):
@ -159,6 +207,10 @@ def is_valid_ip(ip):

    Supports IPv4 and IPv6.
    """
    if not ip or '\x00' in ip:
        # getaddrinfo resolves empty strings to localhost, and truncates
        # on zero bytes.
        return False
    try:
        res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
                                 socket.SOCK_STREAM,
@ -363,6 +415,10 @@ def ssl_options_to_context(ssl_options):
        context.load_verify_locations(ssl_options['ca_certs'])
    if 'ciphers' in ssl_options:
        context.set_ciphers(ssl_options['ciphers'])
    if hasattr(ssl, 'OP_NO_COMPRESSION'):
        # Disable TLS compression to avoid CRIME and related attacks.
        # This constant wasn't added until python 3.3.
        context.options |= ssl.OP_NO_COMPRESSION
    return context
@ -387,73 +443,3 @@ def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
        return context.wrap_socket(socket, **kwargs)
    else:
        return ssl.wrap_socket(socket, **dict(context, **kwargs))

if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'):  # python 3.2+
    ssl_match_hostname = ssl.match_hostname
    SSLCertificateError = ssl.CertificateError
else:
    # match_hostname was added to the standard library ssl module in python 3.2.
    # The following code was backported for older releases and copied from
    # https://bitbucket.org/brandon/backports.ssl_match_hostname
    class SSLCertificateError(ValueError):
        pass

    def _dnsname_to_pat(dn, max_wildcards=1):
        pats = []
        for frag in dn.split(r'.'):
            if frag.count('*') > max_wildcards:
                # Issue #17980: avoid denials of service by refusing more
                # than one wildcard per fragment.  A survey of established
                # policy among SSL implementations showed it to be a
                # reasonable choice.
                raise SSLCertificateError(
                    "too many wildcards in certificate DNS name: " + repr(dn))
            if frag == '*':
                # When '*' is a fragment by itself, it matches a non-empty dotless
                # fragment.
                pats.append('[^.]+')
            else:
                # Otherwise, '*' matches any dotless fragment.
                frag = re.escape(frag)
                pats.append(frag.replace(r'\*', '[^.]*'))
        return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)

    def ssl_match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 rules
        are mostly followed, but IP addresses are not accepted for *hostname*.

        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate")
        dnsnames = []
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_to_pat(value).match(hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_to_pat(value).match(hostname):
                            return
                        dnsnames.append(value)
        if len(dnsnames) > 1:
            raise SSLCertificateError("hostname %r "
                                      "doesn't match either of %s"
                                      % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise SSLCertificateError("hostname %r "
                                      "doesn't match %r"
                                      % (hostname, dnsnames[0]))
        else:
            raise SSLCertificateError("no appropriate commonName or "
                                      "subjectAltName fields were found")
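The backported matcher removed above (now imported from the ``backports.ssl_match_hostname`` package near the top of this file) enforces RFC 2818-style rules: subjectAltName DNS entries win, a wildcard matches exactly one label, and the common name is only a fallback. A small illustration using a hand-built cert dict of the shape ``SSLSocket.getpeercert()`` returns::

    cert = {'subjectAltName': (('DNS', '*.example.com'),)}

    ssl_match_hostname(cert, 'www.example.com')  # matches; returns None
    try:
        # '*' only matches one non-empty label, so the bare domain fails.
        ssl_match_hostname(cert, 'example.com')
    except SSLCertificateError as e:
        print(e)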
@ -56,6 +56,18 @@ We support `datetimes <datetime.datetime>`, `timedeltas
the top-level functions in this module (`define`, `parse_command_line`, etc)
simply call methods on it.  You may create additional `OptionParser`
instances to define isolated sets of options, such as for subcommands.

.. note::

   By default, several options are defined that will configure the
   standard `logging` module when `parse_command_line` or `parse_config_file`
   are called.  If you want Tornado to leave the logging configuration
   alone so you can manage it yourself, either pass ``--logging=none``
   on the command line or do the following to disable it in code::

       from tornado.options import options, parse_command_line
       options.logging = None
       parse_command_line()
"""

from __future__ import absolute_import, division, print_function, with_statement
@ -360,6 +372,8 @@ class _Mockable(object):


class _Option(object):
    UNSET = object()

    def __init__(self, name, default=None, type=basestring_type, help=None,
                 metavar=None, multiple=False, file_name=None, group_name=None,
                 callback=None):
@ -374,10 +388,10 @@ class _Option(object):
        self.group_name = group_name
        self.callback = callback
        self.default = default
        self._value = None
        self._value = _Option.UNSET

    def value(self):
        return self.default if self._value is None else self._value
        return self.default if self._value is _Option.UNSET else self._value

    def parse(self, value):
        _parse = {
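The ``UNSET`` sentinel exists so that explicitly assigning ``None`` to an option (as the logging note earlier in this file recommends) is distinguishable from never setting it; with the old ``self._value = None``, an explicit ``None`` would silently fall back to the default. The pattern in miniature (standalone, illustrative)::

    UNSET = object()

    class Option(object):
        def __init__(self, default):
            self.default = default
            self._value = UNSET

        def set(self, value):
            self._value = value

        def value(self):
            # Only a never-set option falls back to the default;
            # an explicit None sticks.
            return self.default if self._value is UNSET else self._value

    opt = Option(default='info')
    print(opt.value())  # 'info'
    opt.set(None)
    print(opt.value())  # None, not 'info'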
142  Shared/lib/python2.7/site-packages/tornado/platform/asyncio.py  (Normal file)

@ -0,0 +1,142 @@
"""Bridges between the `asyncio` module and Tornado IOLoop.
|
||||
|
||||
This is a work in progress and interfaces are subject to change.
|
||||
|
||||
To test:
|
||||
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOLoop
|
||||
python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOMainLoop
|
||||
(the tests log a few warnings with AsyncIOMainLoop because they leave some
|
||||
unfinished callbacks on the event loop that fail when it resumes)
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import, division, print_function, with_statement
|
||||
import datetime
|
||||
import functools
|
||||
|
||||
from tornado.ioloop import IOLoop
|
||||
from tornado import stack_context
|
||||
from tornado.util import timedelta_to_seconds
|
||||
|
||||
try:
|
||||
# Import the real asyncio module for py33+ first. Older versions of the
|
||||
# trollius backport also use this name.
|
||||
import asyncio
|
||||
except ImportError as e:
|
||||
# Asyncio itself isn't available; see if trollius is (backport to py26+).
|
||||
try:
|
||||
import trollius as asyncio
|
||||
except ImportError:
|
||||
# Re-raise the original asyncio error, not the trollius one.
|
||||
raise e
|
||||
|
||||
class BaseAsyncIOLoop(IOLoop):
|
||||
def initialize(self, asyncio_loop, close_loop=False):
|
||||
self.asyncio_loop = asyncio_loop
|
||||
self.close_loop = close_loop
|
||||
self.asyncio_loop.call_soon(self.make_current)
|
||||
# Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
|
||||
self.handlers = {}
|
||||
# Set of fds listening for reads/writes
|
||||
self.readers = set()
|
||||
self.writers = set()
|
||||
self.closing = False
|
||||
|
||||
def close(self, all_fds=False):
|
||||
self.closing = True
|
||||
for fd in list(self.handlers):
|
||||
fileobj, handler_func = self.handlers[fd]
|
||||
self.remove_handler(fd)
|
||||
if all_fds:
|
||||
self.close_fd(fileobj)
|
||||
if self.close_loop:
|
||||
self.asyncio_loop.close()
|
||||
|
||||
def add_handler(self, fd, handler, events):
|
||||
fd, fileobj = self.split_fd(fd)
|
||||
if fd in self.handlers:
|
||||
raise ValueError("fd %s added twice" % fd)
|
||||
self.handlers[fd] = (fileobj, stack_context.wrap(handler))
|
||||
if events & IOLoop.READ:
|
||||
self.asyncio_loop.add_reader(
|
||||
fd, self._handle_events, fd, IOLoop.READ)
|
||||
self.readers.add(fd)
|
||||
if events & IOLoop.WRITE:
|
||||
self.asyncio_loop.add_writer(
|
||||
fd, self._handle_events, fd, IOLoop.WRITE)
|
||||
self.writers.add(fd)
|
||||
|
||||
def update_handler(self, fd, events):
|
||||
fd, fileobj = self.split_fd(fd)
|
||||
if events & IOLoop.READ:
|
||||
if fd not in self.readers:
|
||||
self.asyncio_loop.add_reader(
|
||||
fd, self._handle_events, fd, IOLoop.READ)
|
||||
self.readers.add(fd)
|
||||
else:
|
||||
if fd in self.readers:
|
||||
self.asyncio_loop.remove_reader(fd)
|
||||
self.readers.remove(fd)
|
||||
if events & IOLoop.WRITE:
|
||||
if fd not in self.writers:
|
||||
self.asyncio_loop.add_writer(
|
||||
fd, self._handle_events, fd, IOLoop.WRITE)
|
||||
self.writers.add(fd)
|
||||
else:
|
||||
if fd in self.writers:
|
||||
self.asyncio_loop.remove_writer(fd)
|
||||
self.writers.remove(fd)
|
||||
|
||||
def remove_handler(self, fd):
|
||||
fd, fileobj = self.split_fd(fd)
|
||||
if fd not in self.handlers:
|
||||
return
|
||||
if fd in self.readers:
|
||||
self.asyncio_loop.remove_reader(fd)
|
||||
self.readers.remove(fd)
|
||||
if fd in self.writers:
|
||||
self.asyncio_loop.remove_writer(fd)
|
||||
self.writers.remove(fd)
|
||||
del self.handlers[fd]
|
||||
|
||||
def _handle_events(self, fd, events):
|
||||
fileobj, handler_func = self.handlers[fd]
|
||||
handler_func(fileobj, events)
|
||||
|
||||
def start(self):
|
||||
self._setup_logging()
|
||||
self.asyncio_loop.run_forever()
|
||||
|
||||
def stop(self):
|
||||
self.asyncio_loop.stop()
|
||||
|
||||
def call_at(self, when, callback, *args, **kwargs):
|
||||
# asyncio.call_at supports *args but not **kwargs, so bind them here.
|
||||
# We do not synchronize self.time and asyncio_loop.time, so
|
||||
# convert from absolute to relative.
|
||||
return self.asyncio_loop.call_later(
|
||||
max(0, when - self.time()), self._run_callback,
|
||||
functools.partial(stack_context.wrap(callback), *args, **kwargs))
|
||||
|
||||
def remove_timeout(self, timeout):
|
||||
timeout.cancel()
|
||||
|
||||
def add_callback(self, callback, *args, **kwargs):
|
||||
if self.closing:
|
||||
raise RuntimeError("IOLoop is closing")
|
||||
self.asyncio_loop.call_soon_threadsafe(
|
||||
self._run_callback,
|
||||
functools.partial(stack_context.wrap(callback), *args, **kwargs))
|
||||
|
||||
add_callback_from_signal = add_callback
|
||||
|
||||
|
||||
class AsyncIOMainLoop(BaseAsyncIOLoop):
|
||||
def initialize(self):
|
||||
super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(),
|
||||
close_loop=False)
|
||||
|
||||
|
||||
class AsyncIOLoop(BaseAsyncIOLoop):
|
||||
def initialize(self):
|
||||
super(AsyncIOLoop, self).initialize(asyncio.new_event_loop(),
|
||||
close_loop=True)
|
||||
|
|
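Typical use of this bridge is to install ``AsyncIOMainLoop`` before any IOLoop is created, after which Tornado and asyncio share one event loop. A minimal sketch (the handler and port are illustrative)::

    import asyncio

    import tornado.web
    from tornado.platform.asyncio import AsyncIOMainLoop

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello from the asyncio event loop")

    AsyncIOMainLoop().install()  # IOLoop.current() now wraps asyncio
    app = tornado.web.Application([(r"/", MainHandler)])
    app.listen(8888)
    asyncio.get_event_loop().run_forever()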
@ -30,6 +30,10 @@ import os
if os.name == 'nt':
    from tornado.platform.common import Waker
    from tornado.platform.windows import set_close_exec
elif 'APPENGINE_RUNTIME' in os.environ:
    from tornado.platform.common import Waker

    def set_close_exec(fd):
        pass
else:
    from tornado.platform.posix import set_close_exec, Waker
@ -1,3 +1,4 @@
from __future__ import absolute_import, division, print_function, with_statement
import pycares
import socket
@ -15,7 +15,8 @@ class Waker(interface.Waker):
    and Jython.
    """
    def __init__(self):
        # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
        # Based on Zope select_trigger.py:
        # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py

        self.writer = socket.socket()
        # Disable buffering -- pulling the trigger sends 1 byte,
@ -37,7 +37,7 @@ class _KQueue(object):

    def register(self, fd, events):
        if fd in self._active:
            raise IOError("fd %d already registered" % fd)
            raise IOError("fd %s already registered" % fd)
        self._control(fd, events, select.KQ_EV_ADD)
        self._active[fd] = events
@ -37,7 +37,7 @@ class _Select(object):

    def register(self, fd, events):
        if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds:
            raise IOError("fd %d already registered" % fd)
            raise IOError("fd %s already registered" % fd)
        if events & IOLoop.READ:
            self.read_fds.add(fd)
        if events & IOLoop.WRITE:
@ -68,6 +68,7 @@ from __future__ import absolute_import, division, print_function, with_statement

import datetime
import functools
import numbers
import socket

import twisted.internet.abstract

@ -90,6 +91,7 @@ from tornado.log import app_log
from tornado.netutil import Resolver
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop
from tornado.util import timedelta_to_seconds


@implementer(IDelayedCall)
@ -365,8 +367,9 @@ def install(io_loop=None):

@implementer(IReadDescriptor, IWriteDescriptor)
class _FD(object):
    def __init__(self, fd, handler):
    def __init__(self, fd, fileobj, handler):
        self.fd = fd
        self.fileobj = fileobj
        self.handler = handler
        self.reading = False
        self.writing = False
@ -377,15 +380,15 @@ class _FD(object):

    def doRead(self):
        if not self.lost:
            self.handler(self.fd, tornado.ioloop.IOLoop.READ)
            self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)

    def doWrite(self):
        if not self.lost:
            self.handler(self.fd, tornado.ioloop.IOLoop.WRITE)
            self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)

    def connectionLost(self, reason):
        if not self.lost:
            self.handler(self.fd, tornado.ioloop.IOLoop.ERROR)
            self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
            self.lost = True

    def logPrefix(self):
@ -412,14 +415,19 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
        self.reactor.callWhenRunning(self.make_current)

    def close(self, all_fds=False):
        fds = self.fds
        self.reactor.removeAll()
        for c in self.reactor.getDelayedCalls():
            c.cancel()
        if all_fds:
            for fd in fds.values():
                self.close_fd(fd.fileobj)

    def add_handler(self, fd, handler, events):
        if fd in self.fds:
            raise ValueError('fd %d added twice' % fd)
        self.fds[fd] = _FD(fd, wrap(handler))
            raise ValueError('fd %s added twice' % fd)
        fd, fileobj = self.split_fd(fd)
        self.fds[fd] = _FD(fd, fileobj, wrap(handler))
        if events & tornado.ioloop.IOLoop.READ:
            self.fds[fd].reading = True
            self.reactor.addReader(self.fds[fd])
@ -428,6 +436,7 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
            self.reactor.addWriter(self.fds[fd])

    def update_handler(self, fd, events):
        fd, fileobj = self.split_fd(fd)
        if events & tornado.ioloop.IOLoop.READ:
            if not self.fds[fd].reading:
                self.fds[fd].reading = True
@ -446,6 +455,7 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
            self.reactor.removeWriter(self.fds[fd])

    def remove_handler(self, fd):
        fd, fileobj = self.split_fd(fd)
        if fd not in self.fds:
            return
        self.fds[fd].lost = True
@ -456,33 +466,34 @@ class TwistedIOLoop(tornado.ioloop.IOLoop):
        del self.fds[fd]

    def start(self):
        self._setup_logging()
        self.reactor.run()

    def stop(self):
        self.reactor.crash()

    def _run_callback(self, callback, *args, **kwargs):
        try:
            callback(*args, **kwargs)
        except Exception:
            self.handle_callback_exception(callback)

    def add_timeout(self, deadline, callback):
        if isinstance(deadline, (int, long, float)):
    def add_timeout(self, deadline, callback, *args, **kwargs):
        # This method could be simplified (since tornado 4.0) by
        # overriding call_at instead of add_timeout, but we leave it
        # for now as a test of backwards-compatibility.
        if isinstance(deadline, numbers.Real):
            delay = max(deadline - self.time(), 0)
        elif isinstance(deadline, datetime.timedelta):
            delay = tornado.ioloop._Timeout.timedelta_to_seconds(deadline)
            delay = timedelta_to_seconds(deadline)
        else:
            raise TypeError("Unsupported deadline %r")
        return self.reactor.callLater(delay, self._run_callback, wrap(callback))
        return self.reactor.callLater(
            delay, self._run_callback,
            functools.partial(wrap(callback), *args, **kwargs))

    def remove_timeout(self, timeout):
        if timeout.active():
            timeout.cancel()

    def add_callback(self, callback, *args, **kwargs):
        self.reactor.callFromThread(self._run_callback,
                                    wrap(callback), *args, **kwargs)
        self.reactor.callFromThread(
            self._run_callback,
            functools.partial(wrap(callback), *args, **kwargs))

    def add_callback_from_signal(self, callback, *args, **kwargs):
        self.add_callback(callback, *args, **kwargs)
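For reference, the documented way to use this class is to install it before any IOLoop is created and then drive everything from the reactor (a sketch)::

    from tornado.platform.twisted import TwistedIOLoop
    from twisted.internet import reactor

    TwistedIOLoop().install()  # Tornado callbacks now run on the reactor
    # ... create Tornado servers/clients here ...
    reactor.run()              # starts the shared event loop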
@ -527,8 +538,10 @@ class TwistedResolver(Resolver):
            resolved_family = socket.AF_INET6
        else:
            deferred = self.resolver.getHostByName(utf8(host))
            resolved = yield gen.Task(deferred.addCallback)
        if twisted.internet.abstract.isIPAddress(resolved):
            resolved = yield gen.Task(deferred.addBoth)
        if isinstance(resolved, failure.Failure):
            resolved.raiseException()
        elif twisted.internet.abstract.isIPAddress(resolved):
            resolved_family = socket.AF_INET
        elif twisted.internet.abstract.isIPv6Address(resolved):
            resolved_family = socket.AF_INET6
@ -21,7 +21,6 @@ the server into multiple processes and managing subprocesses.
from __future__ import absolute_import, division, print_function, with_statement

import errno
import multiprocessing
import os
import signal
import subprocess
@ -35,6 +34,13 @@ from tornado.iostream import PipeIOStream
from tornado.log import gen_log
from tornado.platform.auto import set_close_exec
from tornado import stack_context
from tornado.util import errno_from_exception

try:
    import multiprocessing
except ImportError:
    # Multiprocessing is not available on Google App Engine.
    multiprocessing = None

try:
    long  # py2
@ -44,6 +50,8 @@ except NameError:

def cpu_count():
    """Returns the number of processors on this machine."""
    if multiprocessing is None:
        return 1
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
@ -92,7 +100,8 @@ def fork_processes(num_processes, max_restarts=100):
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the debug=True option to `tornado.web.Application`).
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.
@ -135,7 +144,7 @@ def fork_processes(num_processes, max_restarts=100):
        try:
            pid, status = os.wait()
        except OSError as e:
            if e.errno == errno.EINTR:
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
@ -190,23 +199,34 @@ class Subprocess(object):

    def __init__(self, *args, **kwargs):
        self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
        # All FDs we create should be closed on error; those in to_close
        # should be closed in the parent process on success.
        pipe_fds = []
        to_close = []
        if kwargs.get('stdin') is Subprocess.STREAM:
            in_r, in_w = _pipe_cloexec()
            kwargs['stdin'] = in_r
            pipe_fds.extend((in_r, in_w))
            to_close.append(in_r)
            self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
        if kwargs.get('stdout') is Subprocess.STREAM:
            out_r, out_w = _pipe_cloexec()
            kwargs['stdout'] = out_w
            pipe_fds.extend((out_r, out_w))
            to_close.append(out_w)
            self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
        if kwargs.get('stderr') is Subprocess.STREAM:
            err_r, err_w = _pipe_cloexec()
            kwargs['stderr'] = err_w
            pipe_fds.extend((err_r, err_w))
            to_close.append(err_w)
            self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
        self.proc = subprocess.Popen(*args, **kwargs)
        try:
            self.proc = subprocess.Popen(*args, **kwargs)
        except:
            for fd in pipe_fds:
                os.close(fd)
            raise
        for fd in to_close:
            os.close(fd)
        for attr in ['stdin', 'stdout', 'stderr', 'pid']:
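Typical use of the ``STREAM`` pipes wired up above, reading a child's output asynchronously (a sketch; the command is arbitrary)::

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.process import Subprocess

    @gen.coroutine
    def run():
        proc = Subprocess(['echo', 'hello'], stdout=Subprocess.STREAM)
        output = yield gen.Task(proc.stdout.read_until_close)
        raise gen.Return(output)

    print(IOLoop.current().run_sync(run))  # b'hello\n'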
@ -271,7 +291,7 @@ class Subprocess(object):
        try:
            ret_pid, status = os.waitpid(pid, os.WNOHANG)
        except OSError as e:
            if e.args[0] == errno.ECHILD:
            if errno_from_exception(e) == errno.ECHILD:
                return
        if ret_pid == 0:
            return
@ -1,23 +1,23 @@
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement

from tornado.escape import utf8, _unicode, native_str
from tornado.concurrent import is_future
from tornado.escape import utf8, _unicode
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado.httputil import HTTPHeaders
from tornado.iostream import IOStream, SSLIOStream
from tornado import httputil
from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters
from tornado.iostream import StreamClosedError
from tornado.netutil import Resolver, OverrideResolver
from tornado.log import gen_log
from tornado import stack_context
from tornado.util import GzipDecompressor
from tornado.tcpclient import TCPClient

import base64
import collections
import copy
import functools
import os.path
import re
import socket
import ssl
import sys

try:
@ -30,7 +30,23 @@ try:
except ImportError:
    import urllib.parse as urlparse  # py3

_DEFAULT_CA_CERTS = os.path.dirname(__file__) + '/ca-certificates.crt'
try:
    import ssl
except ImportError:
    # ssl is not available on Google App Engine.
    ssl = None

try:
    import certifi
except ImportError:
    certifi = None


def _default_ca_certs():
    if certifi is None:
        raise Exception("The 'certifi' package is required to use https "
                        "in simple_httpclient")
    return certifi.where()


class SimpleAsyncHTTPClient(AsyncHTTPClient):
|
|||
"""
|
||||
def initialize(self, io_loop, max_clients=10,
|
||||
hostname_mapping=None, max_buffer_size=104857600,
|
||||
resolver=None, defaults=None):
|
||||
resolver=None, defaults=None, max_header_size=None):
|
||||
"""Creates a AsyncHTTPClient.
|
||||
|
||||
Only a single AsyncHTTPClient instance exists per IOLoop
|
||||
|
|
@ -72,7 +88,11 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
        self.max_clients = max_clients
        self.queue = collections.deque()
        self.active = {}
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
@ -82,14 +102,25 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)

    def close(self):
        super(SimpleAsyncHTTPClient, self).close()
        if self.own_resolver:
            self.resolver.close()
        self.tcp_client.close()

    def fetch_impl(self, request, callback):
        self.queue.append((request, callback))
        key = object()
        self.queue.append((key, request, callback))
        if not len(self.active) < self.max_clients:
            timeout_handle = self.io_loop.add_timeout(
                self.io_loop.time() + min(request.connect_timeout,
                                          request.request_timeout),
                functools.partial(self._on_timeout, key))
        else:
            timeout_handle = None
        self.waiting[key] = (request, callback, timeout_handle)
        self._process_queue()
        if self.queue:
            gen_log.debug("max_clients limit reached, request queued. "
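The effect of the new ``waiting`` map: a request that cannot leave the queue before its connect/request timeout now fails promptly with a 599 response instead of waiting indefinitely behind slow requests. A sketch of the scenario (the URL is a placeholder; ``force_instance`` avoids the per-IOLoop client cache)::

    from tornado.httpclient import AsyncHTTPClient
    from tornado.ioloop import IOLoop

    client = AsyncHTTPClient(max_clients=1, force_instance=True)
    pending = [3]

    def show(response):
        # Queued requests that miss their timeout get code 599.
        print(response.code, response.error)
        pending[0] -= 1
        if pending[0] == 0:
            IOLoop.current().stop()

    for _ in range(3):
        client.fetch('http://example.com/slow', show,
                     connect_timeout=1, request_timeout=2)

    IOLoop.current().start()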
@ -99,26 +130,46 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient):
    def _process_queue(self):
        with stack_context.NullContext():
            while self.queue and len(self.active) < self.max_clients:
                request, callback = self.queue.popleft()
                key = object()
                key, request, callback = self.queue.popleft()
                if key not in self.waiting:
                    continue
                self._remove_timeout(key)
                self.active[key] = (request, callback)
                release_callback = functools.partial(self._release_fetch, key)
                self._handle_request(request, release_callback, callback)

    def _handle_request(self, request, release_callback, final_callback):
        _HTTPConnection(self.io_loop, self, request, release_callback,
                        final_callback, self.max_buffer_size, self.resolver)
                        final_callback, self.max_buffer_size, self.tcp_client,
                        self.max_header_size)

    def _release_fetch(self, key):
        del self.active[key]
        self._process_queue()

    def _remove_timeout(self, key):
        if key in self.waiting:
            request, callback, timeout_handle = self.waiting[key]
            if timeout_handle is not None:
                self.io_loop.remove_timeout(timeout_handle)
            del self.waiting[key]

class _HTTPConnection(object):
    def _on_timeout(self, key):
        request, callback, timeout_handle = self.waiting[key]
        self.queue.remove((key, request, callback))
        timeout_response = HTTPResponse(
            request, 599, error=HTTPError(599, "Timeout"),
            request_time=self.io_loop.time() - request.start_time)
        self.io_loop.add_callback(callback, timeout_response)
        del self.waiting[key]


class _HTTPConnection(httputil.HTTPMessageDelegate):
    _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])

    def __init__(self, io_loop, client, request, release_callback,
                 final_callback, max_buffer_size, resolver):
                 final_callback, max_buffer_size, tcp_client,
                 max_header_size):
        self.start_time = io_loop.time()
        self.io_loop = io_loop
        self.client = client
@ -126,13 +177,15 @@ class _HTTPConnection(object):
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.max_buffer_size = max_buffer_size
        self.resolver = resolver
        self.tcp_client = tcp_client
        self.max_header_size = max_header_size
        self.code = None
        self.headers = None
        self.chunks = None
        self.chunks = []
        self._decompressor = None
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None
        self._sockaddr = None
        with stack_context.ExceptionStackContext(self._handle_exception):
            self.parsed = urlparse.urlsplit(_unicode(self.request.url))
            if self.parsed.scheme not in ("http", "https"):
@ -155,39 +208,31 @@ class _HTTPConnection(object):
                host = host[1:-1]
            self.parsed_hostname = host  # save final host for _on_connect

            if request.allow_ipv6:
                af = socket.AF_UNSPEC
            else:
                # We only try the first IP we get from getaddrinfo,
                # so restrict to ipv4 by default.
            if request.allow_ipv6 is False:
                af = socket.AF_INET
            else:
                af = socket.AF_UNSPEC

            self.resolver.resolve(host, port, af, callback=self._on_resolve)
            ssl_options = self._get_ssl_options(self.parsed.scheme)

    def _on_resolve(self, addrinfo):
        self.stream = self._create_stream(addrinfo)
            timeout = min(self.request.connect_timeout, self.request.request_timeout)
            if timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + timeout,
                    stack_context.wrap(self._on_timeout))
        self.stream.set_close_callback(self._on_close)
        # ipv6 addresses are broken (in self.parsed.hostname) until
        # 2.7, here is correctly parsed value calculated in __init__
        sockaddr = addrinfo[0][1]
        self.stream.connect(sockaddr, self._on_connect,
                            server_hostname=self.parsed_hostname)
            self.tcp_client.connect(host, port, af=af,
                                    ssl_options=ssl_options,
                                    callback=self._on_connect)

    def _create_stream(self, addrinfo):
        af = addrinfo[0][0]
        if self.parsed.scheme == "https":
    def _get_ssl_options(self, scheme):
        if scheme == "https":
            ssl_options = {}
            if self.request.validate_cert:
                ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
            if self.request.ca_certs is not None:
                ssl_options["ca_certs"] = self.request.ca_certs
            else:
                ssl_options["ca_certs"] = _DEFAULT_CA_CERTS
                ssl_options["ca_certs"] = _default_ca_certs()
            if self.request.client_key is not None:
                ssl_options["keyfile"] = self.request.client_key
            if self.request.client_cert is not None:
@ -199,27 +244,22 @@ class _HTTPConnection(object):
                # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
                # until 3.2.  Python 2.7 adds the ciphers argument, which
                # can also be used to disable SSLv2.  As a last resort
                # on python 2.6, we set ssl_version to SSLv3.  This is
                # on python 2.6, we set ssl_version to TLSv1.  This is
                # more narrow than we'd like since it also breaks
                # compatibility with servers configured for TLSv1 only,
                # but nearly all servers support SSLv3:
                # compatibility with servers configured for SSLv3 only,
                # but nearly all servers support both SSLv3 and TLSv1:
                # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
                if sys.version_info >= (2, 7):
                    ssl_options["ciphers"] = "DEFAULT:!SSLv2"
                    # In addition to disabling SSLv2, we also exclude certain
                    # classes of insecure ciphers.
                    ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES"
                else:
                    # This is really only necessary for pre-1.0 versions
                    # of openssl, but python 2.6 doesn't expose version
                    # information.
                    ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3

            return SSLIOStream(socket.socket(af),
                               io_loop=self.io_loop,
                               ssl_options=ssl_options,
                               max_buffer_size=self.max_buffer_size)
        else:
            return IOStream(socket.socket(af),
                            io_loop=self.io_loop,
                            max_buffer_size=self.max_buffer_size)
                    ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1
            return ssl_options
        return None

    def _on_timeout(self):
        self._timeout = None
@ -231,8 +271,16 @@ class _HTTPConnection(object):
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def _on_connect(self):
    def _on_connect(self, stream):
        if self.final_callback is None:
            # final_callback is cleared if we've hit our timeout.
            stream.close()
            return
        self.stream = stream
        self.stream.set_close_callback(self._on_close)
        self._remove_timeout()
        if self.final_callback is None:
            return
        if self.request.request_timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + self.request.request_timeout,
@ -269,32 +317,71 @@ class _HTTPConnection(object):
            self.request.headers["User-Agent"] = self.request.user_agent
        if not self.request.allow_nonstandard_methods:
            if self.request.method in ("POST", "PATCH", "PUT"):
                assert self.request.body is not None
                if (self.request.body is None and
                        self.request.body_producer is None):
                    raise AssertionError(
                        'Body must not be empty for "%s" request'
                        % self.request.method)
            else:
                assert self.request.body is None
                if (self.request.body is not None or
                        self.request.body_producer is not None):
                    raise AssertionError(
                        'Body must be empty for "%s" request'
                        % self.request.method)
        if self.request.expect_100_continue:
            self.request.headers["Expect"] = "100-continue"
        if self.request.body is not None:
            # When body_producer is used the caller is responsible for
            # setting Content-Length (or else chunked encoding will be used).
            self.request.headers["Content-Length"] = str(len(
                self.request.body))
        if (self.request.method == "POST" and
                "Content-Type" not in self.request.headers):
            self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        if self.request.use_gzip:
        if self.request.decompress_response:
            self.request.headers["Accept-Encoding"] = "gzip"
        req_path = ((self.parsed.path or '/') +
                    (('?' + self.parsed.query) if self.parsed.query else ''))
        request_lines = [utf8("%s %s HTTP/1.1" % (self.request.method,
                                                  req_path))]
        for k, v in self.request.headers.get_all():
            line = utf8(k) + b": " + utf8(v)
            if b'\n' in line:
                raise ValueError('Newline in header: ' + repr(line))
            request_lines.append(line)
        request_str = b"\r\n".join(request_lines) + b"\r\n\r\n"
        if self.request.body is not None:
            request_str += self.request.body
                    (('?' + self.parsed.query) if self.parsed.query else ''))
        self.stream.set_nodelay(True)
        self.stream.write(request_str)
        self.stream.read_until_regex(b"\r?\n\r?\n", self._on_headers)
        self.connection = HTTP1Connection(
            self.stream, True,
            HTTP1ConnectionParameters(
                no_keep_alive=True,
                max_header_size=self.max_header_size,
                decompress=self.request.decompress_response),
            self._sockaddr)
        start_line = httputil.RequestStartLine(self.request.method,
                                               req_path, 'HTTP/1.1')
        self.connection.write_headers(start_line, self.request.headers)
        if self.request.expect_100_continue:
            self._read_response()
        else:
            self._write_body(True)

    def _write_body(self, start_read):
        if self.request.body is not None:
            self.connection.write(self.request.body)
            self.connection.finish()
        elif self.request.body_producer is not None:
            fut = self.request.body_producer(self.connection.write)
            if is_future(fut):
                def on_body_written(fut):
                    fut.result()
                    self.connection.finish()
                    if start_read:
                        self._read_response()
                self.io_loop.add_future(fut, on_body_written)
                return
            self.connection.finish()
        if start_read:
            self._read_response()

    def _read_response(self):
        # Ensure that any exception raised in read_response ends up in our
        # stack context.
        self.io_loop.add_future(
            self.connection.read_response(self),
            lambda f: f.result())

    def _release(self):
        if self.release_callback is not None:
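``_write_body`` above is what enables the new streaming request bodies: a ``body_producer`` receives the connection's ``write`` function and may return a ``Future``. From the caller's side it looks roughly like this (a sketch; the URL and chunks are placeholders, and with no Content-Length set the request goes out chunked)::

    from tornado import gen
    from tornado.httpclient import AsyncHTTPClient, HTTPRequest
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def producer(write):
        for chunk in [b'part1', b'part2']:
            yield write(chunk)  # each write returns a Future

    @gen.coroutine
    def upload():
        client = AsyncHTTPClient()
        response = yield client.fetch(HTTPRequest(
            'http://localhost:8888/upload', method='POST',
            body_producer=producer))
        raise gen.Return(response.code)

    print(IOLoop.current().run_sync(upload))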
@ -312,43 +399,39 @@ class _HTTPConnection(object):
    def _handle_exception(self, typ, value, tb):
        if self.final_callback:
            self._remove_timeout()
            if isinstance(value, StreamClosedError):
                value = HTTPError(599, "Stream closed")
            self._run_callback(HTTPResponse(self.request, 599, error=value,
                                            request_time=self.io_loop.time() - self.start_time,
                                            ))

            if hasattr(self, "stream"):
                # TODO: this may cause a StreamClosedError to be raised
                # by the connection's Future.  Should we cancel the
                # connection more gracefully?
                self.stream.close()
            return True
        else:
            # If our callback has already been called, we are probably
            # catching an exception that is not caused by us but rather
            # some child of our callback. Rather than drop it on the floor,
            # pass it along.
            return False
            # pass it along, unless it's just the stream being closed.
            return isinstance(value, StreamClosedError)

    def _on_close(self):
        if self.final_callback is not None:
            message = "Connection closed"
            if self.stream.error:
                message = str(self.stream.error)
                raise self.stream.error
            raise HTTPError(599, message)

    def _handle_1xx(self, code):
        self.stream.read_until_regex(b"\r?\n\r?\n", self._on_headers)

    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line)
        assert match
        code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)
        if 100 <= code < 200:
            self._handle_1xx(code)
    def headers_received(self, first_line, headers):
        if self.request.expect_100_continue and first_line.code == 100:
            self._write_body(False)
            return
        else:
            self.code = code
            self.reason = match.group(2)
        self.headers = headers
        self.code = first_line.code
        self.reason = first_line.reason

        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
@ -365,17 +448,12 @@ class _HTTPConnection(object):
                content_length = None

        if self.request.header_callback is not None:
            # re-attach the newline we split on earlier
            self.request.header_callback(first_line + _)
            # Reassemble the start line.
            self.request.header_callback('%s %s %s\r\n' % first_line)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')

        if self.request.method == "HEAD" or self.code == 304:
            # HEAD requests and 304 responses never have content, even
            # though they may have content-length headers
            self._on_body(b"")
            return
        if 100 <= self.code < 200 or self.code == 204:
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
@ -383,21 +461,9 @@ class _HTTPConnection(object):
                content_length not in (None, 0)):
            raise ValueError("Response with code %d should not have body" %
                             self.code)
            self._on_body(b"")
            return

        if (self.request.use_gzip and
                self.headers.get("Content-Encoding") == "gzip"):
            self._decompressor = GzipDecompressor()
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b"\r\n", self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)

    def _on_body(self, data):
    def finish(self):
        data = b''.join(self.chunks)
        self._remove_timeout()
        original_request = getattr(self.request, "original_request",
                                   self.request)
@ -433,19 +499,12 @@ class _HTTPConnection(object):
            self.client.fetch(new_request, final_callback)
            self._on_end_request()
            return
        if self._decompressor:
            data = (self._decompressor.decompress(data) +
                    self._decompressor.flush())
        if self.request.streaming_callback:
            if self.chunks is None:
                # if chunks is not None, we already called streaming_callback
                # in _on_chunk_data
                self.request.streaming_callback(data)
            buffer = BytesIO()
        else:
            buffer = BytesIO(data)  # TODO: don't require one big string?
        response = HTTPResponse(original_request,
                                self.code, reason=self.reason,
                                self.code, reason=getattr(self, 'reason', None),
                                headers=self.headers,
                                request_time=self.io_loop.time() - self.start_time,
                                buffer=buffer,
@ -456,40 +515,11 @@ class _HTTPConnection(object):
|
|||
def _on_end_request(self):
|
||||
self.stream.close()
|
||||
|
||||
def _on_chunk_length(self, data):
|
||||
# TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
|
||||
length = int(data.strip(), 16)
|
||||
if length == 0:
|
||||
if self._decompressor is not None:
|
||||
tail = self._decompressor.flush()
|
||||
if tail:
|
||||
# I believe the tail will always be empty (i.e.
|
||||
# decompress will return all it can). The purpose
|
||||
# of the flush call is to detect errors such
|
||||
# as truncated input. But in case it ever returns
|
||||
# anything, treat it as an extra chunk
|
||||
if self.request.streaming_callback is not None:
|
||||
self.request.streaming_callback(tail)
|
||||
else:
|
||||
self.chunks.append(tail)
|
||||
# all the data has been decompressed, so we don't need to
|
||||
# decompress again in _on_body
|
||||
self._decompressor = None
|
||||
self._on_body(b''.join(self.chunks))
|
||||
else:
|
||||
self.stream.read_bytes(length + 2, # chunk ends with \r\n
|
||||
self._on_chunk_data)
|
||||
|
||||
def _on_chunk_data(self, data):
|
||||
assert data[-2:] == b"\r\n"
|
||||
chunk = data[:-2]
|
||||
if self._decompressor:
|
||||
chunk = self._decompressor.decompress(chunk)
|
||||
def data_received(self, chunk):
|
||||
if self.request.streaming_callback is not None:
|
||||
self.request.streaming_callback(chunk)
|
||||
else:
|
||||
self.chunks.append(chunk)
|
||||
self.stream.read_until(b"\r\n", self._on_chunk_length)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
|||
|
|
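A rough usage sketch (not part of the commit) of the public callbacks these hunks ultimately serve; ``example.com`` stands in for a real server::

    from tornado.httpclient import AsyncHTTPClient
    from tornado.ioloop import IOLoop

    chunks = []

    def on_header(line):
        print(line.rstrip())   # start line, then each header, then a blank line

    def on_chunk(chunk):
        chunks.append(chunk)   # body arrives incrementally; response.body stays empty

    def on_done(response):
        print(b''.join(chunks))
        IOLoop.instance().stop()

    AsyncHTTPClient().fetch('http://example.com/', on_done,
                            header_callback=on_header,
                            streaming_callback=on_chunk)
    IOLoop.instance().start()
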
@@ -266,6 +266,18 @@ def wrap(fn):
     # TODO: Any other better way to store contexts and update them in wrapped function?
     cap_contexts = [_state.contexts]
 
+    if not cap_contexts[0][0] and not cap_contexts[0][1]:
+        # Fast path when there are no active contexts.
+        def null_wrapper(*args, **kwargs):
+            try:
+                current_state = _state.contexts
+                _state.contexts = cap_contexts[0]
+                return fn(*args, **kwargs)
+            finally:
+                _state.contexts = current_state
+        null_wrapper._wrapped = True
+        return null_wrapper
+
     def wrapped(*args, **kwargs):
         ret = None
         try:

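A sketch of the call pattern this fast path serves (illustrative only; ``wrap`` is the public API shown above)::

    from tornado import stack_context

    def callback():
        print('called with no active StackContexts')

    # With no contexts entered, wrap() now returns the cheap null_wrapper
    # instead of the full save/restore machinery.
    wrapped = stack_context.wrap(callback)
    wrapped()
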
Shared/lib/python2.7/site-packages/tornado/tcpclient.py (new file, 179 lines)

@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A non-blocking TCP connection factory.
+"""
+from __future__ import absolute_import, division, print_function, with_statement
+
+import functools
+import socket
+
+from tornado.concurrent import Future
+from tornado.ioloop import IOLoop
+from tornado.iostream import IOStream
+from tornado import gen
+from tornado.netutil import Resolver
+
+_INITIAL_CONNECT_TIMEOUT = 0.3
+
+
+class _Connector(object):
+    """A stateless implementation of the "Happy Eyeballs" algorithm.
+
+    "Happy Eyeballs" is documented in RFC6555 as the recommended practice
+    for when both IPv4 and IPv6 addresses are available.
+
+    In this implementation, we partition the addresses by family, and
+    make the first connection attempt to whichever address was
+    returned first by ``getaddrinfo``.  If that connection fails or
+    times out, we begin a connection in parallel to the first address
+    of the other family.  If there are additional failures we retry
+    with other addresses, keeping one connection attempt per family
+    in flight at a time.
+
+    http://tools.ietf.org/html/rfc6555
+
+    """
+    def __init__(self, addrinfo, io_loop, connect):
+        self.io_loop = io_loop
+        self.connect = connect
+
+        self.future = Future()
+        self.timeout = None
+        self.last_error = None
+        self.remaining = len(addrinfo)
+        self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
+
+    @staticmethod
+    def split(addrinfo):
+        """Partition the ``addrinfo`` list by address family.
+
+        Returns two lists.  The first list contains the first entry from
+        ``addrinfo`` and all others with the same family, and the
+        second list contains all other addresses (normally one list will
+        be AF_INET and the other AF_INET6, although non-standard resolvers
+        may return additional families).
+        """
+        primary = []
+        secondary = []
+        primary_af = addrinfo[0][0]
+        for af, addr in addrinfo:
+            if af == primary_af:
+                primary.append((af, addr))
+            else:
+                secondary.append((af, addr))
+        return primary, secondary
+
+    def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
+        self.try_connect(iter(self.primary_addrs))
+        self.set_timeout(timeout)
+        return self.future
+
+    def try_connect(self, addrs):
+        try:
+            af, addr = next(addrs)
+        except StopIteration:
+            # We've reached the end of our queue, but the other queue
+            # might still be working.  Send a final error on the future
+            # only when both queues are finished.
+            if self.remaining == 0 and not self.future.done():
+                self.future.set_exception(self.last_error or
+                                          IOError("connection failed"))
+            return
+        future = self.connect(af, addr)
+        future.add_done_callback(functools.partial(self.on_connect_done,
+                                                   addrs, af, addr))
+
+    def on_connect_done(self, addrs, af, addr, future):
+        self.remaining -= 1
+        try:
+            stream = future.result()
+        except Exception as e:
+            if self.future.done():
+                return
+            # Error: try again (but remember what happened so we have an
+            # error to raise in the end)
+            self.last_error = e
+            self.try_connect(addrs)
+            if self.timeout is not None:
+                # If the first attempt failed, don't wait for the
+                # timeout to try an address from the secondary queue.
+                self.on_timeout()
+            return
+        self.clear_timeout()
+        if self.future.done():
+            # This is a late arrival; just drop it.
+            stream.close()
+        else:
+            self.future.set_result((af, addr, stream))
+
+    def set_timeout(self, timeout):
+        self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
+                                                self.on_timeout)
+
+    def on_timeout(self):
+        self.timeout = None
+        self.try_connect(iter(self.secondary_addrs))
+
+    def clear_timeout(self):
+        if self.timeout is not None:
+            self.io_loop.remove_timeout(self.timeout)
+
+
+class TCPClient(object):
+    """A non-blocking TCP connection factory.
+    """
+    def __init__(self, resolver=None, io_loop=None):
+        self.io_loop = io_loop or IOLoop.current()
+        if resolver is not None:
+            self.resolver = resolver
+            self._own_resolver = False
+        else:
+            self.resolver = Resolver(io_loop=io_loop)
+            self._own_resolver = True
+
+    def close(self):
+        if self._own_resolver:
+            self.resolver.close()
+
+    @gen.coroutine
+    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
+                max_buffer_size=None):
+        """Connect to the given host and port.
+
+        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
+        ``ssl_options`` is not None).
+        """
+        addrinfo = yield self.resolver.resolve(host, port, af)
+        connector = _Connector(
+            addrinfo, self.io_loop,
+            functools.partial(self._create_stream, max_buffer_size))
+        af, addr, stream = yield connector.start()
+        # TODO: For better performance we could cache the (af, addr)
+        # information here and re-use it on subsequent connections to
+        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
+        if ssl_options is not None:
+            stream = yield stream.start_tls(False, ssl_options=ssl_options,
+                                            server_hostname=host)
+        raise gen.Return(stream)
+
+    def _create_stream(self, max_buffer_size, af, addr):
+        # Always connect in plaintext; we'll convert to ssl if necessary
+        # after one connection has completed.
+        stream = IOStream(socket.socket(af),
+                          io_loop=self.io_loop,
+                          max_buffer_size=max_buffer_size)
+        return stream.connect(addr)

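A minimal connection sketch using the new factory (the host/port and the echo-style exchange are placeholders, not part of the commit)::

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.tcpclient import TCPClient

    @gen.coroutine
    def talk():
        # Resolves, runs Happy Eyeballs across the results, and yields an IOStream.
        stream = yield TCPClient().connect('localhost', 8888)
        yield stream.write(b'ping\r\n')
        reply = yield stream.read_until(b'\r\n')
        raise gen.Return(reply)

    print(IOLoop.current().run_sync(talk))
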
@@ -20,13 +20,19 @@ from __future__ import absolute_import, division, print_function, with_statement
 import errno
 import os
 import socket
-import ssl
 
 from tornado.log import app_log
 from tornado.ioloop import IOLoop
 from tornado.iostream import IOStream, SSLIOStream
 from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
 from tornado import process
+from tornado.util import errno_from_exception
+
+try:
+    import ssl
+except ImportError:
+    # ssl is not available on Google App Engine.
+    ssl = None
 
 
 class TCPServer(object):

@@ -81,13 +87,15 @@ class TCPServer(object):
     .. versionadded:: 3.1
        The ``max_buffer_size`` argument.
     """
-    def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None):
+    def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None,
+                 read_chunk_size=None):
         self.io_loop = io_loop
         self.ssl_options = ssl_options
         self._sockets = {}  # fd -> socket object
         self._pending_sockets = []
         self._started = False
         self.max_buffer_size = max_buffer_size
+        self.read_chunk_size = read_chunk_size
 
         # Verify the SSL options. Otherwise we don't get errors until clients
         # connect. This doesn't verify that the keys are legitimate, but

@@ -180,7 +188,8 @@ class TCPServer(object):
         between any server code.
 
         Note that multiple processes are not compatible with the autoreload
-        module (or the ``debug=True`` option to `tornado.web.Application`).
+        module (or the ``autoreload=True`` option to `tornado.web.Application`
+        which defaults to True when ``debug=True``).
         When using multiple processes, no IOLoops can be created or
         referenced until after the call to ``TCPServer.start(n)``.
         """

@@ -229,16 +238,20 @@ class TCPServer(object):
             # catch another error later on (AttributeError in
             # SSLIOStream._do_ssl_handshake).
             # To test this behavior, try nmap with the -sT flag.
-            # https://github.com/facebook/tornado/pull/750
-            if err.args[0] in (errno.ECONNABORTED, errno.EINVAL):
+            # https://github.com/tornadoweb/tornado/pull/750
+            if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL):
                 return connection.close()
             else:
                 raise
         try:
             if self.ssl_options is not None:
-                stream = SSLIOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size)
+                stream = SSLIOStream(connection, io_loop=self.io_loop,
+                                     max_buffer_size=self.max_buffer_size,
+                                     read_chunk_size=self.read_chunk_size)
             else:
-                stream = IOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size)
+                stream = IOStream(connection, io_loop=self.io_loop,
+                                  max_buffer_size=self.max_buffer_size,
+                                  read_chunk_size=self.read_chunk_size)
             self.handle_stream(stream, address)
         except Exception:
             app_log.error("Error in connection callback", exc_info=True)

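For context, a sketch of how the new ``read_chunk_size`` option comes together in a server subclass; ``EchoServer`` here is illustrative and not part of the commit::

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.iostream import StreamClosedError
    from tornado.tcpserver import TCPServer

    class EchoServer(TCPServer):
        @gen.coroutine
        def handle_stream(self, stream, address):
            while True:
                try:
                    line = yield stream.read_until(b"\n")
                    yield stream.write(line)
                except StreamClosedError:
                    return

    # read_chunk_size is the knob this commit threads through to each IOStream.
    server = EchoServer(read_chunk_size=4096)
    server.listen(8888)
    IOLoop.current().start()
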
@@ -21,7 +21,7 @@ Basic usage looks like::
     t = template.Template("<html>{{ myvalue }}</html>")
     print t.generate(myvalue="XXX")
 
-Loader is a class that loads templates from a root directory and caches
+`Loader` is a class that loads templates from a root directory and caches
 the compiled templates::
 
     loader = template.Loader("/home/btaylor")

@@ -56,16 +56,17 @@ interesting. Syntax for the templates::
     {% end %}
 
 Unlike most other template systems, we do not put any restrictions on the
-expressions you can include in your statements. if and for blocks get
-translated exactly into Python, you can do complex expressions like::
+expressions you can include in your statements. ``if`` and ``for`` blocks get
+translated exactly into Python, so you can do complex expressions like::
 
     {% for student in [p for p in people if p.student and p.age > 23] %}
       <li>{{ escape(student.name) }}</li>
     {% end %}
 
 Translating directly to Python means you can apply functions to expressions
-easily, like the escape() function in the examples above. You can pass
-functions in to your template just like any other variable::
+easily, like the ``escape()`` function in the examples above. You can pass
+functions in to your template just like any other variable
+(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
 
     ### Python code
     def add(x, y):

@@ -75,8 +76,8 @@ functions in to your template just like any other variable::
     ### The template
     {{ add(1, 2) }}
 
-We provide the functions escape(), url_escape(), json_encode(), and squeeze()
-to all templates by default.
+We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
+`.json_encode()`, and `.squeeze()` to all templates by default.
 
 Typical applications do not create `Template` or `Loader` instances by
 hand, but instead use the `~.RequestHandler.render` and

@@ -169,13 +170,17 @@ with ``{# ... #}``.
 
     {% module Template("foo.html", arg=42) %}
 
+    ``UIModules`` are a feature of the `tornado.web.RequestHandler`
+    class (and specifically its ``render`` method) and will not work
+    when the template system is used on its own in other contexts.
+
 ``{% raw *expr* %}``
     Outputs the result of the given expression without autoescaping.
 
 ``{% set *x* = *y* %}``
     Sets a local variable.
 
-``{% try %}...{% except %}...{% finally %}...{% else %}...{% end %}``
+``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
     Same as the python ``try`` statement.
 
 ``{% while *condition* %}... {% end %}``

@@ -362,10 +367,9 @@ class Loader(BaseLoader):
 
     def _create_template(self, name):
         path = os.path.join(self.root, name)
-        f = open(path, "rb")
-        template = Template(f.read(), name=name, loader=self)
-        f.close()
-        return template
+        with open(path, "rb") as f:
+            template = Template(f.read(), name=name, loader=self)
+            return template
 
 
 class DictLoader(BaseLoader):

@@ -780,7 +784,7 @@ def _parse(reader, template, in_block=None, in_loop=None):
         if allowed_parents is not None:
             if not in_block:
                 raise ParseError("%s outside %s block" %
-                    (operator, allowed_parents))
+                                 (operator, allowed_parents))
             if in_block not in allowed_parents:
                 raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
             body.chunks.append(_IntermediateControlBlock(contents, line))

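The function-passing behavior documented above is easy to check in isolation (a quick sketch)::

    from tornado import template

    # Any callable passed to generate() is visible inside the template.
    t = template.Template("{{ add(1, 2) }}")
    print(t.generate(add=lambda x, y: x + y))  # prints b'3'
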
Shared/lib/python2.7/site-packages/tornado/test/__main__.py (new file, 14 lines)

@@ -0,0 +1,14 @@
"""Shim to allow python -m tornado.test.
|
||||
|
||||
This only works in python 2.7+.
|
||||
"""
|
||||
from __future__ import absolute_import, division, print_function, with_statement
|
||||
|
||||
from tornado.test.runtests import all, main
|
||||
|
||||
# tornado.testing.main autodiscovery relies on 'all' being present in
|
||||
# the main module, so import it here even though it is not used directly.
|
||||
# The following line prevents a pyflakes warning.
|
||||
all = all
|
||||
|
||||
main()
|
||||
|
|
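In other words, after this commit the test suite can be invoked as a module (Python 2.7+)::

    python -m tornado.test
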
@@ -67,11 +67,29 @@ class OAuth1ClientLoginHandler(RequestHandler, OAuthMixin):
         self.finish(user)
 
     def _oauth_get_user(self, access_token, callback):
+        if self.get_argument('fail_in_get_user', None):
+            raise Exception("failing in get_user")
         if access_token != dict(key='uiop', secret='5678'):
             raise Exception("incorrect access token %r" % access_token)
         callback(dict(email='foo@example.com'))
 
 
+class OAuth1ClientLoginCoroutineHandler(OAuth1ClientLoginHandler):
+    """Replaces OAuth1ClientLoginHandler's get() with a coroutine."""
+    @gen.coroutine
+    def get(self):
+        if self.get_argument('oauth_token', None):
+            # Ensure that any exceptions are set on the returned Future,
+            # not simply thrown into the surrounding StackContext.
+            try:
+                yield self.get_authenticated_user()
+            except Exception as e:
+                self.set_status(503)
+                self.write("got exception: %s" % e)
+        else:
+            yield self.authorize_redirect()
+
+
 class OAuth1ClientRequestParametersHandler(RequestHandler, OAuthMixin):
     def initialize(self, version):
         self._OAUTH_VERSION = version

@@ -255,6 +273,9 @@ class AuthTest(AsyncHTTPTestCase):
              dict(version='1.0')),
             ('/oauth10a/client/login', OAuth1ClientLoginHandler,
              dict(test=self, version='1.0a')),
+            ('/oauth10a/client/login_coroutine',
+             OAuth1ClientLoginCoroutineHandler,
+             dict(test=self, version='1.0a')),
             ('/oauth10a/client/request_params',
              OAuth1ClientRequestParametersHandler,
              dict(version='1.0a')),

@@ -348,6 +369,12 @@ class AuthTest(AsyncHTTPTestCase):
         self.assertTrue('oauth_nonce' in parsed)
         self.assertTrue('oauth_signature' in parsed)
 
+    def test_oauth10a_get_user_coroutine_exception(self):
+        response = self.fetch(
+            '/oauth10a/client/login_coroutine?oauth_token=zxcv&fail_in_get_user=true',
+            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
+        self.assertEqual(response.code, 503)
+
     def test_oauth2_redirect(self):
         response = self.fetch('/oauth2/client/login', follow_redirects=False)
         self.assertEqual(response.code, 302)

@@ -30,6 +30,12 @@ from tornado.tcpserver import TCPServer
 from tornado.testing import AsyncTestCase, LogTrapTestCase, bind_unused_port, gen_test
 
 
+try:
+    from concurrent import futures
+except ImportError:
+    futures = None
+
+
 class ReturnFutureTest(AsyncTestCase):
     @return_future
     def sync_future(self, callback):

@@ -2,6 +2,7 @@ from __future__ import absolute_import, division, print_function, with_statement
 
 from hashlib import md5
 
+from tornado.escape import utf8
 from tornado.httpclient import HTTPRequest
 from tornado.stack_context import ExceptionStackContext
 from tornado.testing import AsyncHTTPTestCase

@@ -21,7 +22,8 @@ if pycurl is not None:
 @unittest.skipIf(pycurl is None, "pycurl module not present")
 class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
     def get_http_client(self):
-        client = CurlAsyncHTTPClient(io_loop=self.io_loop)
+        client = CurlAsyncHTTPClient(io_loop=self.io_loop,
+                                     defaults=dict(allow_ipv6=False))
         # make sure AsyncHTTPClient magic doesn't give us the wrong class
         self.assertTrue(isinstance(client, CurlAsyncHTTPClient))
         return client

@@ -51,10 +53,10 @@ class DigestAuthHandler(RequestHandler):
         assert param_dict['nonce'] == nonce
         assert param_dict['username'] == username
         assert param_dict['uri'] == self.request.path
-        h1 = md5('%s:%s:%s' % (username, realm, password)).hexdigest()
-        h2 = md5('%s:%s' % (self.request.method,
-                            self.request.path)).hexdigest()
-        digest = md5('%s:%s:%s' % (h1, nonce, h2)).hexdigest()
+        h1 = md5(utf8('%s:%s:%s' % (username, realm, password))).hexdigest()
+        h2 = md5(utf8('%s:%s' % (self.request.method,
+                                 self.request.path))).hexdigest()
+        digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()
         if digest == param_dict['response']:
             self.write('ok')
         else:

@@ -66,15 +68,28 @@ class DigestAuthHandler(RequestHandler):
                     (realm, nonce, opaque))
 
 
+class CustomReasonHandler(RequestHandler):
+    def get(self):
+        self.set_status(200, "Custom reason")
+
+
+class CustomFailReasonHandler(RequestHandler):
+    def get(self):
+        self.set_status(400, "Custom reason")
+
+
 @unittest.skipIf(pycurl is None, "pycurl module not present")
 class CurlHTTPClientTestCase(AsyncHTTPTestCase):
     def setUp(self):
         super(CurlHTTPClientTestCase, self).setUp()
-        self.http_client = CurlAsyncHTTPClient(self.io_loop)
+        self.http_client = CurlAsyncHTTPClient(self.io_loop,
+                                               defaults=dict(allow_ipv6=False))
 
     def get_app(self):
         return Application([
             ('/digest', DigestAuthHandler),
+            ('/custom_reason', CustomReasonHandler),
+            ('/custom_fail_reason', CustomFailReasonHandler),
         ])
 
     def test_prepare_curl_callback_stack_context(self):

@@ -97,3 +112,11 @@ class CurlHTTPClientTestCase(AsyncHTTPTestCase):
         response = self.fetch('/digest', auth_mode='digest',
                               auth_username='foo', auth_password='bar')
         self.assertEqual(response.body, b'ok')
+
+    def test_custom_reason(self):
+        response = self.fetch('/custom_reason')
+        self.assertEqual(response.reason, "Custom reason")
+
+    def test_fail_custom_reason(self):
+        response = self.fetch('/custom_fail_reason')
+        self.assertEqual(str(response.error), "HTTP 400: Custom reason")

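For reference, the digest flow exercised above goes through the ordinary fetch interface; a rough sketch (the URL is a placeholder)::

    from tornado.curl_httpclient import CurlAsyncHTTPClient
    from tornado.ioloop import IOLoop

    def on_response(response):
        print(response.body)  # b'ok' on success
        IOLoop.instance().stop()

    CurlAsyncHTTPClient().fetch('http://example.com/digest', on_response,
                                auth_mode='digest',
                                auth_username='foo', auth_password='bar')
    IOLoop.instance().start()
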
@@ -144,7 +144,7 @@ class EscapeTestCase(unittest.TestCase):
            (u("<foo>"), u("&lt;foo&gt;")),
            (b"<foo>", b"&lt;foo&gt;"),

-           ("<>&\"", "&lt;&gt;&amp;&quot;"),
+           ("<>&\"'", "&lt;&gt;&amp;&quot;&#39;"),
            ("&amp;", "&amp;amp;"),

            (u("<\u00e9>"), u("&lt;\u00e9&gt;")),

@@ -1,6 +1,7 @@
 from __future__ import absolute_import, division, print_function, with_statement
 
+import contextlib
 import datetime
 import functools
 import sys
 import textwrap

@@ -8,7 +9,7 @@ import time
 import platform
 import weakref
 
-from tornado.concurrent import return_future
+from tornado.concurrent import return_future, Future
 from tornado.escape import url_escape
 from tornado.httpclient import AsyncHTTPClient
 from tornado.ioloop import IOLoop

@@ -20,6 +21,10 @@ from tornado.web import Application, RequestHandler, asynchronous, HTTPError
 
 from tornado import gen
 
+try:
+    from concurrent import futures
+except ImportError:
+    futures = None
 
 skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 not available')
 skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',

@@ -281,18 +286,67 @@ class GenEngineTest(AsyncTestCase):
             self.stop()
         self.run_gen(f)
 
-    def test_multi_delayed(self):
+    def test_multi_dict(self):
+        @gen.engine
+        def f():
+            (yield gen.Callback("k1"))("v1")
+            (yield gen.Callback("k2"))("v2")
+            results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2"))
+            self.assertEqual(results, dict(foo="v1", bar="v2"))
+            self.stop()
+        self.run_gen(f)
+
+    # The following tests explicitly run with both gen.Multi
+    # and gen.multi_future (Task returns a Future, so it can be used
+    # with either).
+    def test_multi_yieldpoint_delayed(self):
         @gen.engine
         def f():
             # callbacks run at different times
-            responses = yield [
+            responses = yield gen.Multi([
                 gen.Task(self.delay_callback, 3, arg="v1"),
                 gen.Task(self.delay_callback, 1, arg="v2"),
-            ]
+            ])
             self.assertEqual(responses, ["v1", "v2"])
             self.stop()
         self.run_gen(f)
 
+    def test_multi_yieldpoint_dict_delayed(self):
+        @gen.engine
+        def f():
+            # callbacks run at different times
+            responses = yield gen.Multi(dict(
+                foo=gen.Task(self.delay_callback, 3, arg="v1"),
+                bar=gen.Task(self.delay_callback, 1, arg="v2"),
+            ))
+            self.assertEqual(responses, dict(foo="v1", bar="v2"))
+            self.stop()
+        self.run_gen(f)
+
+    def test_multi_future_delayed(self):
+        @gen.engine
+        def f():
+            # callbacks run at different times
+            responses = yield gen.multi_future([
+                gen.Task(self.delay_callback, 3, arg="v1"),
+                gen.Task(self.delay_callback, 1, arg="v2"),
+            ])
+            self.assertEqual(responses, ["v1", "v2"])
+            self.stop()
+        self.run_gen(f)
+
+    def test_multi_future_dict_delayed(self):
+        @gen.engine
+        def f():
+            # callbacks run at different times
+            responses = yield gen.multi_future(dict(
+                foo=gen.Task(self.delay_callback, 3, arg="v1"),
+                bar=gen.Task(self.delay_callback, 1, arg="v2"),
+            ))
+            self.assertEqual(responses, dict(foo="v1", bar="v2"))
+            self.stop()
+        self.run_gen(f)
+
     @skipOnTravis
     @gen_test
     def test_multi_performance(self):

@@ -304,6 +358,23 @@ class GenEngineTest(AsyncTestCase):
         end = time.time()
         self.assertLess(end - start, 1.0)
 
+    @gen_test
+    def test_multi_empty(self):
+        # Empty lists or dicts should return the same type.
+        x = yield []
+        self.assertTrue(isinstance(x, list))
+        y = yield {}
+        self.assertTrue(isinstance(y, dict))
+
+    @gen_test
+    def test_multi_mixed_types(self):
+        # A YieldPoint (Wait) and Future (Task) can be combined
+        # (and use the YieldPoint codepath)
+        (yield gen.Callback("k1"))("v1")
+        responses = yield [gen.Wait("k1"),
+                           gen.Task(self.delay_callback, 3, arg="v2")]
+        self.assertEqual(responses, ["v1", "v2"])
+
     @gen_test
     def test_future(self):
         result = yield self.async_future(1)

@@ -314,6 +385,11 @@ class GenEngineTest(AsyncTestCase):
         results = yield [self.async_future(1), self.async_future(2)]
         self.assertEqual(results, [1, 2])
 
+    @gen_test
+    def test_multi_dict_future(self):
+        results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
+        self.assertEqual(results, dict(foo=1, bar=2))
+
     def test_arguments(self):
         @gen.engine
         def f():

@@ -698,8 +774,14 @@ class GenCoroutineTest(AsyncTestCase):
     def test_replace_context_exception(self):
         # Test exception handling: exceptions thrown into the stack context
         # can be caught and replaced.
+        # Note that this test and the following are for behavior that is
+        # not really supported any more: coroutines no longer create a
+        # stack context automatically; but one is created after the first
+        # YieldPoint (i.e. not a Future).
         @gen.coroutine
         def f2():
+            (yield gen.Callback(1))()
+            yield gen.Wait(1)
             self.io_loop.add_callback(lambda: 1 / 0)
             try:
                 yield gen.Task(self.io_loop.add_timeout,

@@ -718,6 +800,8 @@ class GenCoroutineTest(AsyncTestCase):
         # can be caught and ignored.
         @gen.coroutine
         def f2():
+            (yield gen.Callback(1))()
+            yield gen.Wait(1)
             self.io_loop.add_callback(lambda: 1 / 0)
             try:
                 yield gen.Task(self.io_loop.add_timeout,

@@ -729,6 +813,31 @@ class GenCoroutineTest(AsyncTestCase):
         self.assertEqual(result, 42)
         self.finished = True
 
+    @gen_test
+    def test_moment(self):
+        calls = []
+
+        @gen.coroutine
+        def f(name, yieldable):
+            for i in range(5):
+                calls.append(name)
+                yield yieldable
+
+        # First, confirm the behavior without moment: each coroutine
+        # monopolizes the event loop until it finishes.
+        immediate = Future()
+        immediate.set_result(None)
+        yield [f('a', immediate), f('b', immediate)]
+        self.assertEqual(''.join(calls), 'aaaaabbbbb')
+
+        # With moment, they take turns.
+        calls = []
+        yield [f('a', gen.moment), f('b', gen.moment)]
+        self.assertEqual(''.join(calls), 'ababababab')
+        self.finished = True
+
+        calls = []
+        yield [f('a', gen.moment), f('b', immediate)]
+        self.assertEqual(''.join(calls), 'abbbbbaaaa')
+
 
 class GenSequenceHandler(RequestHandler):
     @asynchronous

@@ -803,7 +912,6 @@ class GenExceptionHandler(RequestHandler):
 
 
 class GenCoroutineExceptionHandler(RequestHandler):
-    @asynchronous
     @gen.coroutine
     def get(self):
         # This test depends on the order of the two decorators.

@@ -909,5 +1017,55 @@ class GenWebTest(AsyncHTTPTestCase):
         response = self.fetch('/async_prepare_error')
         self.assertEqual(response.code, 403)
 
+
+class WithTimeoutTest(AsyncTestCase):
+    @gen_test
+    def test_timeout(self):
+        with self.assertRaises(gen.TimeoutError):
+            yield gen.with_timeout(datetime.timedelta(seconds=0.1),
+                                   Future())
+
+    @gen_test
+    def test_completes_before_timeout(self):
+        future = Future()
+        self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
+                                 lambda: future.set_result('asdf'))
+        result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
+                                        future)
+        self.assertEqual(result, 'asdf')
+
+    @gen_test
+    def test_fails_before_timeout(self):
+        future = Future()
+        self.io_loop.add_timeout(
+            datetime.timedelta(seconds=0.1),
+            lambda: future.set_exception(ZeroDivisionError))
+        with self.assertRaises(ZeroDivisionError):
+            yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
+
+    @gen_test
+    def test_already_resolved(self):
+        future = Future()
+        future.set_result('asdf')
+        result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
+                                        future)
+        self.assertEqual(result, 'asdf')
+
+    @unittest.skipIf(futures is None, 'futures module not present')
+    @gen_test
+    def test_timeout_concurrent_future(self):
+        with futures.ThreadPoolExecutor(1) as executor:
+            with self.assertRaises(gen.TimeoutError):
+                yield gen.with_timeout(self.io_loop.time(),
+                                       executor.submit(time.sleep, 0.1))
+
+    @unittest.skipIf(futures is None, 'futures module not present')
+    @gen_test
+    def test_completed_concurrent_future(self):
+        with futures.ThreadPoolExecutor(1) as executor:
+            yield gen.with_timeout(datetime.timedelta(seconds=3600),
+                                   executor.submit(lambda: None))
+
 
 if __name__ == '__main__':
     unittest.main()

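``gen.with_timeout``, exercised by the tests above, wraps any Future with a deadline; a sketch of typical use (``fetch_with_deadline`` is an illustrative helper, not part of the commit)::

    import datetime
    from tornado import gen

    @gen.coroutine
    def fetch_with_deadline(client, url):
        try:
            # Raises gen.TimeoutError if the fetch takes longer than 5 seconds;
            # the underlying fetch itself is not cancelled.
            response = yield gen.with_timeout(
                datetime.timedelta(seconds=5), client.fetch(url))
        except gen.TimeoutError:
            response = None
        raise gen.Return(response)
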
@@ -18,7 +18,7 @@ from tornado.log import gen_log
 from tornado import netutil
 from tornado.stack_context import ExceptionStackContext, NullContext
 from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
-from tornado.test.util import unittest
+from tornado.test.util import unittest, skipOnTravis
 from tornado.util import u, bytes_type
 from tornado.web import Application, RequestHandler, url
 

@@ -110,6 +110,7 @@ class HTTPClientCommonTestCase(AsyncHTTPTestCase):
             url("/all_methods", AllMethodsHandler),
         ], gzip=True)
 
+    @skipOnTravis
     def test_hello_world(self):
         response = self.fetch("/hello")
         self.assertEqual(response.code, 200)

@@ -309,7 +310,7 @@ Transfer-Encoding: chunked
         self.assertIs(exc_info[0][0], ZeroDivisionError)
 
     def test_configure_defaults(self):
-        defaults = dict(user_agent='TestDefaultUserAgent')
+        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
         # Construct a new instance of the configured client class
         client = self.http_client.__class__(self.io_loop, force_instance=True,
                                             defaults=defaults)

@@ -355,11 +356,10 @@ Transfer-Encoding: chunked
 
     @gen_test
     def test_future_http_error(self):
-        try:
+        with self.assertRaises(HTTPError) as context:
             yield self.http_client.fetch(self.get_url('/notfound'))
-        except HTTPError as e:
-            self.assertEqual(e.code, 404)
-            self.assertEqual(e.response.code, 404)
+        self.assertEqual(context.exception.code, 404)
+        self.assertEqual(context.exception.response.code, 404)
 
     @gen_test
     def test_reuse_request_from_response(self):

@@ -387,6 +387,19 @@ Transfer-Encoding: chunked
                                   allow_nonstandard_methods=True)
         self.assertEqual(response.body, b'OTHER')
 
+    @gen_test
+    def test_body(self):
+        hello_url = self.get_url('/hello')
+        with self.assertRaises(AssertionError) as context:
+            yield self.http_client.fetch(hello_url, body='data')
+
+        self.assertTrue('must be empty' in str(context.exception))
+
+        with self.assertRaises(AssertionError) as context:
+            yield self.http_client.fetch(hello_url, method='POST')
+
+        self.assertTrue('must not be empty' in str(context.exception))
+
 
 class RequestProxyTest(unittest.TestCase):
     def test_request_set(self):

@@ -433,17 +446,22 @@ class HTTPResponseTestCase(unittest.TestCase):
 
 class SyncHTTPClientTest(unittest.TestCase):
     def setUp(self):
-        if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
+        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
+                                                  'AsyncIOMainLoop'):
             # TwistedIOLoop only supports the global reactor, so we can't have
             # separate IOLoops for client and server threads.
+            # AsyncIOMainLoop doesn't work with the default policy
+            # (although it could with some tweaks to this test and a
+            # policy that created loops for non-main threads).
             raise unittest.SkipTest(
-                'Sync HTTPClient not compatible with TwistedIOLoop')
+                'Sync HTTPClient not compatible with TwistedIOLoop or '
+                'AsyncIOMainLoop')
         self.server_ioloop = IOLoop()
 
         sock, self.port = bind_unused_port()
         app = Application([('/', HelloWorldHandler)])
-        server = HTTPServer(app, io_loop=self.server_ioloop)
-        server.add_socket(sock)
+        self.server = HTTPServer(app, io_loop=self.server_ioloop)
+        self.server.add_socket(sock)
 
         self.server_thread = threading.Thread(target=self.server_ioloop.start)
         self.server_thread.start()

@@ -451,7 +469,10 @@ class SyncHTTPClientTest(unittest.TestCase):
         self.http_client = HTTPClient()
 
     def tearDown(self):
-        self.server_ioloop.add_callback(self.server_ioloop.stop)
+        def stop_server():
+            self.server.stop()
+            self.server_ioloop.stop()
+        self.server_ioloop.add_callback(stop_server)
         self.server_thread.join()
         self.http_client.close()
         self.server_ioloop.close(all_fds=True)

@@ -469,3 +490,28 @@ class SyncHTTPClientTest(unittest.TestCase):
         with self.assertRaises(HTTPError) as assertion:
             self.http_client.fetch(self.get_url('/notfound'))
         self.assertEqual(assertion.exception.code, 404)
+
+
+class HTTPRequestTestCase(unittest.TestCase):
+    def test_headers(self):
+        request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
+        self.assertEqual(request.headers, {'foo': 'bar'})
+
+    def test_headers_setter(self):
+        request = HTTPRequest('http://example.com')
+        request.headers = {'bar': 'baz'}
+        self.assertEqual(request.headers, {'bar': 'baz'})
+
+    def test_null_headers_setter(self):
+        request = HTTPRequest('http://example.com')
+        request.headers = None
+        self.assertEqual(request.headers, {})
+
+    def test_body(self):
+        request = HTTPRequest('http://example.com', body='foo')
+        self.assertEqual(request.body, utf8('foo'))
+
+    def test_body_setter(self):
+        request = HTTPRequest('http://example.com')
+        request.body = 'foo'
+        self.assertEqual(request.body, utf8('foo'))

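The new HTTPRequestTestCase pins down the property behavior; in short (a sketch grounded in the tests above)::

    from tornado.httpclient import HTTPRequest

    request = HTTPRequest('http://example.com')
    request.headers = None   # normalized to {}
    request.body = 'foo'     # encoded to utf-8 bytes
    assert request.headers == {}
    assert request.body == b'foo'
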
@@ -2,20 +2,23 @@
 
 
 from __future__ import absolute_import, division, print_function, with_statement
-from tornado import httpclient, simple_httpclient, netutil
-from tornado.escape import json_decode, utf8, _unicode, recursive_unicode, native_str
+from tornado import netutil
+from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
 from tornado import gen
+from tornado.http1connection import HTTP1Connection
 from tornado.httpserver import HTTPServer
-from tornado.httputil import HTTPHeaders
+from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
 from tornado.iostream import IOStream
-from tornado.log import gen_log
-from tornado.netutil import ssl_options_to_context, Resolver
+from tornado.log import gen_log, app_log
+from tornado.netutil import ssl_options_to_context
 from tornado.simple_httpclient import SimpleAsyncHTTPClient
-from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog
-from tornado.test.util import unittest
+from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
+from tornado.test.util import unittest, skipOnTravis
 from tornado.util import u, bytes_type
-from tornado.web import Application, RequestHandler, asynchronous
+from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
 from contextlib import closing
 import datetime
+import gzip
 import os
+import shutil
 import socket

@@ -23,6 +26,28 @@ import ssl
 import sys
 import tempfile
 
+try:
+    from io import BytesIO  # python 3
+except ImportError:
+    from cStringIO import StringIO as BytesIO  # python 2
+
+
+def read_stream_body(stream, callback):
+    """Reads an HTTP response from `stream` and runs callback with its
+    headers and body."""
+    chunks = []
+
+    class Delegate(HTTPMessageDelegate):
+        def headers_received(self, start_line, headers):
+            self.headers = headers
+
+        def data_received(self, chunk):
+            chunks.append(chunk)
+
+        def finish(self):
+            callback((self.headers, b''.join(chunks)))
+
+    conn = HTTP1Connection(stream, True)
+    conn.read_response(Delegate())
+
+
 class HandlerBaseTestCase(AsyncHTTPTestCase):
     def get_app(self):

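Since ``read_stream_body`` takes a ``callback`` keyword, it pairs naturally with ``gen.Task`` inside a coroutine test (as the body-limit tests later in this file do)::

    headers, body = yield gen.Task(read_stream_body, stream)
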
@@ -86,11 +111,13 @@ class SSLTestMixin(object):
         # connection, rather than waiting for a timeout or otherwise
         # misbehaving.
         with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
-            self.http_client.fetch(self.get_url("/").replace('https:', 'http:'),
-                                   self.stop,
-                                   request_timeout=3600,
-                                   connect_timeout=3600)
-            response = self.wait()
+            with ExpectLog(gen_log, 'Uncaught exception', required=False):
+                self.http_client.fetch(
+                    self.get_url("/").replace('https:', 'http:'),
+                    self.stop,
+                    request_timeout=3600,
+                    connect_timeout=3600)
+                response = self.wait()
         self.assertEqual(response.code, 599)
 
 # Python's SSL implementation differs significantly between versions.

@@ -163,18 +190,7 @@ class MultipartTestHandler(RequestHandler):
             })
 
 
-class RawRequestHTTPConnection(simple_httpclient._HTTPConnection):
-    def set_request(self, request):
-        self.__next_request = request
-
-    def _on_connect(self):
-        self.stream.write(self.__next_request)
-        self.__next_request = None
-        self.stream.read_until(b"\r\n\r\n", self._on_headers)
-
-
 # This test is also called from wsgi_test
 class HTTPConnectionTest(AsyncHTTPTestCase):
     def get_handlers(self):
         return [("/multipart", MultipartTestHandler),

@@ -184,23 +200,16 @@ class HTTPConnectionTest(AsyncHTTPTestCase):
         return Application(self.get_handlers())
 
     def raw_fetch(self, headers, body):
-        with closing(Resolver(io_loop=self.io_loop)) as resolver:
-            with closing(SimpleAsyncHTTPClient(self.io_loop,
-                                               resolver=resolver)) as client:
-                conn = RawRequestHTTPConnection(
-                    self.io_loop, client,
-                    httpclient._RequestProxy(
-                        httpclient.HTTPRequest(self.get_url("/")),
-                        dict(httpclient.HTTPRequest._DEFAULTS)),
-                    None, self.stop,
-                    1024 * 1024, resolver)
-                conn.set_request(
-                    b"\r\n".join(headers +
-                                 [utf8("Content-Length: %d\r\n" % len(body))]) +
-                    b"\r\n" + body)
-                response = self.wait()
-        response.rethrow()
-        return response
+        with closing(IOStream(socket.socket())) as stream:
+            stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
+            self.wait()
+            stream.write(
+                b"\r\n".join(headers +
+                             [utf8("Content-Length: %d\r\n" % len(body))]) +
+                b"\r\n" + body)
+            read_stream_body(stream, self.stop)
+            headers, body = self.wait()
+            return body
 
     def test_multipart_form(self):
         # Encodings here are tricky: Headers are latin1, bodies can be

@@ -211,17 +220,17 @@ class HTTPConnectionTest(AsyncHTTPTestCase):
                 b"X-Header-encoding-test: \xe9",
             ],
             b"\r\n".join([
-                    b"Content-Disposition: form-data; name=argument",
-                    b"",
-                    u("\u00e1").encode("utf-8"),
-                    b"--1234567890",
-                    u('Content-Disposition: form-data; name="files"; filename="\u00f3"').encode("utf8"),
-                    b"",
-                    u("\u00fa").encode("utf-8"),
-                    b"--1234567890--",
-                    b"",
+                b"Content-Disposition: form-data; name=argument",
+                b"",
+                u("\u00e1").encode("utf-8"),
+                b"--1234567890",
+                u('Content-Disposition: form-data; name="files"; filename="\u00f3"').encode("utf8"),
+                b"",
+                u("\u00fa").encode("utf-8"),
+                b"--1234567890--",
+                b"",
             ]))
-        data = json_decode(response.body)
+        data = json_decode(response)
         self.assertEqual(u("\u00e9"), data["header"])
         self.assertEqual(u("\u00e1"), data["argument"])
         self.assertEqual(u("\u00f3"), data["filename"])

@@ -344,6 +353,21 @@ class HTTPServerTest(AsyncHTTPTestCase):
         self.assertEqual(200, response.code)
         self.assertEqual(json_decode(response.body), {})
 
+    def test_malformed_body(self):
+        # parse_qs is pretty forgiving, but it will fail on python 3
+        # if the data is not utf8.  On python 2 parse_qs will work,
+        # but then the recursive_unicode call in EchoHandler will
+        # fail.
+        if str is bytes_type:
+            return
+        with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
+            response = self.fetch(
+                '/echo', method="POST",
+                headers={'Content-Type': 'application/x-www-form-urlencoded'},
+                body=b'\xe9')
+        self.assertEqual(200, response.code)
+        self.assertEqual(b'{}', response.body)
+
 
 class HTTPServerRawTest(AsyncHTTPTestCase):
     def get_app(self):

@@ -382,6 +406,25 @@ class HTTPServerRawTest(AsyncHTTPTestCase):
                           self.stop)
         self.wait()
 
+    def test_chunked_request_body(self):
+        # Chunked requests are not widely supported and we don't have a way
+        # to generate them in AsyncHTTPClient, but HTTPServer will read them.
+        self.stream.write(b"""\
+POST /echo HTTP/1.1
+Transfer-Encoding: chunked
+Content-Type: application/x-www-form-urlencoded
+
+4
+foo=
+3
+bar
+0
+
+""".replace(b"\n", b"\r\n"))
+        read_stream_body(self.stream, self.stop)
+        headers, response = self.wait()
+        self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
+
 
 class XHeaderTest(HandlerBaseTestCase):
     class Handler(RequestHandler):

@@ -497,31 +540,40 @@ class UnixSocketTest(AsyncTestCase):
     def setUp(self):
         super(UnixSocketTest, self).setUp()
         self.tmpdir = tempfile.mkdtemp()
+        self.sockfile = os.path.join(self.tmpdir, "test.sock")
+        sock = netutil.bind_unix_socket(self.sockfile)
+        app = Application([("/hello", HelloWorldRequestHandler)])
+        self.server = HTTPServer(app, io_loop=self.io_loop)
+        self.server.add_socket(sock)
+        self.stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
+        self.stream.connect(self.sockfile, self.stop)
+        self.wait()
 
     def tearDown(self):
+        self.stream.close()
+        self.server.stop()
         shutil.rmtree(self.tmpdir)
         super(UnixSocketTest, self).tearDown()
 
     def test_unix_socket(self):
-        sockfile = os.path.join(self.tmpdir, "test.sock")
-        sock = netutil.bind_unix_socket(sockfile)
-        app = Application([("/hello", HelloWorldRequestHandler)])
-        server = HTTPServer(app, io_loop=self.io_loop)
-        server.add_socket(sock)
-        stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
-        stream.connect(sockfile, self.stop)
-        self.wait()
-        stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
-        stream.read_until(b"\r\n", self.stop)
+        self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
+        self.stream.read_until(b"\r\n", self.stop)
         response = self.wait()
         self.assertEqual(response, b"HTTP/1.0 200 OK\r\n")
-        stream.read_until(b"\r\n\r\n", self.stop)
+        self.stream.read_until(b"\r\n\r\n", self.stop)
         headers = HTTPHeaders.parse(self.wait().decode('latin1'))
-        stream.read_bytes(int(headers["Content-Length"]), self.stop)
+        self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
         body = self.wait()
         self.assertEqual(body, b"Hello world")
-        stream.close()
-        server.stop()
+
+    def test_unix_socket_bad_request(self):
+        # Unix sockets don't have remote addresses so they just return an
+        # empty string.
+        with ExpectLog(gen_log, "Malformed HTTP message from"):
+            self.stream.write(b"garbage\r\n\r\n")
+        self.stream.read_until_close(self.stop)
+        response = self.wait()
+        self.assertEqual(response, b"")
 
 
 class KeepAliveTest(AsyncHTTPTestCase):

@@ -586,8 +638,8 @@ class KeepAliveTest(AsyncHTTPTestCase):
         return headers
 
     def read_response(self):
-        headers = self.read_headers()
-        self.stream.read_bytes(int(headers['Content-Length']), self.stop)
+        self.headers = self.read_headers()
+        self.stream.read_bytes(int(self.headers['Content-Length']), self.stop)
         body = self.wait()
         self.assertEqual(b'Hello world', body)

@@ -621,6 +673,7 @@ class KeepAliveTest(AsyncHTTPTestCase):
         self.stream.read_until_close(callback=self.stop)
         data = self.wait()
         self.assertTrue(not data)
+        self.assertTrue('Connection' not in self.headers)
         self.close()
 
     def test_http10_keepalive(self):

@@ -628,8 +681,10 @@ class KeepAliveTest(AsyncHTTPTestCase):
         self.connect()
         self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
         self.read_response()
+        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
         self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
         self.read_response()
+        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
         self.close()
 
     def test_pipelined_requests(self):

@@ -659,3 +714,322 @@ class KeepAliveTest(AsyncHTTPTestCase):
         self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n')
         self.read_headers()
         self.close()
+
+
+class GzipBaseTest(object):
+    def get_app(self):
+        return Application([('/', EchoHandler)])
+
+    def post_gzip(self, body):
+        bytesio = BytesIO()
+        gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
+        gzip_file.write(utf8(body))
+        gzip_file.close()
+        compressed_body = bytesio.getvalue()
+        return self.fetch('/', method='POST', body=compressed_body,
+                          headers={'Content-Encoding': 'gzip'})
+
+    def test_uncompressed(self):
+        response = self.fetch('/', method='POST', body='foo=bar')
+        self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
+
+
+class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
+    def get_httpserver_options(self):
+        return dict(decompress_request=True)
+
+    def test_gzip(self):
+        response = self.post_gzip('foo=bar')
+        self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
+
+
+class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
+    def test_gzip_unsupported(self):
+        # Gzip support is opt-in; without it the server fails to parse
+        # the body (but parsing form bodies is currently just a log message,
+        # not a fatal error).
+        with ExpectLog(gen_log, "Unsupported Content-Encoding"):
+            response = self.post_gzip('foo=bar')
+        self.assertEquals(json_decode(response.body), {})
+
+
+class StreamingChunkSizeTest(AsyncHTTPTestCase):
+    # 50 characters long, and repetitive so it can be compressed.
+    BODY = b'01234567890123456789012345678901234567890123456789'
+    CHUNK_SIZE = 16
+
+    def get_http_client(self):
+        # body_producer doesn't work on curl_httpclient, so override the
+        # configured AsyncHTTPClient implementation.
+        return SimpleAsyncHTTPClient(io_loop=self.io_loop)
+
+    def get_httpserver_options(self):
+        return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)
+
+    class MessageDelegate(HTTPMessageDelegate):
+        def __init__(self, connection):
+            self.connection = connection
+
+        def headers_received(self, start_line, headers):
+            self.chunk_lengths = []
+
+        def data_received(self, chunk):
+            self.chunk_lengths.append(len(chunk))
+
+        def finish(self):
+            response_body = utf8(json_encode(self.chunk_lengths))
+            self.connection.write_headers(
+                ResponseStartLine('HTTP/1.1', 200, 'OK'),
+                HTTPHeaders({'Content-Length': str(len(response_body))}))
+            self.connection.write(response_body)
+            self.connection.finish()
+
+    def get_app(self):
+        class App(HTTPServerConnectionDelegate):
+            def start_request(self, connection):
+                return StreamingChunkSizeTest.MessageDelegate(connection)
+        return App()
+
+    def fetch_chunk_sizes(self, **kwargs):
+        response = self.fetch('/', method='POST', **kwargs)
+        response.rethrow()
+        chunks = json_decode(response.body)
+        self.assertEqual(len(self.BODY), sum(chunks))
+        for chunk_size in chunks:
+            self.assertLessEqual(chunk_size, self.CHUNK_SIZE,
+                                 'oversized chunk: ' + str(chunks))
+            self.assertGreater(chunk_size, 0,
+                               'empty chunk: ' + str(chunks))
+        return chunks
+
+    def compress(self, body):
+        bytesio = BytesIO()
+        gzfile = gzip.GzipFile(mode='w', fileobj=bytesio)
+        gzfile.write(body)
+        gzfile.close()
+        compressed = bytesio.getvalue()
+        if len(compressed) >= len(body):
+            raise Exception("body did not shrink when compressed")
+        return compressed
+
+    def test_regular_body(self):
+        chunks = self.fetch_chunk_sizes(body=self.BODY)
+        # Without compression we know exactly what to expect.
+        self.assertEqual([16, 16, 16, 2], chunks)
+
+    def test_compressed_body(self):
+        self.fetch_chunk_sizes(body=self.compress(self.BODY),
+                               headers={'Content-Encoding': 'gzip'})
+        # Compression creates irregular boundaries so the assertions
+        # in fetch_chunk_sizes are as specific as we can get.
+
+    def test_chunked_body(self):
+        def body_producer(write):
+            write(self.BODY[:20])
+            write(self.BODY[20:])
+        chunks = self.fetch_chunk_sizes(body_producer=body_producer)
+        # HTTP chunk boundaries translate to application-visible breaks
+        self.assertEqual([16, 4, 16, 14], chunks)
+
+    def test_chunked_compressed(self):
+        compressed = self.compress(self.BODY)
+        self.assertGreater(len(compressed), 20)
+
+        def body_producer(write):
+            write(compressed[:20])
+            write(compressed[20:])
+        self.fetch_chunk_sizes(body_producer=body_producer,
+                               headers={'Content-Encoding': 'gzip'})
+
+
+class MaxHeaderSizeTest(AsyncHTTPTestCase):
+    def get_app(self):
+        return Application([('/', HelloWorldRequestHandler)])
+
+    def get_httpserver_options(self):
+        return dict(max_header_size=1024)
+
+    def test_small_headers(self):
+        response = self.fetch("/", headers={'X-Filler': 'a' * 100})
+        response.rethrow()
+        self.assertEqual(response.body, b"Hello world")
+
+    def test_large_headers(self):
+        with ExpectLog(gen_log, "Unsatisfiable read"):
+            response = self.fetch("/", headers={'X-Filler': 'a' * 1000})
+        self.assertEqual(response.code, 599)
+
+
+@skipOnTravis
+class IdleTimeoutTest(AsyncHTTPTestCase):
+    def get_app(self):
+        return Application([('/', HelloWorldRequestHandler)])
+
+    def get_httpserver_options(self):
+        return dict(idle_connection_timeout=0.1)
+
+    def setUp(self):
+        super(IdleTimeoutTest, self).setUp()
+        self.streams = []
+
+    def tearDown(self):
+        super(IdleTimeoutTest, self).tearDown()
+        for stream in self.streams:
+            stream.close()
+
+    def connect(self):
+        stream = IOStream(socket.socket())
+        stream.connect(('localhost', self.get_http_port()), self.stop)
+        self.wait()
+        self.streams.append(stream)
+        return stream
+
+    def test_unused_connection(self):
+        stream = self.connect()
+        stream.set_close_callback(self.stop)
+        self.wait()
+
+    def test_idle_after_use(self):
+        stream = self.connect()
+        stream.set_close_callback(lambda: self.stop("closed"))
+
+        # Use the connection twice to make sure keep-alives are working
+        for i in range(2):
+            stream.write(b"GET / HTTP/1.1\r\n\r\n")
+            stream.read_until(b"\r\n\r\n", self.stop)
+            self.wait()
+            stream.read_bytes(11, self.stop)
+            data = self.wait()
+            self.assertEqual(data, b"Hello world")
+
+        # Now let the timeout trigger and close the connection.
+        data = self.wait()
+        self.assertEqual(data, "closed")
+
+
+class BodyLimitsTest(AsyncHTTPTestCase):
+    def get_app(self):
+        class BufferedHandler(RequestHandler):
+            def put(self):
+                self.write(str(len(self.request.body)))
+
+        @stream_request_body
+        class StreamingHandler(RequestHandler):
+            def initialize(self):
+                self.bytes_read = 0
+
+            def prepare(self):
+                if 'expected_size' in self.request.arguments:
+                    self.request.connection.set_max_body_size(
+                        int(self.get_argument('expected_size')))
+                if 'body_timeout' in self.request.arguments:
+                    self.request.connection.set_body_timeout(
+                        float(self.get_argument('body_timeout')))
+
+            def data_received(self, data):
+                self.bytes_read += len(data)
+
+            def put(self):
+                self.write(str(self.bytes_read))
+
+        return Application([('/buffered', BufferedHandler),
+                            ('/streaming', StreamingHandler)])
+
+    def get_httpserver_options(self):
+        return dict(body_timeout=3600, max_body_size=4096)
+
+    def get_http_client(self):
+        # body_producer doesn't work on curl_httpclient, so override the
+        # configured AsyncHTTPClient implementation.
+        return SimpleAsyncHTTPClient(io_loop=self.io_loop)
+
+    def test_small_body(self):
+        response = self.fetch('/buffered', method='PUT', body=b'a' * 4096)
+        self.assertEqual(response.body, b'4096')
+        response = self.fetch('/streaming', method='PUT', body=b'a' * 4096)
+        self.assertEqual(response.body, b'4096')
+
+    def test_large_body_buffered(self):
+        with ExpectLog(gen_log, '.*Content-Length too long'):
+            response = self.fetch('/buffered', method='PUT', body=b'a' * 10240)
+        self.assertEqual(response.code, 599)
+
+    def test_large_body_buffered_chunked(self):
+        with ExpectLog(gen_log, '.*chunked body too large'):
+            response = self.fetch('/buffered', method='PUT',
+                                  body_producer=lambda write: write(b'a' * 10240))
+        self.assertEqual(response.code, 599)
+
+    def test_large_body_streaming(self):
+        with ExpectLog(gen_log, '.*Content-Length too long'):
+            response = self.fetch('/streaming', method='PUT', body=b'a' * 10240)
+        self.assertEqual(response.code, 599)
+
+    def test_large_body_streaming_chunked(self):
+        with ExpectLog(gen_log, '.*chunked body too large'):
+            response = self.fetch('/streaming', method='PUT',
+                                  body_producer=lambda write: write(b'a' * 10240))
+        self.assertEqual(response.code, 599)
+
+    def test_large_body_streaming_override(self):
+        response = self.fetch('/streaming?expected_size=10240', method='PUT',
+                              body=b'a' * 10240)
+        self.assertEqual(response.body, b'10240')
+
+    def test_large_body_streaming_chunked_override(self):
+        response = self.fetch('/streaming?expected_size=10240', method='PUT',
+                              body_producer=lambda write: write(b'a' * 10240))
+        self.assertEqual(response.body, b'10240')
+
+    @gen_test
+    def test_timeout(self):
+        stream = IOStream(socket.socket())
+        try:
+            yield stream.connect(('127.0.0.1', self.get_http_port()))
+            # Use a raw stream because AsyncHTTPClient won't let us read a
+            # response without finishing a body.
+            stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n'
+                         b'Content-Length: 42\r\n\r\n')
+            with ExpectLog(gen_log, 'Timeout reading body'):
+                response = yield stream.read_until_close()
+            self.assertEqual(response, b'')
+        finally:
+            stream.close()
+
+    @gen_test
+    def test_body_size_override_reset(self):
+        # The max_body_size override is reset between requests.
+        stream = IOStream(socket.socket())
+        try:
+            yield stream.connect(('127.0.0.1', self.get_http_port()))
+            # Use a raw stream so we can make sure it's all on one connection.
+            stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
+                         b'Content-Length: 10240\r\n\r\n')
+            stream.write(b'a' * 10240)
+            headers, response = yield gen.Task(read_stream_body, stream)
+            self.assertEqual(response, b'10240')
+            # Without the ?expected_size parameter, we get the old default value
+            stream.write(b'PUT /streaming HTTP/1.1\r\n'
+                         b'Content-Length: 10240\r\n\r\n')
+            with ExpectLog(gen_log, '.*Content-Length too long'):
+                data = yield stream.read_until_close()
+            self.assertEqual(data, b'')
+        finally:
+            stream.close()
+
+
+class LegacyInterfaceTest(AsyncHTTPTestCase):
+    def get_app(self):
+        # The old request_callback interface does not implement the
+        # delegate interface, and writes its response via request.write
+        # instead of request.connection.write_headers.
+        def handle_request(request):
+            message = b"Hello world"
+            request.write(utf8("HTTP/1.1 200 OK\r\n"
+                               "Content-Length: %d\r\n\r\n" % len(message)))
+            request.write(message)
+            request.finish()
+        return handle_request
+
+    def test_legacy_interface(self):
|
||||
response = self.fetch('/')
|
||||
self.assertEqual(response.body, b"Hello world")
|
||||
|
|
|
|||
|
|
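For context, a minimal sketch (not part of the diff) of the server-side knobs BodyLimitsTest exercises: max_body_size and body_timeout as HTTPServer options, plus the per-request override from a @stream_request_body handler's prepare(). The handler name, limits, and port are illustrative.

from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, stream_request_body


@stream_request_body
class UploadHandler(RequestHandler):  # hypothetical handler for illustration
    def prepare(self):
        self.bytes_read = 0
        # Raise the limit for this one request above the server-wide cap.
        self.request.connection.set_max_body_size(64 * 1024 * 1024)

    def data_received(self, chunk):
        # Called for each chunk as it arrives instead of buffering the body.
        self.bytes_read += len(chunk)

    def put(self):
        self.write(str(self.bytes_read))


if __name__ == '__main__':
    app = Application([('/upload', UploadHandler)])
    # Server-wide defaults, as in get_httpserver_options() above.
    server = HTTPServer(app, max_body_size=4096, body_timeout=3600)
    server.listen(8888)
    IOLoop.current().start()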
@@ -13,6 +13,7 @@ class ImportTest(unittest.TestCase):
        # import tornado.curl_httpclient  # depends on pycurl
        import tornado.escape
        import tornado.gen
        import tornado.http1connection
        import tornado.httpclient
        import tornado.httpserver
        import tornado.httputil
@@ -12,8 +12,9 @@ import time

from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.log import app_log
from tornado.stack_context import ExceptionStackContext, StackContext, wrap, NullContext
from tornado.testing import AsyncTestCase, bind_unused_port
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipIfNonUnix, skipOnTravis

try:

@@ -51,7 +52,8 @@ class TestIOLoop(AsyncTestCase):
        thread = threading.Thread(target=target)
        self.io_loop.add_callback(thread.start)
        self.wait()
        self.assertAlmostEqual(time.time(), self.stop_time, places=2)
        delta = time.time() - self.stop_time
        self.assertLess(delta, 0.1)
        thread.join()

    def test_add_timeout_timedelta(self):

@@ -153,7 +155,7 @@ class TestIOLoop(AsyncTestCase):

    def test_remove_timeout_after_fire(self):
        # It is not an error to call remove_timeout after it has run.
        handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop())
        handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
        self.wait()
        self.io_loop.remove_timeout(handle)

@@ -171,6 +173,131 @@ class TestIOLoop(AsyncTestCase):
        self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
        self.wait()

    def test_timeout_with_arguments(self):
        # This tests that all the timeout methods pass through *args correctly.
        results = []
        self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
        self.io_loop.add_timeout(datetime.timedelta(seconds=0),
                                 results.append, 2)
        self.io_loop.call_at(self.io_loop.time(), results.append, 3)
        self.io_loop.call_later(0, results.append, 4)
        self.io_loop.call_later(0, self.stop)
        self.wait()
        self.assertEqual(results, [1, 2, 3, 4])

    def test_close_file_object(self):
        """When a file object is used instead of a numeric file descriptor,
        the object should be closed (by IOLoop.close(all_fds=True)),
        not just the fd.
        """
        # Use a socket since they are supported by IOLoop on all platforms.
        # Unfortunately, sockets don't support the .closed attribute for
        # inspecting their close status, so we must use a wrapper.
        class SocketWrapper(object):
            def __init__(self, sockobj):
                self.sockobj = sockobj
                self.closed = False

            def fileno(self):
                return self.sockobj.fileno()

            def close(self):
                self.closed = True
                self.sockobj.close()
        sockobj, port = bind_unused_port()
        socket_wrapper = SocketWrapper(sockobj)
        io_loop = IOLoop()
        io_loop.add_handler(socket_wrapper, lambda fd, events: None,
                            IOLoop.READ)
        io_loop.close(all_fds=True)
        self.assertTrue(socket_wrapper.closed)

    def test_handler_callback_file_object(self):
        """The handler callback receives the same fd object it passed in."""
        server_sock, port = bind_unused_port()
        fds = []

        def handle_connection(fd, events):
            fds.append(fd)
            conn, addr = server_sock.accept()
            conn.close()
            self.stop()
        self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(('127.0.0.1', port))
            self.wait()
        self.io_loop.remove_handler(server_sock)
        self.io_loop.add_handler(server_sock.fileno(), handle_connection,
                                 IOLoop.READ)
        with contextlib.closing(socket.socket()) as client_sock:
            client_sock.connect(('127.0.0.1', port))
            self.wait()
        self.assertIs(fds[0], server_sock)
        self.assertEqual(fds[1], server_sock.fileno())
        self.io_loop.remove_handler(server_sock.fileno())
        server_sock.close()

    def test_mixed_fd_fileobj(self):
        server_sock, port = bind_unused_port()

        def f(fd, events):
            pass
        self.io_loop.add_handler(server_sock, f, IOLoop.READ)
        with self.assertRaises(Exception):
            # The exact error is unspecified - some implementations use
            # IOError, others use ValueError.
            self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
        self.io_loop.remove_handler(server_sock.fileno())
        server_sock.close()

    def test_reentrant(self):
        """Calling start() twice should raise an error, not deadlock."""
        returned_from_start = [False]
        got_exception = [False]

        def callback():
            try:
                self.io_loop.start()
                returned_from_start[0] = True
            except Exception:
                got_exception[0] = True
            self.stop()
        self.io_loop.add_callback(callback)
        self.wait()
        self.assertTrue(got_exception[0])
        self.assertFalse(returned_from_start[0])

    def test_exception_logging(self):
        """Uncaught exceptions get logged by the IOLoop."""
        # Use a NullContext to keep the exception from being caught by
        # AsyncTestCase.
        with NullContext():
            self.io_loop.add_callback(lambda: 1/0)
            self.io_loop.add_callback(self.stop)
            with ExpectLog(app_log, "Exception in callback"):
                self.wait()

    def test_exception_logging_future(self):
        """The IOLoop examines exceptions from Futures and logs them."""
        with NullContext():
            @gen.coroutine
            def callback():
                self.io_loop.add_callback(self.stop)
                1/0
            self.io_loop.add_callback(callback)
            with ExpectLog(app_log, "Exception in callback"):
                self.wait()

    def test_spawn_callback(self):
        # An added callback runs in the test's stack_context, so will be
        # re-raised in wait().
        self.io_loop.add_callback(lambda: 1/0)
        with self.assertRaises(ZeroDivisionError):
            self.wait()
        # A spawned callback is run directly on the IOLoop, so it will be
        # logged without stopping the test.
        self.io_loop.spawn_callback(lambda: 1/0)
        self.io_loop.add_callback(self.stop)
        with ExpectLog(app_log, "Exception in callback"):
            self.wait()


# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.

@@ -329,5 +456,6 @@ class TestIOLoopRunSync(unittest.TestCase):
            yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
        self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)


if __name__ == "__main__":
    unittest.main()
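A standalone sketch of the scheduling variants test_timeout_with_arguments covers; call_at, call_later, spawn_callback, and the *args pass-through are all exercised by the added tests above (the report helper is illustrative, not from the diff).

import datetime

from tornado.ioloop import IOLoop


def report(tag):
    print('fired:', tag)

loop = IOLoop.current()
loop.add_timeout(loop.time(), report, 'add_timeout')
loop.add_timeout(datetime.timedelta(seconds=0), report, 'timedelta')
loop.call_at(loop.time() + 0.1, report, 'call_at')
loop.call_later(0.2, report, 'call_later')
# spawn_callback runs outside any caller stack_context; an exception here
# is logged by the IOLoop rather than propagated to the scheduling code.
loop.spawn_callback(report, 'spawn_callback')
loop.call_later(0.3, loop.stop)
loop.start()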
@@ -1,13 +1,16 @@
from __future__ import absolute_import, division, print_function, with_statement
from tornado.concurrent import Future
from tornado import gen
from tornado import netutil
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream, PipeIOStream
from tornado.iostream import IOStream, SSLIOStream, PipeIOStream, StreamClosedError
from tornado.httputil import HTTPHeaders
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket
from tornado.stack_context import NullContext
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog, gen_test
from tornado.test.util import unittest, skipIfNonUnix
from tornado.web import RequestHandler, Application
import certifi
import errno
import logging
import os

@@ -17,6 +20,13 @@ import ssl
import sys


def _server_ssl_options():
    return dict(
        certfile=os.path.join(os.path.dirname(__file__), 'test.crt'),
        keyfile=os.path.join(os.path.dirname(__file__), 'test.key'),
    )


class HelloHandler(RequestHandler):
    def get(self):
        self.write("Hello")
@@ -106,6 +116,48 @@ class TestIOStreamWebMixin(object):

        stream.close()

    @gen_test
    def test_future_interface(self):
        """Basic test of IOStream's ability to return Futures."""
        stream = self._make_client_iostream()
        connect_result = yield stream.connect(
            ("localhost", self.get_http_port()))
        self.assertIs(connect_result, stream)
        yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
        first_line = yield stream.read_until(b"\r\n")
        self.assertEqual(first_line, b"HTTP/1.0 200 OK\r\n")
        # callback=None is equivalent to no callback.
        header_data = yield stream.read_until(b"\r\n\r\n", callback=None)
        headers = HTTPHeaders.parse(header_data.decode('latin1'))
        content_length = int(headers['Content-Length'])
        body = yield stream.read_bytes(content_length)
        self.assertEqual(body, b'Hello')
        stream.close()

    @gen_test
    def test_future_close_while_reading(self):
        stream = self._make_client_iostream()
        yield stream.connect(("localhost", self.get_http_port()))
        yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
        with self.assertRaises(StreamClosedError):
            yield stream.read_bytes(1024 * 1024)
        stream.close()

    @gen_test
    def test_future_read_until_close(self):
        # Ensure that the data comes through before the StreamClosedError.
        stream = self._make_client_iostream()
        yield stream.connect(("localhost", self.get_http_port()))
        yield stream.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
        yield stream.read_until(b"\r\n\r\n")
        body = yield stream.read_until_close()
        self.assertEqual(body, b"Hello")

        # Nothing else to read; the error comes immediately without waiting
        # for yield.
        with self.assertRaises(StreamClosedError):
            stream.read_bytes(1)


class TestIOStreamMixin(object):
    def _make_server_iostream(self, connection, **kwargs):
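Outside the test harness, the Future interface that test_future_interface verifies looks roughly like this: with no callback argument, connect/write/read_until/read_bytes all return Futures a coroutine can yield. A minimal sketch; example.com is a placeholder host.

import socket

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream


@gen.coroutine
def fetch_status_line(host):
    stream = IOStream(socket.socket())
    yield stream.connect((host, 80))
    yield stream.write(b'GET / HTTP/1.0\r\nHost: ' +
                       host.encode('ascii') + b'\r\n\r\n')
    # No callback passed, so read_until returns a Future.
    status_line = yield stream.read_until(b'\r\n')
    stream.close()
    raise gen.Return(status_line)

print(IOLoop.current().run_sync(lambda: fetch_status_line('example.com')))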
@@ -120,16 +172,6 @@ class TestIOStreamMixin(object):

        def accept_callback(connection, address):
            streams[0] = self._make_server_iostream(connection, **kwargs)
            if isinstance(streams[0], SSLIOStream):
                # HACK: The SSL handshake won't complete (and
                # therefore the client connect callback won't be
                # run) until the server side has tried to do something
                # with the connection.  For these tests we want both
                # sides to connect before we do anything else with the
                # connection, so we must cause some dummy activity on the
                # server.  If this turns out to be useful for real apps
                # it should have a cleaner interface.
                streams[0]._add_io_state(IOLoop.READ)
            self.stop()

        def connect_callback():
@@ -168,9 +210,6 @@ class TestIOStreamMixin(object):
        server, client = self.make_iostream_pair()
        server.write(b'', callback=self.stop)
        self.wait()
        # As a side effect, the stream is now listening for connection
        # close (if it wasn't already), but is not listening for writes
        self.assertEqual(server._state, IOLoop.READ | IOLoop.ERROR)
        server.close()
        client.close()
@@ -193,8 +232,11 @@ class TestIOStreamMixin(object):
        self.assertFalse(self.connect_called)
        self.assertTrue(isinstance(stream.error, socket.error), stream.error)
        if sys.platform != 'cygwin':
            _ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
            if hasattr(errno, "WSAECONNREFUSED"):
                _ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
            # cygwin's errnos don't match those used on native windows python
            self.assertEqual(stream.error.args[0], errno.ECONNREFUSED)
            self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)

    def test_gaierror(self):
        # Test that IOStream sets its exc_info on getaddrinfo error
@@ -308,6 +350,25 @@ class TestIOStreamMixin(object):
        server.close()
        client.close()

    def test_future_delayed_close_callback(self):
        # Same as test_delayed_close_callback, but with the future interface.
        server, client = self.make_iostream_pair()

        # We can't call make_iostream_pair inside a gen_test function
        # because the ioloop is not reentrant.
        @gen_test
        def f(self):
            server.write(b"12")
            chunks = []
            chunks.append((yield client.read_bytes(1)))
            server.close()
            chunks.append((yield client.read_bytes(1)))
            self.assertEqual(chunks, [b"1", b"2"])
        try:
            f(self)
        finally:
            server.close()
            client.close()

    def test_close_buffered_data(self):
        # Similar to the previous test, but with data stored in the OS's
        # socket buffers instead of the IOStream's read buffer.  Out-of-band
@@ -340,14 +401,18 @@ class TestIOStreamMixin(object):
        # Similar to test_delayed_close_callback, but read_until_close takes
        # a separate code path so test it separately.
        server, client = self.make_iostream_pair()
        client.set_close_callback(self.stop)
        try:
            server.write(b"1234")
            server.close()
            self.wait()
            # Read one byte to make sure the client has received the data.
            # It won't run the close callback as long as there is more buffered
            # data that could satisfy a later read.
            client.read_bytes(1, self.stop)
            data = self.wait()
            self.assertEqual(data, b"1")
            client.read_until_close(self.stop)
            data = self.wait()
            self.assertEqual(data, b"1234")
            self.assertEqual(data, b"234")
        finally:
            server.close()
            client.close()
@@ -357,17 +422,18 @@ class TestIOStreamMixin(object):
        # All data should go through the streaming callback,
        # and the final read callback just gets an empty string.
        server, client = self.make_iostream_pair()
        client.set_close_callback(self.stop)
        try:
            server.write(b"1234")
            server.close()
            self.wait()
            client.read_bytes(1, self.stop)
            data = self.wait()
            self.assertEqual(data, b"1")
            streaming_data = []
            client.read_until_close(self.stop,
                                    streaming_callback=streaming_data.append)
            data = self.wait()
            self.assertEqual(b'', data)
            self.assertEqual(b''.join(streaming_data), b"1234")
            self.assertEqual(b''.join(streaming_data), b"234")
        finally:
            server.close()
            client.close()
@@ -461,6 +527,203 @@ class TestIOStreamMixin(object):
        server.close()
        client.close()

    def test_future_close_callback(self):
        # Regression test for interaction between the Future read interfaces
        # and IOStream._maybe_add_error_listener.
        server, client = self.make_iostream_pair()
        closed = [False]

        def close_callback():
            closed[0] = True
            self.stop()
        server.set_close_callback(close_callback)
        try:
            client.write(b'a')
            future = server.read_bytes(1)
            self.io_loop.add_future(future, self.stop)
            self.assertEqual(self.wait().result(), b'a')
            self.assertFalse(closed[0])
            client.close()
            self.wait()
            self.assertTrue(closed[0])
        finally:
            server.close()
            client.close()

    def test_read_bytes_partial(self):
        server, client = self.make_iostream_pair()
        try:
            # Ask for more than is available with partial=True
            client.read_bytes(50, self.stop, partial=True)
            server.write(b"hello")
            data = self.wait()
            self.assertEqual(data, b"hello")

            # Ask for less than what is available; num_bytes is still
            # respected.
            client.read_bytes(3, self.stop, partial=True)
            server.write(b"world")
            data = self.wait()
            self.assertEqual(data, b"wor")

            # Partial reads won't return an empty string, but read_bytes(0)
            # will.
            client.read_bytes(0, self.stop, partial=True)
            data = self.wait()
            self.assertEqual(data, b'')
        finally:
            server.close()
            client.close()

    def test_read_until_max_bytes(self):
        server, client = self.make_iostream_pair()
        client.set_close_callback(lambda: self.stop("closed"))
        try:
            # Extra room under the limit
            client.read_until(b"def", self.stop, max_bytes=50)
            server.write(b"abcdef")
            data = self.wait()
            self.assertEqual(data, b"abcdef")

            # Just enough space
            client.read_until(b"def", self.stop, max_bytes=6)
            server.write(b"abcdef")
            data = self.wait()
            self.assertEqual(data, b"abcdef")

            # Not enough space, but we don't know it until the data arrives;
            # all we can do is log a warning and close the connection.
            with ExpectLog(gen_log, "Unsatisfiable read"):
                client.read_until(b"def", self.stop, max_bytes=5)
                server.write(b"123456")
                data = self.wait()
            self.assertEqual(data, "closed")
        finally:
            server.close()
            client.close()

    def test_read_until_max_bytes_inline(self):
        server, client = self.make_iostream_pair()
        client.set_close_callback(lambda: self.stop("closed"))
        try:
            # Similar to the error case in the previous test, but the
            # server writes first so client reads are satisfied
            # inline.  For consistency with the out-of-line case, we
            # do not raise the error synchronously.
            server.write(b"123456")
            with ExpectLog(gen_log, "Unsatisfiable read"):
                client.read_until(b"def", self.stop, max_bytes=5)
                data = self.wait()
            self.assertEqual(data, "closed")
        finally:
            server.close()
            client.close()

    def test_read_until_max_bytes_ignores_extra(self):
        server, client = self.make_iostream_pair()
        client.set_close_callback(lambda: self.stop("closed"))
        try:
            # Even though data that matches arrives in the same packet that
            # puts us over the limit, we fail the request because it was not
            # found within the limit.
            server.write(b"abcdef")
            with ExpectLog(gen_log, "Unsatisfiable read"):
                client.read_until(b"def", self.stop, max_bytes=5)
                data = self.wait()
            self.assertEqual(data, "closed")
        finally:
            server.close()
            client.close()

    def test_read_until_regex_max_bytes(self):
        server, client = self.make_iostream_pair()
        client.set_close_callback(lambda: self.stop("closed"))
        try:
            # Extra room under the limit
            client.read_until_regex(b"def", self.stop, max_bytes=50)
            server.write(b"abcdef")
            data = self.wait()
            self.assertEqual(data, b"abcdef")

            # Just enough space
            client.read_until_regex(b"def", self.stop, max_bytes=6)
            server.write(b"abcdef")
            data = self.wait()
            self.assertEqual(data, b"abcdef")

            # Not enough space, but we don't know it until the data arrives;
            # all we can do is log a warning and close the connection.
            with ExpectLog(gen_log, "Unsatisfiable read"):
                client.read_until_regex(b"def", self.stop, max_bytes=5)
                server.write(b"123456")
                data = self.wait()
            self.assertEqual(data, "closed")
        finally:
            server.close()
            client.close()

    def test_read_until_regex_max_bytes_inline(self):
        server, client = self.make_iostream_pair()
        client.set_close_callback(lambda: self.stop("closed"))
        try:
            # Similar to the error case in the previous test, but the
            # server writes first so client reads are satisfied
            # inline.  For consistency with the out-of-line case, we
            # do not raise the error synchronously.
            server.write(b"123456")
            with ExpectLog(gen_log, "Unsatisfiable read"):
                client.read_until_regex(b"def", self.stop, max_bytes=5)
                data = self.wait()
            self.assertEqual(data, "closed")
        finally:
            server.close()
            client.close()

    def test_read_until_regex_max_bytes_ignores_extra(self):
        server, client = self.make_iostream_pair()
        client.set_close_callback(lambda: self.stop("closed"))
        try:
            # Even though data that matches arrives in the same packet that
            # puts us over the limit, we fail the request because it was not
            # found within the limit.
            server.write(b"abcdef")
            with ExpectLog(gen_log, "Unsatisfiable read"):
                client.read_until_regex(b"def", self.stop, max_bytes=5)
                data = self.wait()
            self.assertEqual(data, "closed")
        finally:
            server.close()
            client.close()

    def test_small_reads_from_large_buffer(self):
        # 10KB buffer size, 100KB available to read.
        # Read 1KB at a time and make sure that the buffer is not eagerly
        # filled.
        server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
        try:
            server.write(b"a" * 1024 * 100)
            for i in range(100):
                client.read_bytes(1024, self.stop)
                data = self.wait()
                self.assertEqual(data, b"a" * 1024)
        finally:
            server.close()
            client.close()

    def test_small_read_untils_from_large_buffer(self):
        # 10KB buffer size, 100KB available to read.
        # Read 1KB at a time and make sure that the buffer is not eagerly
        # filled.
        server, client = self.make_iostream_pair(max_buffer_size=10 * 1024)
        try:
            server.write((b"a" * 1023 + b"\n") * 100)
            for i in range(100):
                client.read_until(b"\n", self.stop, max_bytes=4096)
                data = self.wait()
                self.assertEqual(data, b"a" * 1023 + b"\n")
        finally:
            server.close()
            client.close()


class TestIOStreamWebHTTP(TestIOStreamWebMixin, AsyncHTTPTestCase):
    def _make_client_iostream(self):
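A hedged standalone sketch of the two read options the added tests pin down: partial semantics aside, max_bytes turns an unbounded read_until into one that fails (closing the stream after logging "Unsatisfiable read") if the delimiter never shows up within the limit; the pending Future then gets a StreamClosedError. Host and limit here are illustrative.

import socket

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, StreamClosedError


@gen.coroutine
def read_headers(host):
    stream = IOStream(socket.socket())
    yield stream.connect((host, 80))
    yield stream.write(b'GET / HTTP/1.0\r\nHost: ' +
                       host.encode('ascii') + b'\r\n\r\n')
    try:
        # Fails (and closes the stream) instead of buffering without bound
        # if no blank line arrives within 64KB.
        headers = yield stream.read_until(b'\r\n\r\n', max_bytes=64 * 1024)
    except StreamClosedError:
        headers = None  # unsatisfiable read or remote close
    stream.close()
    raise gen.Return(headers)

print(IOLoop.current().run_sync(lambda: read_headers('example.com')))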
@@ -482,14 +745,10 @@ class TestIOStream(TestIOStreamMixin, AsyncTestCase):

class TestIOStreamSSL(TestIOStreamMixin, AsyncTestCase):
    def _make_server_iostream(self, connection, **kwargs):
        ssl_options = dict(
            certfile=os.path.join(os.path.dirname(__file__), 'test.crt'),
            keyfile=os.path.join(os.path.dirname(__file__), 'test.key'),
        )
        connection = ssl.wrap_socket(connection,
                                     server_side=True,
                                     do_handshake_on_connect=False,
                                     **ssl_options)
                                     **_server_ssl_options())
        return SSLIOStream(connection, io_loop=self.io_loop, **kwargs)

    def _make_client_iostream(self, connection, **kwargs):
@@ -517,6 +776,91 @@ class TestIOStreamSSLContext(TestIOStreamMixin, AsyncTestCase):
                                 ssl_options=context, **kwargs)


class TestIOStreamStartTLS(AsyncTestCase):
    def setUp(self):
        try:
            super(TestIOStreamStartTLS, self).setUp()
            self.listener, self.port = bind_unused_port()
            self.server_stream = None
            self.server_accepted = Future()
            netutil.add_accept_handler(self.listener, self.accept)
            self.client_stream = IOStream(socket.socket())
            self.io_loop.add_future(self.client_stream.connect(
                ('127.0.0.1', self.port)), self.stop)
            self.wait()
            self.io_loop.add_future(self.server_accepted, self.stop)
            self.wait()
        except Exception as e:
            print(e)
            raise

    def tearDown(self):
        if self.server_stream is not None:
            self.server_stream.close()
        if self.client_stream is not None:
            self.client_stream.close()
        self.listener.close()
        super(TestIOStreamStartTLS, self).tearDown()

    def accept(self, connection, address):
        if self.server_stream is not None:
            self.fail("should only get one connection")
        self.server_stream = IOStream(connection)
        self.server_accepted.set_result(None)

    @gen.coroutine
    def client_send_line(self, line):
        self.client_stream.write(line)
        recv_line = yield self.server_stream.read_until(b"\r\n")
        self.assertEqual(line, recv_line)

    @gen.coroutine
    def server_send_line(self, line):
        self.server_stream.write(line)
        recv_line = yield self.client_stream.read_until(b"\r\n")
        self.assertEqual(line, recv_line)

    def client_start_tls(self, ssl_options=None):
        client_stream = self.client_stream
        self.client_stream = None
        return client_stream.start_tls(False, ssl_options)

    def server_start_tls(self, ssl_options=None):
        server_stream = self.server_stream
        self.server_stream = None
        return server_stream.start_tls(True, ssl_options)

    @gen_test
    def test_start_tls_smtp(self):
        # This flow is simplified from RFC 3207 section 5.
        # We don't really need all of this, but it helps to make sure
        # that after realistic back-and-forth traffic the buffers end up
        # in a sane state.
        yield self.server_send_line(b"220 mail.example.com ready\r\n")
        yield self.client_send_line(b"EHLO mail.example.com\r\n")
        yield self.server_send_line(b"250-mail.example.com welcome\r\n")
        yield self.server_send_line(b"250 STARTTLS\r\n")
        yield self.client_send_line(b"STARTTLS\r\n")
        yield self.server_send_line(b"220 Go ahead\r\n")
        client_future = self.client_start_tls()
        server_future = self.server_start_tls(_server_ssl_options())
        self.client_stream = yield client_future
        self.server_stream = yield server_future
        self.assertTrue(isinstance(self.client_stream, SSLIOStream))
        self.assertTrue(isinstance(self.server_stream, SSLIOStream))
        yield self.client_send_line(b"EHLO mail.example.com\r\n")
        yield self.server_send_line(b"250 mail.example.com welcome\r\n")

    @gen_test
    def test_handshake_fail(self):
        self.server_start_tls(_server_ssl_options())
        client_future = self.client_start_tls(
            dict(cert_reqs=ssl.CERT_REQUIRED, ca_certs=certifi.where()))
        with ExpectLog(gen_log, "SSL Error"):
            with self.assertRaises(ssl.SSLError):
                yield client_future


@skipIfNonUnix
class TestPipeIOStream(AsyncTestCase):
    def test_pipe_iostream(self):
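Condensed from the SMTP flow above, the client half of the new start_tls API might look like the sketch below. The key point the tests establish: start_tls's Future resolves to a new SSLIOStream and the original plain stream must not be used afterwards. Host, port, and EHLO name are placeholders.

import socket

from tornado import gen
from tornado.iostream import IOStream


@gen.coroutine
def starttls_client(host, port):
    stream = IOStream(socket.socket())
    yield stream.connect((host, port))
    yield stream.read_until(b'\r\n')    # 220 greeting
    yield stream.write(b'STARTTLS\r\n')
    yield stream.read_until(b'\r\n')    # 220 Go ahead
    # start_tls consumes the plain stream and yields an SSLIOStream.
    stream = yield stream.start_tls(False)
    yield stream.write(b'EHLO client.example.com\r\n')
    reply = yield stream.read_until(b'\r\n')
    stream.close()
    raise gen.Return(reply)

# e.g. tornado.ioloop.IOLoop.current().run_sync(
#          lambda: starttls_client('mail.example.com', 25))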
@@ -543,3 +887,21 @@ class TestPipeIOStream(AsyncTestCase):
        self.assertEqual(data, b"ld")

        rs.close()

    def test_pipe_iostream_big_write(self):
        r, w = os.pipe()

        rs = PipeIOStream(r, io_loop=self.io_loop)
        ws = PipeIOStream(w, io_loop=self.io_loop)

        NUM_BYTES = 1048576

        # Write 1MB of data, which should fill the buffer
        ws.write(b"1" * NUM_BYTES)

        rs.read_bytes(NUM_BYTES, self.stop)
        data = self.wait()
        self.assertEqual(data, b"1" * NUM_BYTES)

        ws.close()
        rs.close()
@@ -20,6 +20,8 @@ import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import warnings


@@ -52,7 +54,6 @@ class LogFormatterTest(unittest.TestCase):
            logging.ERROR: u("\u0001"),
        }
        self.formatter._normal = u("\u0002")
        self.formatter._color = True
        # construct a Logger directly to bypass getLogger's caching
        self.logger = logging.Logger('LogFormatterTest')
        self.logger.propagate = False

@@ -157,3 +158,50 @@ class EnablePrettyLoggingTest(unittest.TestCase):
            for filename in glob.glob(tmpdir + '/test_log*'):
                os.unlink(filename)
            os.rmdir(tmpdir)


class LoggingOptionTest(unittest.TestCase):
    """Test the ability to enable and disable Tornado's logging hooks."""
    def logs_present(self, statement, args=None):
        # Each test may manipulate and/or parse the options and then log
        # a line at the 'info' level.  This level is ignored in the
        # logging module by default, but Tornado turns it on by default
        # so it is the easiest way to tell whether tornado's logging hooks
        # ran.
        IMPORT = 'from tornado.options import options, parse_command_line'
        LOG_INFO = 'import logging; logging.info("hello")'
        program = ';'.join([IMPORT, statement, LOG_INFO])
        proc = subprocess.Popen(
            [sys.executable, '-c', program] + (args or []),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout, stderr = proc.communicate()
        self.assertEqual(proc.returncode, 0, 'process failed: %r' % stdout)
        return b'hello' in stdout

    def test_default(self):
        self.assertFalse(self.logs_present('pass'))

    def test_tornado_default(self):
        self.assertTrue(self.logs_present('parse_command_line()'))

    def test_disable_command_line(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=none']))

    def test_disable_command_line_case_insensitive(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=None']))

    def test_disable_code_string(self):
        self.assertFalse(self.logs_present(
            'options.logging = "none"; parse_command_line()'))

    def test_disable_code_none(self):
        self.assertFalse(self.logs_present(
            'options.logging = None; parse_command_line()'))

    def test_disable_override(self):
        # command line trumps code defaults
        self.assertTrue(self.logs_present(
            'options.logging = None; parse_command_line()',
            ['--logging=info']))
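As an application-level illustration of what LoggingOptionTest asserts: assigning options.logging = None (or the string "none") before parse_command_line() keeps Tornado from touching the root logger, while an explicit --logging=info on the command line still re-enables its hooks. A sketch, with the basicConfig call standing in for whatever setup the application prefers.

import logging

from tornado.options import options, parse_command_line

options.logging = None  # code default: leave logging configuration alone
parse_command_line()    # --logging=info on argv would override this

logging.basicConfig(level=logging.INFO)  # app-controlled setup instead
logging.info('hello')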
@@ -1,10 +1,16 @@
from __future__ import absolute_import, division, print_function, with_statement

import os
import signal
import socket
from subprocess import Popen
import sys
import time

from tornado.netutil import BlockingResolver, ThreadedResolver, is_valid_ip
from tornado.netutil import BlockingResolver, ThreadedResolver, is_valid_ip, bind_sockets
from tornado.stack_context import ExceptionStackContext
from tornado.testing import AsyncTestCase, gen_test
from tornado.test.util import unittest
from tornado.test.util import unittest, skipIfNoNetwork

try:
    from concurrent import futures

@@ -20,6 +26,7 @@ else:

try:
    import twisted
    import twisted.names
except ImportError:
    twisted = None
else:

@@ -27,6 +34,15 @@ else:


class _ResolverTestMixin(object):
    def skipOnCares(self):
        # Some DNS-hijacking ISPs (e.g. Time Warner) return non-empty results
        # with an NXDOMAIN status code.  Most resolvers treat this as an error;
        # C-ares returns the results, making the "bad_host" tests unreliable.
        # C-ares will try to resolve even malformed names, such as the
        # name with spaces used in this test.
        if self.resolver.__class__.__name__ == 'CaresResolver':
            self.skipTest("CaresResolver doesn't recognize fake NXDOMAIN")

    def test_localhost(self):
        self.resolver.resolve('localhost', 80, callback=self.stop)
        result = self.wait()

@@ -39,13 +55,34 @@ class _ResolverTestMixin(object):
        self.assertIn((socket.AF_INET, ('127.0.0.1', 80)),
                      addrinfo)

    def test_bad_host(self):
        self.skipOnCares()

        def handler(exc_typ, exc_val, exc_tb):
            self.stop(exc_val)
            return True  # Halt propagation.

        with ExceptionStackContext(handler):
            self.resolver.resolve('an invalid domain', 80, callback=self.stop)

        result = self.wait()
        self.assertIsInstance(result, Exception)

    @gen_test
    def test_future_interface_bad_host(self):
        self.skipOnCares()
        with self.assertRaises(Exception):
            yield self.resolver.resolve('an invalid domain', 80,
                                        socket.AF_UNSPEC)


@skipIfNoNetwork
class BlockingResolverTest(AsyncTestCase, _ResolverTestMixin):
    def setUp(self):
        super(BlockingResolverTest, self).setUp()
        self.resolver = BlockingResolver(io_loop=self.io_loop)


@skipIfNoNetwork
@unittest.skipIf(futures is None, "futures module not present")
class ThreadedResolverTest(AsyncTestCase, _ResolverTestMixin):
    def setUp(self):

@@ -57,6 +94,34 @@ class ThreadedResolverTest(AsyncTestCase, _ResolverTestMixin):
        super(ThreadedResolverTest, self).tearDown()


@skipIfNoNetwork
@unittest.skipIf(futures is None, "futures module not present")
@unittest.skipIf(sys.platform == 'win32', "preexec_fn not available on win32")
class ThreadedResolverImportTest(unittest.TestCase):
    def test_import(self):
        TIMEOUT = 5

        # Test for a deadlock when importing a module that runs the
        # ThreadedResolver at import-time.  See resolve_test_helper.py for
        # full explanation.
        command = [
            sys.executable,
            '-c',
            'import tornado.test.resolve_test_helper']

        start = time.time()
        popen = Popen(command, preexec_fn=lambda: signal.alarm(TIMEOUT))
        while time.time() - start < TIMEOUT:
            return_code = popen.poll()
            if return_code is not None:
                self.assertEqual(0, return_code)
                return  # Success.
            time.sleep(0.05)

        self.fail("import timed out")


@skipIfNoNetwork
@unittest.skipIf(pycares is None, "pycares module not present")
class CaresResolverTest(AsyncTestCase, _ResolverTestMixin):
    def setUp(self):

@@ -64,6 +129,7 @@ class CaresResolverTest(AsyncTestCase, _ResolverTestMixin):
        self.resolver = CaresResolver(io_loop=self.io_loop)


@skipIfNoNetwork
@unittest.skipIf(twisted is None, "twisted module not present")
@unittest.skipIf(getattr(twisted, '__version__', '0.0') < "12.1", "old version of twisted")
class TwistedResolverTest(AsyncTestCase, _ResolverTestMixin):

@@ -82,3 +148,21 @@ class IsValidIPTest(unittest.TestCase):
        self.assertTrue(not is_valid_ip('localhost'))
        self.assertTrue(not is_valid_ip('4.4.4.4<'))
        self.assertTrue(not is_valid_ip(' 127.0.0.1'))
        self.assertTrue(not is_valid_ip(''))
        self.assertTrue(not is_valid_ip(' '))
        self.assertTrue(not is_valid_ip('\n'))
        self.assertTrue(not is_valid_ip('\x00'))


class TestPortAllocation(unittest.TestCase):
    def test_same_port_allocation(self):
        if 'TRAVIS' in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        sockets = bind_sockets(None, 'localhost')
        try:
            port = sockets[0].getsockname()[1]
            self.assertTrue(all(s.getsockname()[1] == port
                                for s in sockets[1:]))
        finally:
            for sock in sockets:
                sock.close()
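What TestPortAllocation checks, as a stand-alone snippet: passing port None lets the OS pick, and bind_sockets arranges for every returned socket (e.g. the IPv4 and IPv6 halves of a dual-stack listener) to share that one port. Just an illustration of the new behavior the test covers.

from tornado.netutil import bind_sockets

sockets = bind_sockets(None, 'localhost')
try:
    ports = set(s.getsockname()[1] for s in sockets)
    assert len(ports) == 1, ports  # all sockets share the OS-assigned port
    print('%d socket(s) all bound to port %d' % (len(sockets), ports.pop()))
finally:
    for sock in sockets:
        sock.close()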
@@ -19,8 +19,10 @@ from tornado.web import RequestHandler, Application


def skip_if_twisted():
    if IOLoop.configured_class().__name__.endswith('TwistedIOLoop'):
        raise unittest.SkipTest("Process tests not compatible with TwistedIOLoop")
    if IOLoop.configured_class().__name__.endswith(('TwistedIOLoop',
                                                    'AsyncIOMainLoop')):
        raise unittest.SkipTest("Process tests not compatible with "
                                "TwistedIOLoop or AsyncIOMainLoop")

# Not using AsyncHTTPTestCase because we need control over the IOLoop.

@@ -135,6 +137,14 @@ class ProcessTest(unittest.TestCase):
@skipIfNonUnix
class SubprocessTest(AsyncTestCase):
    def test_subprocess(self):
        if IOLoop.configured_class().__name__.endswith('LayeredTwistedIOLoop'):
            # This test fails non-deterministically with LayeredTwistedIOLoop.
            # (the read_until('\n') returns '\n' instead of 'hello\n')
            # This probably indicates a problem with either TornadoReactor
            # or TwistedIOLoop, but I haven't been able to track it down
            # and for now this is just causing spurious travis-ci failures.
            raise unittest.SkipTest("Subprocess tests not compatible with "
                                    "LayeredTwistedIOLoop")
        subproc = Subprocess([sys.executable, '-u', '-i'],
                             stdin=Subprocess.STREAM,
                             stdout=Subprocess.STREAM, stderr=subprocess.STDOUT,
@@ -0,0 +1,12 @@
from __future__ import absolute_import, division, print_function, with_statement
from tornado.ioloop import IOLoop
from tornado.netutil import ThreadedResolver
from tornado.util import u

# When this module is imported, it runs getaddrinfo on a thread. Since
# the hostname is unicode, getaddrinfo attempts to import encodings.idna
# but blocks on the import lock. Verify that ThreadedResolver avoids
# this deadlock.

resolver = ThreadedResolver()
IOLoop.current().run_sync(lambda: resolver.resolve(u('localhost'), 80))

@@ -13,6 +13,11 @@ from tornado.netutil import Resolver
from tornado.options import define, options, add_parse_callback
from tornado.test.util import unittest

try:
    reduce  # py2
except NameError:
    from functools import reduce  # py3

TEST_MODULES = [
    'tornado.httputil.doctests',
    'tornado.iostream.doctests',

@@ -35,6 +40,7 @@ TEST_MODULES = [
    'tornado.test.process_test',
    'tornado.test.simple_httpclient_test',
    'tornado.test.stack_context_test',
    'tornado.test.tcpclient_test',
    'tornado.test.template_test',
    'tornado.test.testing_test',
    'tornado.test.twisted_test',

@@ -60,7 +66,8 @@ class TornadoTextTestRunner(unittest.TextTestRunner):
        self.stream.write("\n")
        return result

if __name__ == '__main__':

def main():
    # The -W command-line option does not work in a virtualenv with
    # python 3 (as of virtualenv 1.7), so configure warnings
    # programmatically instead.

@@ -77,6 +84,9 @@ if __name__ == '__main__':
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("error", category=DeprecationWarning,
                            module=r"tornado\..*")
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    warnings.filterwarnings("error", category=PendingDeprecationWarning,
                            module=r"tornado\..*")
    # The unittest module is aggressive about deprecating redundant methods,
    # leaving some without non-deprecated spellings that work on both
    # 2.7 and 3.2

@@ -86,7 +96,8 @@ if __name__ == '__main__':
    logging.getLogger("tornado.access").setLevel(logging.CRITICAL)

    define('httpclient', type=str, default=None,
           callback=AsyncHTTPClient.configure)
           callback=lambda s: AsyncHTTPClient.configure(
               s, defaults=dict(allow_ipv6=False)))
    define('ioloop', type=str, default=None)
    define('ioloop_time_monotonic', default=False)
    define('resolver', type=str, default=None,

@@ -121,3 +132,6 @@ if __name__ == '__main__':
        kwargs['warnings'] = False
    kwargs['testRunner'] = TornadoTextTestRunner
    tornado.testing.main(**kwargs)

if __name__ == '__main__':
    main()
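The --httpclient hook above passes a defaults dict through AsyncHTTPClient.configure; the same mechanism works in application code, for instance to select the curl backend (assuming pycurl is installed) with shared request defaults. The specific default values are illustrative.

from tornado.httpclient import AsyncHTTPClient

AsyncHTTPClient.configure(
    'tornado.curl_httpclient.CurlAsyncHTTPClient',
    defaults=dict(connect_timeout=5, request_timeout=30))
client = AsyncHTTPClient()  # instances now share those request defaults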
@@ -10,16 +10,18 @@ import re
import socket
import sys

from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _DEFAULT_CA_CERTS
from tornado.log import gen_log, app_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.web import RequestHandler, Application, asynchronous, url
from tornado.test.util import skipOnTravis, skipIfNoIPv6
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body


class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):

@@ -69,7 +71,8 @@ class OptionsHandler(RequestHandler):
class NoContentHandler(RequestHandler):
    def get(self):
        if self.get_argument("error", None):
            self.set_header("Content-Length", "7")
            self.set_header("Content-Length", "5")
            self.write("hello")
        self.set_status(204)


@@ -93,6 +96,30 @@ class HostEchoHandler(RequestHandler):
        self.write(self.request.headers["Host"])


class NoContentLengthHandler(RequestHandler):
    @gen.coroutine
    def get(self):
        # Emulate the old HTTP/1.0 behavior of returning a body with no
        # content-length.  Tornado handles content-length at the framework
        # level so we have to go around it.
        stream = self.request.connection.stream
        yield stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
                           b"hello")
        stream.close()


class EchoPostHandler(RequestHandler):
    def post(self):
        self.write(self.request.body)


@stream_request_body
class RespondInPrepareHandler(RequestHandler):
    def prepare(self):
        self.set_status(403)
        self.finish("forbidden")


class SimpleHTTPClientTestMixin(object):
    def get_app(self):
        # callable objects to finish pending /trigger requests

@@ -111,6 +138,9 @@ class SimpleHTTPClientTestMixin(object):
            url("/see_other_post", SeeOtherPostHandler),
            url("/see_other_get", SeeOtherGetHandler),
            url("/host_echo", HostEchoHandler),
            url("/no_content_length", NoContentLengthHandler),
            url("/echo_post", EchoPostHandler),
            url("/respond_in_prepare", RespondInPrepareHandler),
        ], gzip=True)

    def test_singleton(self):

@@ -122,9 +152,9 @@ class SimpleHTTPClientTestMixin(object):
                        SimpleAsyncHTTPClient(self.io_loop,
                                              force_instance=True))
        # different IOLoops use different objects
        io_loop2 = IOLoop()
        self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
                        SimpleAsyncHTTPClient(io_loop2))
        with closing(IOLoop()) as io_loop2:
            self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
                            SimpleAsyncHTTPClient(io_loop2))

    def test_connection_limit(self):
        with closing(self.create_client(max_clients=2)) as client:

@@ -162,7 +192,7 @@ class SimpleHTTPClientTestMixin(object):
        response.rethrow()

    def test_default_certificates_exist(self):
        open(_DEFAULT_CA_CERTS).close()
        open(_default_ca_certs()).close()

    def test_gzip(self):
        # All the tests in this file should be using gzip, but this test

@@ -212,28 +242,30 @@ class SimpleHTTPClientTestMixin(object):
        # trigger the hanging request to let it clean up after itself
        self.triggers.popleft()()

    @unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')
    @skipIfNoIPv6
    def test_ipv6(self):
        try:
            self.http_server.listen(self.get_http_port(), address='::1')
            [sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
            port = sock.getsockname()[1]
            self.http_server.add_socket(sock)
        except socket.gaierror as e:
            if e.args[0] == socket.EAI_ADDRFAMILY:
                # python supports ipv6, but it's not configured on the network
                # interface, so skip this test.
                return
            raise
        url = self.get_url("/hello").replace("localhost", "[::1]")
        url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)

        # ipv6 is currently disabled by default and must be explicitly requested
        self.http_client.fetch(url, self.stop)
        # ipv6 is currently enabled by default but can be disabled
        self.http_client.fetch(url, self.stop, allow_ipv6=False)
        response = self.wait()
        self.assertEqual(response.code, 599)

        self.http_client.fetch(url, self.stop, allow_ipv6=True)
        self.http_client.fetch(url, self.stop)
        response = self.wait()
        self.assertEqual(response.body, b"Hello world!")

    def test_multiple_content_length_accepted(self):
    def xtest_multiple_content_length_accepted(self):
        response = self.fetch("/content_length?value=2,2")
        self.assertEqual(response.body, b"ok")
        response = self.fetch("/content_length?value=2,%202,2")

@@ -265,7 +297,8 @@ class SimpleHTTPClientTestMixin(object):
        self.assertEqual(response.headers["Content-length"], "0")

        # 204 status with non-zero content length is malformed
        response = self.fetch("/no_content?error=1")
        with ExpectLog(app_log, "Uncaught exception"):
            response = self.fetch("/no_content?error=1")
        self.assertEqual(response.code, 599)

    def test_host_header(self):

@@ -288,14 +321,86 @@ class SimpleHTTPClientTestMixin(object):

        if sys.platform != 'cygwin':
            # cygwin returns EPERM instead of ECONNREFUSED here
            self.assertTrue(str(errno.ECONNREFUSED) in str(response.error),
                            response.error)
            contains_errno = str(errno.ECONNREFUSED) in str(response.error)
            if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
                contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
            self.assertTrue(contains_errno, response.error)
            # This is usually "Connection refused".
            # On windows, strerror is broken and returns "Unknown error".
            expected_message = os.strerror(errno.ECONNREFUSED)
            self.assertTrue(expected_message in str(response.error),
                            response.error)

    def test_queue_timeout(self):
        with closing(self.create_client(max_clients=1)) as client:
            client.fetch(self.get_url('/trigger'), self.stop,
                         request_timeout=10)
            # Wait for the trigger request to block, not complete.
            self.wait()
            client.fetch(self.get_url('/hello'), self.stop,
                         connect_timeout=0.1)
            response = self.wait()

            self.assertEqual(response.code, 599)
            self.assertTrue(response.request_time < 1, response.request_time)
            self.assertEqual(str(response.error), "HTTP 599: Timeout")
            self.triggers.popleft()()
            self.wait()

    def test_no_content_length(self):
        response = self.fetch("/no_content_length")
        self.assertEqual(b"hello", response.body)

    def sync_body_producer(self, write):
        write(b'1234')
        write(b'5678')

    @gen.coroutine
    def async_body_producer(self, write):
        yield write(b'1234')
        yield gen.Task(IOLoop.current().add_callback)
        yield write(b'5678')

    def test_sync_body_producer_chunked(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.sync_body_producer)
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_sync_body_producer_content_length(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.sync_body_producer,
                              headers={'Content-Length': '8'})
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_async_body_producer_chunked(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.async_body_producer)
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_async_body_producer_content_length(self):
        response = self.fetch("/echo_post", method="POST",
                              body_producer=self.async_body_producer,
                              headers={'Content-Length': '8'})
        response.rethrow()
        self.assertEqual(response.body, b"12345678")

    def test_100_continue(self):
        response = self.fetch("/echo_post", method="POST",
                              body=b"1234",
                              expect_100_continue=True)
        self.assertEqual(response.body, b"1234")

    def test_100_continue_early_response(self):
        def body_producer(write):
            raise Exception("should not be called")
        response = self.fetch("/respond_in_prepare", method="POST",
                              body_producer=body_producer,
                              expect_100_continue=True)
        self.assertEqual(response.code, 403)


class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
    def setUp(self):

@@ -396,3 +501,52 @@ class HostnameMappingTestCase(AsyncHTTPTestCase):
        response = self.wait()
        response.rethrow()
        self.assertEqual(response.body, b'Hello world!')


class ResolveTimeoutTestCase(AsyncHTTPTestCase):
    def setUp(self):
        # Dummy Resolver subclass that never invokes its callback.
        class BadResolver(Resolver):
            def resolve(self, *args, **kwargs):
                pass

        super(ResolveTimeoutTestCase, self).setUp()
        self.http_client = SimpleAsyncHTTPClient(
            self.io_loop,
            resolver=BadResolver())

    def get_app(self):
        return Application([url("/hello", HelloWorldHandler), ])

    def test_resolve_timeout(self):
        response = self.fetch('/hello', connect_timeout=0.1)
        self.assertEqual(response.code, 599)


class MaxHeaderSizeTest(AsyncHTTPTestCase):
    def get_app(self):
        class SmallHeaders(RequestHandler):
            def get(self):
                self.set_header("X-Filler", "a" * 100)
                self.write("ok")

        class LargeHeaders(RequestHandler):
            def get(self):
                self.set_header("X-Filler", "a" * 1000)
                self.write("ok")

        return Application([('/small', SmallHeaders),
                            ('/large', LargeHeaders)])

    def get_http_client(self):
        return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024)

    def test_small_headers(self):
        response = self.fetch('/small')
        response.rethrow()
        self.assertEqual(response.body, b'ok')

    def test_large_headers(self):
        with ExpectLog(gen_log, "Unsatisfiable read"):
            response = self.fetch('/large')
        self.assertEqual(response.code, 599)
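The client side of the body_producer/expect_100_continue tests, extracted into a rough stand-alone form. The URL is a placeholder, and (as the comment in BodyLimitsTest notes) body_producer requires simple_httpclient rather than curl_httpclient.

from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop


@gen.coroutine
def produce(write):
    # Each write() returns a Future; yielding it provides flow control.
    yield write(b'1234')
    yield write(b'5678')


@gen.coroutine
def post():
    client = AsyncHTTPClient()
    # No Content-Length header, so the body goes out chunked.
    response = yield client.fetch('http://127.0.0.1:8888/echo_post',
                                  method='POST', body_producer=produce,
                                  expect_100_continue=True)
    raise gen.Return(response.body)

if __name__ == '__main__':
    print(IOLoop.current().run_sync(post))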
@ -35,11 +35,11 @@ class TestRequestHandler(RequestHandler):
|
|||
logging.debug('in part3()')
|
||||
raise Exception('test exception')
|
||||
|
||||
def get_error_html(self, status_code, **kwargs):
|
||||
if 'exception' in kwargs and str(kwargs['exception']) == 'test exception':
|
||||
return 'got expected exception'
|
||||
def write_error(self, status_code, **kwargs):
|
||||
if 'exc_info' in kwargs and str(kwargs['exc_info'][1]) == 'test exception':
|
||||
self.write('got expected exception')
|
||||
else:
|
||||
return 'unexpected failure'
|
||||
self.write('unexpected failure')
|
||||
|
||||
|
||||
class HTTPStackContextTest(AsyncHTTPTestCase):
|
||||
|
|
@ -219,16 +219,22 @@ class StackContextTest(AsyncTestCase):
    def test_yield_in_with(self):
        @gen.engine
        def f():
            self.callback = yield gen.Callback('a')
            with StackContext(functools.partial(self.context, 'c1')):
                # This yield is a problem: the generator will be suspended
                # and the StackContext's __exit__ is not called yet, so
                # the context will be left on _state.contexts for anything
                # that runs before the yield resolves.
                yield gen.Task(self.io_loop.add_callback)
                yield gen.Wait('a')

        with self.assertRaises(StackContextInconsistentError):
            f()
            self.wait()
        # Cleanup: to avoid GC warnings (which for some reason only seem
        # to show up on py33-asyncio), invoke the callback (which will do
        # nothing since the gen.Runner is already finished) and delete it.
        self.callback()
        del self.callback

    @gen_test
    def test_yield_outside_with(self):

@ -256,12 +262,13 @@ class StackContextTest(AsyncTestCase):
        self.io_loop.add_callback(cb)
        yield gen.Wait('k1')

    @gen_test
    def test_run_with_stack_context(self):
        @gen.coroutine
        def f1():
            self.assertEqual(self.active_contexts, ['c1'])
            yield run_with_stack_context(
                StackContext(functools.partial(self.context, 'c1')),
                StackContext(functools.partial(self.context, 'c2')),
                f2)
            self.assertEqual(self.active_contexts, ['c1'])

@ -272,7 +279,7 @@ class StackContextTest(AsyncTestCase):
            self.assertEqual(self.active_contexts, ['c1', 'c2'])

        self.assertEqual(self.active_contexts, [])
        run_with_stack_context(
        yield run_with_stack_context(
            StackContext(functools.partial(self.context, 'c1')),
            f1)
        self.assertEqual(self.active_contexts, [])
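``run_with_stack_context`` (exercised above) runs a coroutine with a ``StackContext`` held across its yield points; a hedged sketch, assuming a simple logging context::

    import contextlib

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.stack_context import StackContext, run_with_stack_context

    @contextlib.contextmanager
    def request_context():
        # Re-entered around every callback that runs inside the coroutine.
        print("enter")
        yield
        print("exit")

    @gen.coroutine
    def work():
        yield gen.Task(IOLoop.current().add_callback)

    IOLoop.current().run_sync(
        lambda: run_with_stack_context(StackContext(request_context), work))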
@ -0,0 +1,278 @@
#!/usr/bin/env python
#
# Copyright 2014 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function, with_statement

from contextlib import closing
import os
import socket

from tornado.concurrent import Future
from tornado.netutil import bind_sockets, Resolver
from tornado.tcpclient import TCPClient, _Connector
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncTestCase, bind_unused_port, gen_test
from tornado.test.util import skipIfNoIPv6, unittest

# Fake address families for testing. Used in place of AF_INET
# and AF_INET6 because some installations do not have AF_INET6.
AF1, AF2 = 1, 2


class TestTCPServer(TCPServer):
    def __init__(self, family):
        super(TestTCPServer, self).__init__()
        self.streams = []
        sockets = bind_sockets(None, 'localhost', family)
        self.add_sockets(sockets)
        self.port = sockets[0].getsockname()[1]

    def handle_stream(self, stream, address):
        self.streams.append(stream)

    def stop(self):
        super(TestTCPServer, self).stop()
        for stream in self.streams:
            stream.close()


class TCPClientTest(AsyncTestCase):
    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        Resolver().resolve('localhost', 0, callback=self.stop)
        addrinfo = self.wait()
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")

    @gen_test
    def do_test_connect(self, family, host):
        port = self.start_server(family)
        stream = yield self.client.connect(host, port)
        with closing(stream):
            stream.write(b"hello")
            data = yield self.server.streams[0].read_bytes(5)
            self.assertEqual(data, b"hello")

    def test_connect_ipv4_ipv4(self):
        self.do_test_connect(socket.AF_INET, '127.0.0.1')

    def test_connect_ipv4_dual(self):
        self.do_test_connect(socket.AF_INET, 'localhost')

    @skipIfNoIPv6
    def test_connect_ipv6_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_INET6, '::1')

    @skipIfNoIPv6
    def test_connect_ipv6_dual(self):
        self.skipIfLocalhostV4()
        if Resolver.configured_class().__name__.endswith('TwistedResolver'):
            self.skipTest('TwistedResolver does not support multiple addresses')
        self.do_test_connect(socket.AF_INET6, 'localhost')

    def test_connect_unspec_ipv4(self):
        self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')

    @skipIfNoIPv6
    def test_connect_unspec_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_UNSPEC, '::1')

    def test_connect_unspec_dual(self):
        self.do_test_connect(socket.AF_UNSPEC, 'localhost')

    @gen_test
    def test_refused_ipv4(self):
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            yield self.client.connect('127.0.0.1', port)


class TestConnectorSplit(unittest.TestCase):
    def test_one_family(self):
        # These addresses aren't in the right format, but split doesn't care.
        primary, secondary = _Connector.split(
            [(AF1, 'a'),
             (AF1, 'b')])
        self.assertEqual(primary, [(AF1, 'a'),
                                   (AF1, 'b')])
        self.assertEqual(secondary, [])

    def test_mixed(self):
        primary, secondary = _Connector.split(
            [(AF1, 'a'),
             (AF2, 'b'),
             (AF1, 'c'),
             (AF2, 'd')])
        self.assertEqual(primary, [(AF1, 'a'), (AF1, 'c')])
        self.assertEqual(secondary, [(AF2, 'b'), (AF2, 'd')])


class ConnectorTest(AsyncTestCase):
    class FakeStream(object):
        def __init__(self):
            self.closed = False

        def close(self):
            self.closed = True

    def setUp(self):
        super(ConnectorTest, self).setUp()
        self.connect_futures = {}
        self.streams = {}
        self.addrinfo = [(AF1, 'a'), (AF1, 'b'),
                         (AF2, 'c'), (AF2, 'd')]

    def tearDown(self):
        # Unless explicitly checked (and popped) in the test, we shouldn't
        # be closing any streams
        for stream in self.streams.values():
            self.assertFalse(stream.closed)
        super(ConnectorTest, self).tearDown()

    def create_stream(self, af, addr):
        future = Future()
        self.connect_futures[(af, addr)] = future
        return future

    def assert_pending(self, *keys):
        self.assertEqual(sorted(self.connect_futures.keys()), sorted(keys))

    def resolve_connect(self, af, addr, success):
        future = self.connect_futures.pop((af, addr))
        if success:
            self.streams[addr] = ConnectorTest.FakeStream()
            future.set_result(self.streams[addr])
        else:
            future.set_exception(IOError())

    def start_connect(self, addrinfo):
        conn = _Connector(addrinfo, self.io_loop, self.create_stream)
        # Give it a huge timeout; we'll trigger timeouts manually.
        future = conn.start(3600)
        return conn, future

    def test_immediate_success(self):
        conn, future = self.start_connect(self.addrinfo)
        self.assertEqual(list(self.connect_futures.keys()),
                         [(AF1, 'a')])
        self.resolve_connect(AF1, 'a', True)
        self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))

    def test_immediate_failure(self):
        # Fail with just one address.
        conn, future = self.start_connect([(AF1, 'a')])
        self.assert_pending((AF1, 'a'))
        self.resolve_connect(AF1, 'a', False)
        self.assertRaises(IOError, future.result)

    def test_one_family_second_try(self):
        conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
        self.assert_pending((AF1, 'a'))
        self.resolve_connect(AF1, 'a', False)
        self.assert_pending((AF1, 'b'))
        self.resolve_connect(AF1, 'b', True)
        self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))

    def test_one_family_second_try_failure(self):
        conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
        self.assert_pending((AF1, 'a'))
        self.resolve_connect(AF1, 'a', False)
        self.assert_pending((AF1, 'b'))
        self.resolve_connect(AF1, 'b', False)
        self.assertRaises(IOError, future.result)

    def test_one_family_second_try_timeout(self):
        conn, future = self.start_connect([(AF1, 'a'), (AF1, 'b')])
        self.assert_pending((AF1, 'a'))
        # trigger the timeout while the first lookup is pending;
        # nothing happens.
        conn.on_timeout()
        self.assert_pending((AF1, 'a'))
        self.resolve_connect(AF1, 'a', False)
        self.assert_pending((AF1, 'b'))
        self.resolve_connect(AF1, 'b', True)
        self.assertEqual(future.result(), (AF1, 'b', self.streams['b']))

    def test_two_families_immediate_failure(self):
        conn, future = self.start_connect(self.addrinfo)
        self.assert_pending((AF1, 'a'))
        self.resolve_connect(AF1, 'a', False)
        self.assert_pending((AF1, 'b'), (AF2, 'c'))
        self.resolve_connect(AF1, 'b', False)
        self.resolve_connect(AF2, 'c', True)
        self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))

    def test_two_families_timeout(self):
        conn, future = self.start_connect(self.addrinfo)
        self.assert_pending((AF1, 'a'))
        conn.on_timeout()
        self.assert_pending((AF1, 'a'), (AF2, 'c'))
        self.resolve_connect(AF2, 'c', True)
        self.assertEqual(future.result(), (AF2, 'c', self.streams['c']))
        # resolving 'a' after the connection has completed doesn't start 'b'
        self.resolve_connect(AF1, 'a', False)
        self.assert_pending()

    def test_success_after_timeout(self):
        conn, future = self.start_connect(self.addrinfo)
        self.assert_pending((AF1, 'a'))
        conn.on_timeout()
        self.assert_pending((AF1, 'a'), (AF2, 'c'))
        self.resolve_connect(AF1, 'a', True)
        self.assertEqual(future.result(), (AF1, 'a', self.streams['a']))
        # resolving 'c' after completion closes the connection.
        self.resolve_connect(AF2, 'c', True)
        self.assertTrue(self.streams.pop('c').closed)

    def test_all_fail(self):
        conn, future = self.start_connect(self.addrinfo)
        self.assert_pending((AF1, 'a'))
        conn.on_timeout()
        self.assert_pending((AF1, 'a'), (AF2, 'c'))
        self.resolve_connect(AF2, 'c', False)
        self.assert_pending((AF1, 'a'), (AF2, 'd'))
        self.resolve_connect(AF2, 'd', False)
        # one queue is now empty
        self.assert_pending((AF1, 'a'))
        self.resolve_connect(AF1, 'a', False)
        self.assert_pending((AF1, 'b'))
        self.assertFalse(future.done())
        self.resolve_connect(AF1, 'b', False)
        self.assertRaises(IOError, future.result)
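The tests above cover ``_Connector``'s per-family fallback; the public entry point is ``TCPClient.connect``, sketched here under the assumption that something is listening on the chosen port::

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.tcpclient import TCPClient

    @gen.coroutine
    def fetch_banner():
        # connect() resolves the host, tries each address, and yields an IOStream.
        stream = yield TCPClient().connect('localhost', 8888)
        stream.write(b'hello\n')
        line = yield stream.read_until(b'\n')
        stream.close()
        raise gen.Return(line)

    print(IOLoop.current().run_sync(fetch_banner))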
@ -182,6 +182,7 @@ three
"""})
        try:
            loader.load("test.html").generate()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            self.assertTrue("# test.html:2" in traceback.format_exc())

@ -192,6 +193,7 @@ three{%end%}
"""})
        try:
            loader.load("test.html").generate()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            self.assertTrue("# test.html:2" in traceback.format_exc())

@ -202,6 +204,7 @@ three{%end%}
        }, namespace={"_tt_modules": ObjectDict({"Template": lambda path, **kwargs: loader.load(path).generate(**kwargs)})})
        try:
            loader.load("base.html").generate()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            exc_stack = traceback.format_exc()
            self.assertTrue('# base.html:1' in exc_stack)

@ -214,6 +217,7 @@ three{%end%}
        })
        try:
            loader.load("base.html").generate()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            self.assertTrue("# sub.html:1 (via base.html:1)" in
                            traceback.format_exc())

@ -225,6 +229,7 @@ three{%end%}
        })
        try:
            loader.load("sub.html").generate()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            exc_stack = traceback.format_exc()
            self.assertTrue("# base.html:1" in exc_stack)

@ -240,6 +245,7 @@ three{%end%}
"""})
        try:
            loader.load("sub.html").generate()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            self.assertTrue("# sub.html:4 (via base.html:1)" in
                            traceback.format_exc())

@ -252,6 +258,7 @@ three{%end%}
        })
        try:
            loader.load("a.html").generate()
            self.fail("did not get expected exception")
        except ZeroDivisionError:
            self.assertTrue("# c.html:1 (via b.html:1, a.html:1)" in
                            traceback.format_exc())

@ -380,6 +387,20 @@ raw: {% raw name %}""",
        self.assertEqual(render("foo.py", ["not a string"]),
                         b"""s = "['not a string']"\n""")

    def test_minimize_whitespace(self):
        # Whitespace including newlines is allowed within template tags
        # and directives, and this is one way to avoid long lines while
        # keeping extra whitespace out of the rendered output.
        loader = DictLoader({'foo.txt': """\
{% for i in items
  %}{% if i > 0 %}, {% end %}{#
  #}{{i
  }}{% end
%}""",
                             })
        self.assertEqual(loader.load("foo.txt").generate(items=range(5)),
                         b"0, 1, 2, 3, 4")


class TemplateLoaderTest(unittest.TestCase):
    def setUp(self):
@ -8,11 +8,12 @@ from tornado.test.util import unittest

import contextlib
import os
import traceback


@contextlib.contextmanager
def set_environ(name, value):
    old_value = os.environ.get('name')
    old_value = os.environ.get(name)
    os.environ[name] = value

    try:
@ -62,6 +63,39 @@ class AsyncTestCaseTest(AsyncTestCase):
        self.wait(timeout=0.15)


class AsyncTestCaseWrapperTest(unittest.TestCase):
    def test_undecorated_generator(self):
        class Test(AsyncTestCase):
            def test_gen(self):
                yield
        test = Test('test_gen')
        result = unittest.TestResult()
        test.run(result)
        self.assertEqual(len(result.errors), 1)
        self.assertIn("should be decorated", result.errors[0][1])

    def test_undecorated_generator_with_skip(self):
        class Test(AsyncTestCase):
            @unittest.skip("don't run this")
            def test_gen(self):
                yield
        test = Test('test_gen')
        result = unittest.TestResult()
        test.run(result)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)

    def test_other_return(self):
        class Test(AsyncTestCase):
            def test_other_return(self):
                return 42
        test = Test('test_other_return')
        result = unittest.TestResult()
        test.run(result)
        self.assertEqual(len(result.errors), 1)
        self.assertIn("Return value from test method ignored", result.errors[0][1])


class SetUpTearDownTest(unittest.TestCase):
    def test_set_up_tear_down(self):
        """
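The fix these tests demand on the user side is simply to decorate generator test methods; a hedged sketch::

    from tornado import gen
    from tornado.testing import AsyncTestCase, gen_test

    class DecoratedTest(AsyncTestCase):
        @gen_test  # without this, the generator would be created and ignored
        def test_something_async(self):
            yield gen.Task(self.io_loop.add_callback)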
@ -115,8 +149,17 @@ class GenTest(AsyncTestCase):
        def test(self):
            yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)

        with self.assertRaises(ioloop.TimeoutError):
        # This can't use assertRaises because we need to inspect the
        # exc_info triple (and not just the exception object)
        try:
            test(self)
            self.fail("did not get expected exception")
        except ioloop.TimeoutError:
            # The stack trace should blame the add_timeout line, not just
            # unrelated IOLoop/testing internals.
            self.assertIn(
                "gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)",
                traceback.format_exc())

        self.finished = True
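A hedged sketch of the ``timeout`` argument being exercised here (the values are illustrative)::

    from tornado import gen
    from tornado.testing import AsyncTestCase, gen_test

    class QuickTest(AsyncTestCase):
        @gen_test(timeout=0.5)  # overrides the default / ASYNC_TEST_TIMEOUT
        def test_finishes_quickly(self):
            yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 0.1)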
@ -155,5 +198,23 @@ class GenTest(AsyncTestCase):

        self.finished = True

    def test_with_method_args(self):
        @gen_test
        def test_with_args(self, *args):
            self.assertEqual(args, ('test',))
            yield gen.Task(self.io_loop.add_callback)

        test_with_args(self, 'test')
        self.finished = True

    def test_with_method_kwargs(self):
        @gen_test
        def test_with_kwargs(self, **kwargs):
            self.assertDictEqual(kwargs, {'test': 'test'})
            yield gen.Task(self.io_loop.add_callback)

        test_with_kwargs(self, test='test')
        self.finished = True

if __name__ == '__main__':
    unittest.main()
@ -470,14 +470,17 @@ if have_twisted:
        'twisted.internet.test.test_core.ObjectModelIntegrationTest': [],
        'twisted.internet.test.test_core.SystemEventTestsBuilder': [
            'test_iterate',  # deliberately not supported
            'test_runAfterCrash',  # fails because TwistedIOLoop uses the global reactor
        ] if issubclass(IOLoop.configured_class(), TwistedIOLoop) else [
            'test_iterate',  # deliberately not supported
            # Fails on TwistedIOLoop and AsyncIOLoop.
            'test_runAfterCrash',
        ],
        'twisted.internet.test.test_fdset.ReactorFDSetTestsBuilder': [
            "test_lostFileDescriptor",  # incompatible with epoll and kqueue
        ],
        'twisted.internet.test.test_process.ProcessTestsBuilder': [
            # Only work as root.  Twisted's "skip" functionality works
            # with py27+, but not unittest2 on py26.
            'test_changeGID',
            'test_changeUID',
        ],
        # Process tests appear to work on OSX 10.7, but not 10.6
        # 'twisted.internet.test.test_process.PTYProcessTestsBuilder': [
@ -1,14 +1,18 @@
from __future__ import absolute_import, division, print_function, with_statement

import os
import socket
import sys

# Encapsulate the choice of unittest or unittest2 here.
# To be used as 'from tornado.test.util import unittest'.
if sys.version_info >= (2, 7):
    import unittest
else:
if sys.version_info < (2, 7):
    # In py26, we must always use unittest2.
    import unittest2 as unittest
else:
    # Otherwise, use whichever version of unittest was imported in
    # tornado.testing.
    from tornado.testing import unittest

skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
                                "non-unix platform")
@ -17,3 +21,10 @@ skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
                               'timing tests unreliable on travis')

# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ,
                                  'network access disabled')

skipIfNoIPv6 = unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')
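These decorators compose with any test method; a hedged usage sketch (the hostname is illustrative)::

    import socket

    from tornado.test.util import skipIfNoNetwork, unittest

    class ExternalServiceTest(unittest.TestCase):
        @skipIfNoNetwork  # skipped whenever NO_NETWORK is set in the environment
        def test_dns_lookup(self):
            self.assertTrue(socket.gethostbyname('tornadoweb.org'))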
@ -151,14 +151,22 @@ class ArgReplacerTest(unittest.TestCase):
        self.replacer = ArgReplacer(function, 'callback')

    def test_omitted(self):
        self.assertEqual(self.replacer.replace('new', (1, 2), dict()),
        args = (1, 2)
        kwargs = dict()
        self.assertIs(self.replacer.get_old_value(args, kwargs), None)
        self.assertEqual(self.replacer.replace('new', args, kwargs),
                         (None, (1, 2), dict(callback='new')))

    def test_position(self):
        self.assertEqual(self.replacer.replace('new', (1, 2, 'old', 3), dict()),
        args = (1, 2, 'old', 3)
        kwargs = dict()
        self.assertEqual(self.replacer.get_old_value(args, kwargs), 'old')
        self.assertEqual(self.replacer.replace('new', args, kwargs),
                         ('old', [1, 2, 'new', 3], dict()))

    def test_keyword(self):
        self.assertEqual(self.replacer.replace('new', (1,),
                                               dict(y=2, callback='old', z=3)),
        args = (1,)
        kwargs = dict(y=2, callback='old', z=3)
        self.assertEqual(self.replacer.get_old_value(args, kwargs), 'old')
        self.assertEqual(self.replacer.replace('new', args, kwargs),
                         ('old', (1,), dict(y=2, callback='new', z=3)))
File diff suppressed because it is too large
@ -1,21 +1,66 @@
from __future__ import absolute_import, division, print_function, with_statement

import traceback

from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError
from tornado.log import gen_log
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
from tornado.util import u

try:
    import tornado.websocket
    from tornado.util import _websocket_mask_python
except ImportError:
    # The unittest module presents misleading errors on ImportError
    # (it acts as if websocket_test could not be found, hiding the underlying
    # error).  If we get an ImportError here (which could happen due to
    # TORNADO_EXTENSION=1), print some extra information before failing.
    traceback.print_exc()
    raise

from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError

try:
    from tornado import speedups
except ImportError:
    speedups = None


class EchoHandler(WebSocketHandler):
class TestWebSocketHandler(WebSocketHandler):
    """Base class for testing handlers that exposes the on_close event.

    This allows for deterministic cleanup of the associated socket.
    """
    def initialize(self, close_future):
        self.close_future = close_future

    def on_close(self):
        self.close_future.set_result((self.close_code, self.close_reason))


class EchoHandler(TestWebSocketHandler):
    def on_message(self, message):
        self.write_message(message, isinstance(message, bytes))

    def on_close(self):
        self.close_future.set_result(None)

class ErrorInOnMessageHandler(TestWebSocketHandler):
    def on_message(self, message):
        1/0


class HeaderHandler(TestWebSocketHandler):
    def open(self):
        try:
            # In a websocket context, many RequestHandler methods
            # raise RuntimeErrors.
            self.set_status(503)
            raise Exception("did not get expected exception")
        except RuntimeError:
            pass
        self.write_message(self.request.headers.get('X-Test', ''))


class NonWebSocketHandler(RequestHandler):
@ -23,14 +68,29 @@ class NonWebSocketHandler(RequestHandler):
        self.write('ok')


class CloseReasonHandler(TestWebSocketHandler):
    def open(self):
        self.close(1001, "goodbye")


class WebSocketTest(AsyncHTTPTestCase):
    def get_app(self):
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(close_future=self.close_future)),
            ('/non_ws', NonWebSocketHandler),
            ('/header', HeaderHandler, dict(close_future=self.close_future)),
            ('/close_reason', CloseReasonHandler,
             dict(close_future=self.close_future)),
            ('/error_in_on_message', ErrorInOnMessageHandler,
             dict(close_future=self.close_future)),
        ])

    def test_http_request(self):
        # WS server, HTTP client.
        response = self.fetch('/echo')
        self.assertEqual(response.code, 400)

    @gen_test
    def test_websocket_gen(self):
        ws = yield websocket_connect(
@ -39,6 +99,8 @@ class WebSocketTest(AsyncHTTPTestCase):
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future

    def test_websocket_callbacks(self):
        websocket_connect(
@ -49,6 +111,40 @@ class WebSocketTest(AsyncHTTPTestCase):
        ws.read_message(self.stop)
        response = self.wait().result()
        self.assertEqual(response, 'hello')
        self.close_future.add_done_callback(lambda f: self.stop())
        ws.close()
        self.wait()

    @gen_test
    def test_binary_message(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port())
        ws.write_message(b'hello \xe9', binary=True)
        response = yield ws.read_message()
        self.assertEqual(response, b'hello \xe9')
        ws.close()
        yield self.close_future

    @gen_test
    def test_unicode_message(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port())
        ws.write_message(u('hello \u00e9'))
        response = yield ws.read_message()
        self.assertEqual(response, u('hello \u00e9'))
        ws.close()
        yield self.close_future

    @gen_test
    def test_error_in_on_message(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/error_in_on_message' % self.get_http_port())
        ws.write_message('hello')
        with ExpectLog(app_log, "Uncaught exception"):
            response = yield ws.read_message()
        self.assertIs(response, None)
        ws.close()
        yield self.close_future

    @gen_test
    def test_websocket_http_fail(self):
@ -69,13 +165,12 @@ class WebSocketTest(AsyncHTTPTestCase):
    def test_websocket_network_fail(self):
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(HTTPError) as cm:
        with self.assertRaises(IOError):
            with ExpectLog(gen_log, ".*"):
                yield websocket_connect(
                    'ws://localhost:%d/' % port,
                    io_loop=self.io_loop,
                    connect_timeout=0.01)
        self.assertEqual(cm.exception.code, 599)
                    connect_timeout=3600)

    @gen_test
    def test_websocket_close_buffered_data(self):
@ -85,3 +180,134 @@ class WebSocketTest(AsyncHTTPTestCase):
        ws.write_message('world')
        ws.stream.close()
        yield self.close_future

    @gen_test
    def test_websocket_headers(self):
        # Ensure that arbitrary headers can be passed through websocket_connect.
        ws = yield websocket_connect(
            HTTPRequest('ws://localhost:%d/header' % self.get_http_port(),
                        headers={'X-Test': 'hello'}))
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future

    @gen_test
    def test_server_close_reason(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/close_reason' % self.get_http_port())
        msg = yield ws.read_message()
        # A message of None means the other side closed the connection.
        self.assertIs(msg, None)
        self.assertEqual(ws.close_code, 1001)
        self.assertEqual(ws.close_reason, "goodbye")

    @gen_test
    def test_client_close_reason(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port())
        ws.close(1001, 'goodbye')
        code, reason = yield self.close_future
        self.assertEqual(code, 1001)
        self.assertEqual(reason, 'goodbye')

    @gen_test
    def test_check_origin_valid_no_path(self):
        port = self.get_http_port()

        url = 'ws://localhost:%d/echo' % port
        headers = {'Origin': 'http://localhost:%d' % port}

        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future

    @gen_test
    def test_check_origin_valid_with_path(self):
        port = self.get_http_port()

        url = 'ws://localhost:%d/echo' % port
        headers = {'Origin': 'http://localhost:%d/something' % port}

        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future

    @gen_test
    def test_check_origin_invalid_partial_url(self):
        port = self.get_http_port()

        url = 'ws://localhost:%d/echo' % port
        headers = {'Origin': 'localhost:%d' % port}

        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)

    @gen_test
    def test_check_origin_invalid(self):
        port = self.get_http_port()

        url = 'ws://localhost:%d/echo' % port
        # Host is localhost, which should not be accessible from some other
        # domain
        headers = {'Origin': 'http://somewhereelse.com'}

        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)

        self.assertEqual(cm.exception.code, 403)

    @gen_test
    def test_check_origin_invalid_subdomains(self):
        port = self.get_http_port()

        url = 'ws://localhost:%d/echo' % port
        # Subdomains should be disallowed by default.  If we could pass a
        # resolver to websocket_connect we could test sibling domains as well.
        headers = {'Origin': 'http://subtenant.localhost'}

        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)

        self.assertEqual(cm.exception.code, 403)


class MaskFunctionMixin(object):
    # Subclasses should define self.mask(mask, data)
    def test_mask(self):
        self.assertEqual(self.mask(b'abcd', b''), b'')
        self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
        self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
        self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
        # Include test cases with \x00 bytes (to ensure that the C
        # extension isn't depending on null-terminated strings) and
        # bytes with the high bit set (to smoke out signedness issues).
        self.assertEqual(self.mask(b'\x00\x01\x02\x03',
                                   b'\xff\xfb\xfd\xfc\xfe\xfa'),
                         b'\xff\xfa\xff\xff\xfe\xfb')
        self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
                                   b'\x00\x01\x02\x03\x04\x05'),
                         b'\xff\xfa\xff\xff\xfb\xfe')


class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    def mask(self, mask, data):
        return _websocket_mask_python(mask, data)


@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    def mask(self, mask, data):
        return speedups.websocket_mask(mask, data)
@ -5,8 +5,8 @@ from tornado.escape import json_decode
from tornado.test.httpserver_test import TypeCheckHandler
from tornado.testing import AsyncHTTPTestCase
from tornado.util import u
from tornado.web import RequestHandler
from tornado.wsgi import WSGIApplication, WSGIContainer
from tornado.web import RequestHandler, Application
from tornado.wsgi import WSGIApplication, WSGIContainer, WSGIAdapter


class WSGIContainerTest(AsyncHTTPTestCase):
@ -74,14 +74,27 @@ class WSGIConnectionTest(httpserver_test.HTTPConnectionTest):
        return WSGIContainer(validator(WSGIApplication(self.get_handlers())))


def wrap_web_tests():
def wrap_web_tests_application():
    result = {}
    for cls in web_test.wsgi_safe_tests:
        class WSGIWrappedTest(cls):
        class WSGIApplicationWrappedTest(cls):
            def get_app(self):
                self.app = WSGIApplication(self.get_handlers(),
                                           **self.get_app_kwargs())
                return WSGIContainer(validator(self.app))
        result["WSGIWrapped_" + cls.__name__] = WSGIWrappedTest
        result["WSGIApplication_" + cls.__name__] = WSGIApplicationWrappedTest
    return result
globals().update(wrap_web_tests())
globals().update(wrap_web_tests_application())


def wrap_web_tests_adapter():
    result = {}
    for cls in web_test.wsgi_safe_tests:
        class WSGIAdapterWrappedTest(cls):
            def get_app(self):
                self.app = Application(self.get_handlers(),
                                       **self.get_app_kwargs())
                return WSGIContainer(validator(WSGIAdapter(self.app)))
        result["WSGIAdapter_" + cls.__name__] = WSGIAdapterWrappedTest
    return result
globals().update(wrap_web_tests_adapter())
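``WSGIAdapter`` (new in 4.0, exercised above) wraps a regular ``tornado.web.Application`` as a WSGI callable; a hedged sketch using the stdlib reference server::

    from wsgiref.simple_server import make_server

    from tornado.web import Application, RequestHandler
    from tornado.wsgi import WSGIAdapter

    class MainHandler(RequestHandler):
        def get(self):
            self.write("Hello from WSGI")

    application = Application([('/', MainHandler)])
    # WSGIAdapter turns the tornado Application into a WSGI app callable.
    server = make_server('127.0.0.1', 8888, WSGIAdapter(application))
    server.serve_forever()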
@ -17,7 +17,7 @@ try:
    from tornado.httpclient import AsyncHTTPClient
    from tornado.httpserver import HTTPServer
    from tornado.simple_httpclient import SimpleAsyncHTTPClient
    from tornado.ioloop import IOLoop
    from tornado.ioloop import IOLoop, TimeoutError
    from tornado import netutil
except ImportError:
    # These modules are not importable on app engine. Parts of this module
@ -38,6 +38,7 @@ import re
import signal
import socket
import sys
import types

try:
    from cStringIO import StringIO  # py2

@ -48,10 +49,16 @@ except ImportError:
# (either py27+ or unittest2) so tornado.test.util enforces
# this requirement, but for other users of tornado.testing we want
# to allow the older version if unittest2 is not available.
try:
    import unittest2 as unittest
except ImportError:
if sys.version_info >= (3,):
    # On python 3, mixing unittest2 and unittest (including doctest)
    # doesn't seem to work, so always use unittest.
    import unittest
else:
    # On python 2, prefer unittest2 when available.
    try:
        import unittest2 as unittest
    except ImportError:
        import unittest

_next_port = 10000
@ -63,8 +70,8 @@ def get_unused_port():
    only that a series of get_unused_port calls in a single process return
    distinct ports.

    **Deprecated**.  Use bind_unused_port instead, which is guaranteed
    to find an unused port.
    .. deprecated::
       Use bind_unused_port instead, which is guaranteed to find an unused port.
    """
    global _next_port
    port = _next_port
@ -95,6 +102,36 @@ def get_async_test_timeout():
    return 5


class _TestMethodWrapper(object):
    """Wraps a test method to raise an error if it returns a value.

    This is mainly used to detect undecorated generators (if a test
    method yields it must use a decorator to consume the generator),
    but will also detect other kinds of return values (these are not
    necessarily errors, but we alert anyway since there is no good
    reason to return a value from a test).
    """
    def __init__(self, orig_method):
        self.orig_method = orig_method

    def __call__(self):
        result = self.orig_method()
        if isinstance(result, types.GeneratorType):
            raise TypeError("Generator test methods should be decorated with "
                            "tornado.testing.gen_test")
        elif result is not None:
            raise ValueError("Return value from test method ignored: %r" %
                             result)

    def __getattr__(self, name):
        """Proxy all unknown attributes to the original method.

        This is important for some of the decorators in the `unittest`
        module, such as `unittest.skipIf`.
        """
        return getattr(self.orig_method, name)


class AsyncTestCase(unittest.TestCase):
    """`~unittest.TestCase` subclass for testing `.IOLoop`-based
    asynchronous code.
@ -157,14 +194,20 @@ class AsyncTestCase(unittest.TestCase):
            self.assertIn("FriendFeed", response.body)
            self.stop()
    """
    def __init__(self, *args, **kwargs):
        super(AsyncTestCase, self).__init__(*args, **kwargs)
    def __init__(self, methodName='runTest', **kwargs):
        super(AsyncTestCase, self).__init__(methodName, **kwargs)
        self.__stopped = False
        self.__running = False
        self.__failure = None
        self.__stop_args = None
        self.__timeout = None

        # It's easy to forget the @gen_test decorator, but if you do
        # the test will silently be ignored because nothing will consume
        # the generator.  Replace the test method with a wrapper that will
        # make sure it's not an undecorated generator.
        setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))

    def setUp(self):
        super(AsyncTestCase, self).setUp()
        self.io_loop = self.get_new_ioloop()
@ -352,6 +395,7 @@ class AsyncHTTPTestCase(AsyncTestCase):

    def tearDown(self):
        self.http_server.stop()
        self.io_loop.run_sync(self.http_server.close_all_connections)
        if (not IOLoop.initialized() or
                self.http_client.io_loop is not IOLoop.instance()):
            self.http_client.close()
@ -414,18 +458,50 @@ def gen_test(func=None, timeout=None):
    .. versionadded:: 3.1
       The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
       variable.

    .. versionchanged:: 4.0
       The wrapper now passes along ``*args, **kwargs`` so it can be used
       on functions with arguments.
    """
    if timeout is None:
        timeout = get_async_test_timeout()

    def wrap(f):
        f = gen.coroutine(f)

        # Stack up several decorators to allow us to access the generator
        # object itself.  In the innermost wrapper, we capture the generator
        # and save it in an attribute of self.  Next, we run the wrapped
        # function through @gen.coroutine.  Finally, the coroutine is
        # wrapped again to make it synchronous with run_sync.
        #
        # This is a good case study arguing for either some sort of
        # extensibility in the gen decorators or cancellation support.
        @functools.wraps(f)
        def wrapper(self):
            return self.io_loop.run_sync(
                functools.partial(f, self), timeout=timeout)
        return wrapper
        def pre_coroutine(self, *args, **kwargs):
            result = f(self, *args, **kwargs)
            if isinstance(result, types.GeneratorType):
                self._test_generator = result
            else:
                self._test_generator = None
            return result

        coro = gen.coroutine(pre_coroutine)

        @functools.wraps(coro)
        def post_coroutine(self, *args, **kwargs):
            try:
                return self.io_loop.run_sync(
                    functools.partial(coro, self, *args, **kwargs),
                    timeout=timeout)
            except TimeoutError as e:
                # run_sync raises an error with an unhelpful traceback.
                # If we throw it back into the generator the stack trace
                # will be replaced by the point where the test is stopped.
                self._test_generator.throw(e)
                # In case the test contains an overly broad except clause,
                # we may get back here.  In this case re-raise the original
                # exception, which is better than nothing.
                raise
        return post_coroutine

    if func is not None:
        # Used like:
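Because ``post_coroutine`` now forwards ``*args, **kwargs``, a ``gen_test``-wrapped helper can be parameterized, mirroring the tests above; a hedged sketch::

    from tornado import gen
    from tornado.testing import AsyncTestCase, gen_test

    class ParamTest(AsyncTestCase):
        def test_both_variants(self):
            @gen_test
            def check(self, expected, actual=None):
                yield gen.Task(self.io_loop.add_callback)
                self.assertEqual(expected, actual)

            # Positional and keyword arguments both pass through the wrapper.
            check(self, 'x', actual='x')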
@ -12,11 +12,19 @@ and `.Resolver`.

from __future__ import absolute_import, division, print_function, with_statement

import array
import inspect
import os
import sys
import zlib


try:
    xrange  # py2
except NameError:
    xrange = range  # py3


class ObjectDict(dict):
    """Makes a dictionary behave like an object, with attribute-style access.
    """
@ -33,7 +41,7 @@ class ObjectDict(dict):
class GzipDecompressor(object):
    """Streaming gzip decompressor.

    The interface is like that of `zlib.decompressobj` (without the
    The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
    """
    def __init__(self):
@ -42,14 +50,24 @@ class GzipDecompressor(object):
        # This works on cpython and pypy, but not jython.
        self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def decompress(self, value):
    def decompress(self, value, max_length=None):
        """Decompress a chunk, returning newly-available data.

        Some data may be buffered for later processing; `flush` must
        be called when there is no more input data to ensure that
        all data was processed.

        If ``max_length`` is given, some input data may be left over
        in ``unconsumed_tail``; you must retrieve this value and pass
        it back to a future call to `decompress` if it is not empty.
        """
        return self.decompressobj.decompress(value)
        return self.decompressobj.decompress(value, max_length)

    @property
    def unconsumed_tail(self):
        """Returns the unconsumed portion left over
        """
        return self.decompressobj.unconsumed_tail

    def flush(self):
        """Return any remaining buffered data not yet returned by decompress.
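A hedged sketch of the streaming loop implied by the new ``max_length`` contract, feeding ``unconsumed_tail`` back in as the docstring requires::

    import gzip
    import io

    from tornado.util import GzipDecompressor

    buf = io.BytesIO()
    with gzip.GzipFile(fileobj=buf, mode='wb') as f:
        f.write(b'x' * 10000)

    decomp = GzipDecompressor()
    chunks = []
    data = buf.getvalue()
    while data:
        # Bound each output chunk; leftover input lands in unconsumed_tail.
        chunks.append(decomp.decompress(data, 1024))
        data = decomp.unconsumed_tail
    chunks.append(decomp.flush())
    assert b''.join(chunks) == b'x' * 10000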
@ -132,6 +150,24 @@ def exec_in(code, glob, loc=None):
""")


def errno_from_exception(e):
    """Provides the errno from an Exception object.

    There are cases that the errno attribute was not set so we pull
    the errno out of the args but if someone instantiates an Exception
    without any args you will get a tuple error. So this function
    abstracts all that behavior to give you a safe way to get the
    errno.
    """

    if hasattr(e, 'errno'):
        return e.errno
    elif e.args:
        return e.args[0]
    else:
        return None


class Configurable(object):
    """Base class for configurable interfaces.
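A hedged usage sketch (the address is chosen to fail)::

    import errno
    import socket

    from tornado.util import errno_from_exception

    try:
        socket.create_connection(('127.0.0.1', 1), timeout=0.1)
    except socket.error as e:
        # Works whether the errno lives on e.errno or in e.args.
        if errno_from_exception(e) == errno.ECONNREFUSED:
            print("connection refused")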
@ -243,6 +279,16 @@ class ArgReplacer(object):
            # Not a positional parameter
            self.arg_pos = None

    def get_old_value(self, args, kwargs, default=None):
        """Returns the old value of the named argument without replacing it.

        Returns ``default`` if the argument is not present.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            return args[self.arg_pos]
        else:
            return kwargs.get(self.name, default)

    def replace(self, new_value, args, kwargs):
        """Replace the named argument in ``args, kwargs`` with ``new_value``.
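A hedged sketch of ``get_old_value`` and ``replace`` against a toy signature::

    from tornado.util import ArgReplacer

    def connect(host, port, callback=None):
        pass

    replacer = ArgReplacer(connect, 'callback')
    args, kwargs = ('example.com', 80), dict(callback='old')
    # Peek at the current value without modifying the call.
    assert replacer.get_old_value(args, kwargs) == 'old'
    # Swap in a replacement, recovering the old value and the rewritten call.
    old, args, kwargs = replacer.replace('new', args, kwargs)
    assert (old, kwargs['callback']) == ('old', 'new')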
@ -265,6 +311,46 @@ class ArgReplacer(object):
        return old_value, args, kwargs


def timedelta_to_seconds(td):
    """Equivalent to td.total_seconds() (introduced in python 2.7)."""
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)


def _websocket_mask_python(mask, data):
    """Websocket masking function.

    `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
    Returns a `bytes` object of the same length as `data` with the mask applied
    as specified in section 5.3 of RFC 6455.

    This pure-python implementation may be replaced by an optimized version when available.
    """
    mask = array.array("B", mask)
    unmasked = array.array("B", data)
    for i in xrange(len(data)):
        unmasked[i] = unmasked[i] ^ mask[i % 4]
    if hasattr(unmasked, 'tobytes'):
        # tostring was deprecated in py32.  It hasn't been removed,
        # but since we turn on deprecation warnings in our tests
        # we need to use the right one.
        return unmasked.tobytes()
    else:
        return unmasked.tostring()

if (os.environ.get('TORNADO_NO_EXTENSION') or
        os.environ.get('TORNADO_EXTENSION') == '0'):
    # These environment variables exist to make it easier to do performance
    # comparisons; they are not guaranteed to remain supported in the future.
    _websocket_mask = _websocket_mask_python
else:
    try:
        from tornado.speedups import websocket_mask as _websocket_mask
    except ImportError:
        if os.environ.get('TORNADO_EXTENSION') == '1':
            raise
        _websocket_mask = _websocket_mask_python


def doctests():
    import doctest
    return doctest.DocTestSuite()
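XOR masking is an involution, so applying the same mask twice restores the payload; a short sketch of the function defined above::

    from tornado.util import _websocket_mask_python

    mask = b'abcd'
    payload = b'hello websocket'
    masked = _websocket_mask_python(mask, payload)
    # Masking is its own inverse (RFC 6455 section 5.3).
    assert _websocket_mask_python(mask, masked) == payload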
File diff suppressed because it is too large
@ -3,43 +3,44 @@
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.

.. warning::

   WebSockets are supported in the current versions of all major browsers,
   although older versions that do not support WebSockets are still in use
   (refer to http://caniuse.com/websockets for details).

   The WebSocket protocol was recently finalized as `RFC 6455
   <http://tools.ietf.org/html/rfc6455>`_ and is not yet supported in
   all browsers.  Refer to http://caniuse.com/websockets for details
   on compatibility.  In addition, during development the protocol
   went through several incompatible versions, and some browsers only
   support older versions.  By default this module only supports the
   latest version of the protocol, but optional support for an older
   version (known as "draft 76" or "hixie-76") can be enabled by
   overriding `WebSocketHandler.allow_draft76` (see that method's
   documentation for caveats).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_.  Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.

.. versionchanged:: 4.0
   Removed support for the draft 76 protocol version.
"""

from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010

import array
import base64
import collections
import functools
import hashlib
import os
import struct
import time
import tornado.escape
import tornado.web

from tornado.concurrent import Future
from tornado.escape import utf8, native_str
from tornado import httpclient
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado.netutil import Resolver
from tornado import simple_httpclient
from tornado.util import bytes_type, unicode_type
from tornado.tcpclient import TCPClient
from tornado.util import bytes_type, _websocket_mask

try:
    from urllib.parse import urlparse  # py3
except ImportError:
    from urlparse import urlparse  # py2

try:
    xrange  # py2
@ -51,6 +52,14 @@ class WebSocketError(Exception):
    pass


class WebSocketClosedError(WebSocketError):
    """Raised by operations on a closed connection.

    .. versionadded:: 3.2
    """
    pass


class WebSocketHandler(tornado.web.RequestHandler):
    """Subclass this class to create a basic WebSocket handler.
@ -100,28 +109,20 @@ class WebSocketHandler(tornado.web.RequestHandler):
    def __init__(self, application, request, **kwargs):
        tornado.web.RequestHandler.__init__(self, application, request,
                                            **kwargs)
        self.stream = request.connection.stream
        self.ws_connection = None
        self.close_code = None
        self.close_reason = None
        self.stream = None

    def _execute(self, transforms, *args, **kwargs):
    @tornado.web.asynchronous
    def get(self, *args, **kwargs):
        self.open_args = args
        self.open_kwargs = kwargs

        # Websocket only supports GET method
        if self.request.method != 'GET':
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 405 Method Not Allowed\r\n\r\n"
            ))
            self.stream.close()
            return

        # Upgrade header should be present and should be equal to WebSocket
        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 400 Bad Request\r\n\r\n"
                "Can \"Upgrade\" only to \"WebSocket\"."
            ))
            self.stream.close()
            self.set_status(400)
            self.finish("Can \"Upgrade\" only to \"WebSocket\".")
            return

        # Connection header should be upgrade. Some proxy servers/load balancers
@ -129,29 +130,41 @@ class WebSocketHandler(tornado.web.RequestHandler):
        headers = self.request.headers
        connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(","))
        if 'upgrade' not in connection:
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 400 Bad Request\r\n\r\n"
                "\"Connection\" must be \"Upgrade\"."
            ))
            self.stream.close()
            self.set_status(400)
            self.finish("\"Connection\" must be \"Upgrade\".")
            return

        # Handle WebSocket Origin naming convention differences
        # The difference between version 8 and 13 is that in 8 the
        # client sends a "Sec-Websocket-Origin" header and in 13 it's
        # simply "Origin".
        if "Origin" in self.request.headers:
            origin = self.request.headers.get("Origin")
        else:
            origin = self.request.headers.get("Sec-Websocket-Origin", None)

        # If there was an origin header, check to make sure it matches
        # according to check_origin. When the origin is None, we assume it
        # did not come from a browser and that it can be passed on.
        if origin is not None and not self.check_origin(origin):
            self.set_status(403)
            self.finish("Cross origin websockets not allowed")
            return

        self.stream = self.request.connection.detach()
        self.stream.set_close_callback(self.on_connection_close)

        if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
            self.ws_connection = WebSocketProtocol13(self)
            self.ws_connection.accept_connection()
        elif (self.allow_draft76() and
              "Sec-WebSocket-Version" not in self.request.headers):
            self.ws_connection = WebSocketProtocol76(self)
            self.ws_connection.accept_connection()
        else:
            self.stream.write(tornado.escape.utf8(
                "HTTP/1.1 426 Upgrade Required\r\n"
                "Sec-WebSocket-Version: 8\r\n\r\n"))
            self.stream.close()

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket.
@ -159,7 +172,15 @@ class WebSocketHandler(tornado.web.RequestHandler):
        encoded as json).  If the ``binary`` argument is false, the
        message will be sent as utf8; in binary mode any byte string
        is allowed.

        If the connection is already closed, raises `WebSocketClosedError`.

        .. versionchanged:: 3.2
           `WebSocketClosedError` was added (previously a closed connection
           would raise an `AttributeError`)
        """
        if self.ws_connection is None:
            raise WebSocketClosedError()
        if isinstance(message, dict):
            message = tornado.escape.json_encode(message)
        self.ws_connection.write_message(message, binary=binary)
|
|||
|
||||
def ping(self, data):
|
||||
"""Send ping frame to the remote end."""
|
||||
if self.ws_connection is None:
|
||||
raise WebSocketClosedError()
|
||||
self.ws_connection.write_ping(data)
|
||||
|
||||
def on_pong(self, data):
|
||||
|
|
@ -202,31 +225,66 @@ class WebSocketHandler(tornado.web.RequestHandler):
|
|||
pass
|
||||
|
||||
def on_close(self):
|
||||
"""Invoked when the WebSocket is closed."""
|
||||
"""Invoked when the WebSocket is closed.
|
||||
|
||||
If the connection was closed cleanly and a status code or reason
|
||||
phrase was supplied, these values will be available as the attributes
|
||||
``self.close_code`` and ``self.close_reason``.
|
||||
|
||||
.. versionchanged:: 4.0
|
||||
|
||||
Added ``close_code`` and ``close_reason`` attributes.
|
||||
"""
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
def close(self, code=None, reason=None):
|
||||
"""Closes this Web Socket.
|
||||
|
||||
Once the close handshake is successful the socket will be closed.
|
||||
|
||||
``code`` may be a numeric status code, taken from the values
|
||||
defined in `RFC 6455 section 7.4.1
|
||||
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
|
||||
``reason`` may be a textual message about why the connection is
|
||||
closing. These values are made available to the client, but are
|
||||
not otherwise interpreted by the websocket protocol.
|
||||
|
||||
.. versionchanged:: 4.0
|
||||
|
||||
Added the ``code`` and ``reason`` arguments.
|
||||
"""
|
||||
self.ws_connection.close()
|
||||
self.ws_connection = None
|
||||
if self.ws_connection:
|
||||
self.ws_connection.close(code, reason)
|
||||
self.ws_connection = None
|
||||
|
||||
def allow_draft76(self):
|
||||
"""Override to enable support for the older "draft76" protocol.
|
||||
def check_origin(self, origin):
|
||||
"""Override to enable support for allowing alternate origins.
|
||||
|
||||
The draft76 version of the websocket protocol is disabled by
|
||||
default due to security concerns, but it can be enabled by
|
||||
overriding this method to return True.
|
||||
The ``origin`` argument is the value of the ``Origin`` HTTP
|
||||
header, the url responsible for initiating this request. This
|
||||
method is not called for clients that do not send this header;
|
||||
such requests are always allowed (because all browsers that
|
||||
implement WebSockets support this header, and non-browser
|
||||
clients do not have the same cross-site security concerns).
|
||||
|
||||
Connections using the draft76 protocol do not support the
|
||||
``binary=True`` flag to `write_message`.
|
||||
Should return True to accept the request or False to reject it.
|
||||
By default, rejects all requests with an origin on a host other
|
||||
than this one.
|
||||
|
||||
Support for the draft76 protocol is deprecated and will be
|
||||
removed in a future version of Tornado.
|
||||
This is a security protection against cross site scripting attacks on
|
||||
browsers, since WebSockets are allowed to bypass the usual same-origin
|
||||
policies and don't use CORS headers.
|
||||
|
||||
.. versionadded:: 4.0
|
||||
"""
|
||||
return False
|
||||
parsed_origin = urlparse(origin)
|
||||
origin = parsed_origin.netloc
|
||||
origin = origin.lower()
|
||||
|
||||
host = self.request.headers.get("Host")
|
||||
|
||||
# Check to see that origin matches host directly, including ports
|
||||
return origin == host
|
||||
|
||||
def set_nodelay(self, value):
|
||||
"""Set the no-delay flag for this stream.
|
||||
|
|
@ -244,29 +302,6 @@ class WebSocketHandler(tornado.web.RequestHandler):
|
|||
"""
|
||||
self.stream.set_nodelay(value)
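
Handlers that exchange many small frames typically enable this as soon
as the connection opens; a minimal sketch::

    class LowLatencyHandler(tornado.websocket.WebSocketHandler):
        def open(self):
            # disable Nagle's algorithm so small frames go out immediately
            self.set_nodelay(True)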

    def get_websocket_scheme(self):
        """Return the url scheme used for this request, either "ws" or "wss".

        This is normally decided by HTTPServer, but applications
        may wish to override this if they are using an SSL proxy
        that does not provide the X-Scheme header as understood
        by HTTPServer.

        Note that this is only used by the draft76 protocol.
        """
        return "wss" if self.request.protocol == "https" else "ws"

    def async_callback(self, callback, *args, **kwargs):
        """Obsolete - catches exceptions from the wrapped function.

        This function is normally unnecessary thanks to
        `tornado.stack_context`.
        """
        return self.ws_connection.async_callback(callback, *args, **kwargs)

    def _not_supported(self, *args, **kwargs):
        raise Exception("Method not supported for Web Sockets")

    def on_connection_close(self):
        if self.ws_connection:
            self.ws_connection.on_connection_close()

@ -274,9 +309,17 @@ class WebSocketHandler(tornado.web.RequestHandler):
        self.on_close()


def _wrap_method(method):
    def _disallow_for_websocket(self, *args, **kwargs):
        if self.stream is None:
            method(self, *args, **kwargs)
        else:
            raise RuntimeError("Method not supported for Web Sockets")
    return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "send_error", "set_cookie",
               "set_status", "flush", "finish"]:
    setattr(WebSocketHandler, method, WebSocketHandler._not_supported)
    setattr(WebSocketHandler, method,
            _wrap_method(getattr(WebSocketHandler, method)))
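
After this loop runs, the plain ``RequestHandler`` output methods fail
fast once the connection has been upgraded; roughly::

    class EchoHandler(tornado.websocket.WebSocketHandler):
        def on_message(self, message):
            # self.write(message) would now raise
            # RuntimeError("Method not supported for Web Sockets");
            # the websocket equivalent is:
            self.write_message(message)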


class WebSocketProtocol(object):

@ -289,23 +332,17 @@ class WebSocketProtocol(object):
        self.client_terminated = False
        self.server_terminated = False

    def async_callback(self, callback, *args, **kwargs):
        """Wrap callbacks with this if they are used on asynchronous requests.
    def _run_callback(self, callback, *args, **kwargs):
        """Runs the given callback with exception handling.

        Catches exceptions properly and closes this WebSocket if an exception
        is uncaught.
        On error, aborts the websocket connection and returns False.
        """
        if args or kwargs:
            callback = functools.partial(callback, *args, **kwargs)

        def wrapper(*args, **kwargs):
            try:
                return callback(*args, **kwargs)
            except Exception:
                app_log.error("Uncaught exception in %s",
                              self.request.path, exc_info=True)
                self._abort()
        return wrapper
        try:
            callback(*args, **kwargs)
        except Exception:
            app_log.error("Uncaught exception in %s",
                          self.request.path, exc_info=True)
            self._abort()

    def on_connection_close(self):
        self._abort()

@ -318,174 +355,6 @@ class WebSocketProtocol(object):
        self.close()  # let the subclass cleanup


class WebSocketProtocol76(WebSocketProtocol):
    """Implementation of the WebSockets protocol, version hixie-76.

    This class provides basic functionality to process WebSockets requests as
    specified in
    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
    """
    def __init__(self, handler):
        WebSocketProtocol.__init__(self, handler)
        self.challenge = None
        self._waiting = None

    def accept_connection(self):
        try:
            self._handle_websocket_headers()
        except ValueError:
            gen_log.debug("Malformed WebSocket request received")
            self._abort()
            return

        scheme = self.handler.get_websocket_scheme()

        # draft76 only allows a single subprotocol
        subprotocol_header = ''
        subprotocol = self.request.headers.get("Sec-WebSocket-Protocol", None)
        if subprotocol:
            selected = self.handler.select_subprotocol([subprotocol])
            if selected:
                assert selected == subprotocol
                subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected

        # Write the initial headers before attempting to read the challenge.
        # This is necessary when using proxies (such as HAProxy), which
        # need to see the Upgrade headers before passing through the
        # non-HTTP traffic that follows.
        self.stream.write(tornado.escape.utf8(
            "HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
            "Upgrade: WebSocket\r\n"
            "Connection: Upgrade\r\n"
            "Server: TornadoServer/%(version)s\r\n"
            "Sec-WebSocket-Origin: %(origin)s\r\n"
            "Sec-WebSocket-Location: %(scheme)s://%(host)s%(uri)s\r\n"
            "%(subprotocol)s"
            "\r\n" % (dict(
                version=tornado.version,
                origin=self.request.headers["Origin"],
                scheme=scheme,
                host=self.request.host,
                uri=self.request.uri,
                subprotocol=subprotocol_header))))
        self.stream.read_bytes(8, self._handle_challenge)

    def challenge_response(self, challenge):
        """Generates the challenge response that's needed in the handshake.

        The challenge parameter should be the raw bytes as sent from the
        client.
        """
        key_1 = self.request.headers.get("Sec-Websocket-Key1")
        key_2 = self.request.headers.get("Sec-Websocket-Key2")
        try:
            part_1 = self._calculate_part(key_1)
            part_2 = self._calculate_part(key_2)
        except ValueError:
            raise ValueError("Invalid Keys/Challenge")
        return self._generate_challenge_response(part_1, part_2, challenge)

    def _handle_challenge(self, challenge):
        try:
            challenge_response = self.challenge_response(challenge)
        except ValueError:
            gen_log.debug("Malformed key data in WebSocket request")
            self._abort()
            return
        self._write_response(challenge_response)

    def _write_response(self, challenge):
        self.stream.write(challenge)
        self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs)
        self._receive_message()

    def _handle_websocket_headers(self):
        """Verifies all invariant and required headers.

        If a header is missing or has an incorrect value, ValueError will be
        raised.
        """
        fields = ("Origin", "Host", "Sec-Websocket-Key1",
                  "Sec-Websocket-Key2")
        if not all(map(lambda f: self.request.headers.get(f), fields)):
            raise ValueError("Missing/Invalid WebSocket headers")

    def _calculate_part(self, key):
        """Processes the key headers and calculates their key value.

        Raises ValueError when fed an invalid key."""
        # pyflakes complains about variable reuse if both of these lines use 'c'
        number = int(''.join(c for c in key if c.isdigit()))
        spaces = len([c2 for c2 in key if c2.isspace()])
        try:
            key_number = number // spaces
        except (ValueError, ZeroDivisionError):
            raise ValueError
        return struct.pack(">I", key_number)

    def _generate_challenge_response(self, part_1, part_2, part_3):
        m = hashlib.md5()
        m.update(part_1)
        m.update(part_2)
        m.update(part_3)
        return m.digest()

    def _receive_message(self):
        self.stream.read_bytes(1, self._on_frame_type)

    def _on_frame_type(self, byte):
        frame_type = ord(byte)
        if frame_type == 0x00:
            self.stream.read_until(b"\xff", self._on_end_delimiter)
        elif frame_type == 0xff:
            self.stream.read_bytes(1, self._on_length_indicator)
        else:
            self._abort()

    def _on_end_delimiter(self, frame):
        if not self.client_terminated:
            self.async_callback(self.handler.on_message)(
                frame[:-1].decode("utf-8", "replace"))
        if not self.client_terminated:
            self._receive_message()

    def _on_length_indicator(self, byte):
        if ord(byte) != 0x00:
            self._abort()
            return
        self.client_terminated = True
        self.close()

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket."""
        if binary:
            raise ValueError(
                "Binary messages not supported by this version of websockets")
        if isinstance(message, unicode_type):
            message = message.encode("utf-8")
        assert isinstance(message, bytes_type)
        self.stream.write(b"\x00" + message + b"\xff")

    def write_ping(self, data):
        """Send ping frame."""
        raise ValueError("Ping messages not supported by this version of websockets")

    def close(self):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
            if not self.stream.closed():
                self.stream.write("\xff\x00")
            self.server_terminated = True
        if self.client_terminated:
            if self._waiting is not None:
                self.stream.io_loop.remove_timeout(self._waiting)
                self._waiting = None
            self.stream.close()
        elif self._waiting is None:
            self._waiting = self.stream.io_loop.add_timeout(
                time.time() + 5, self._abort)

class WebSocketProtocol13(WebSocketProtocol):
    """Implementation of the WebSocket protocol from RFC 6455.

@ -555,7 +424,8 @@ class WebSocketProtocol13(WebSocketProtocol):
            "%s"
            "\r\n" % (self._challenge_response(), subprotocol_header)))

        self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs)
        self._run_callback(self.handler.open, *self.handler.open_args,
                           **self.handler.open_kwargs)
        self._receive_frame()

    def _write_frame(self, fin, opcode, data):

@ -577,7 +447,7 @@ class WebSocketProtocol13(WebSocketProtocol):
            frame += struct.pack("!BQ", 127 | mask_bit, l)
        if self.mask_outgoing:
            mask = os.urandom(4)
            data = mask + self._apply_mask(mask, data)
            data = mask + _websocket_mask(mask, data)
        frame += data
        self.stream.write(frame)

@ -662,21 +532,8 @@ class WebSocketProtocol13(WebSocketProtocol):
        except StreamClosedError:
            self._abort()

    def _apply_mask(self, mask, data):
        mask = array.array("B", mask)
        unmasked = array.array("B", data)
        for i in xrange(len(data)):
            unmasked[i] = unmasked[i] ^ mask[i % 4]
        if hasattr(unmasked, 'tobytes'):
            # tostring was deprecated in py32.  It hasn't been removed,
            # but since we turn on deprecation warnings in our tests
            # we need to use the right one.
            return unmasked.tobytes()
        else:
            return unmasked.tostring()

    def _on_masked_frame_data(self, data):
        self._on_frame_data(self._apply_mask(self._frame_mask, data))
        self._on_frame_data(_websocket_mask(self._frame_mask, data))

    def _on_frame_data(self, data):
        if self._frame_opcode_is_control:

@ -726,28 +583,40 @@ class WebSocketProtocol13(WebSocketProtocol):
            except UnicodeDecodeError:
                self._abort()
                return
            self.async_callback(self.handler.on_message)(decoded)
            self._run_callback(self.handler.on_message, decoded)
        elif opcode == 0x2:
            # Binary data
            self.async_callback(self.handler.on_message)(data)
            self._run_callback(self.handler.on_message, data)
        elif opcode == 0x8:
            # Close
            self.client_terminated = True
            if len(data) >= 2:
                self.handler.close_code = struct.unpack('>H', data[:2])[0]
            if len(data) > 2:
                self.handler.close_reason = to_unicode(data[2:])
            self.close()
        elif opcode == 0x9:
            # Ping
            self._write_frame(True, 0xA, data)
        elif opcode == 0xA:
            # Pong
            self.async_callback(self.handler.on_pong)(data)
            self._run_callback(self.handler.on_pong, data)
        else:
            self._abort()

    def close(self):
    def close(self, code=None, reason=None):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
            if not self.stream.closed():
                self._write_frame(True, 0x8, b"")
                if code is None and reason is not None:
                    code = 1000  # "normal closure" status code
                if code is None:
                    close_data = b''
                else:
                    close_data = struct.pack('>H', code)
                if reason is not None:
                    close_data += utf8(reason)
                self._write_frame(True, 0x8, close_data)
            self.server_terminated = True
        if self.client_terminated:
            if self._waiting is not None:
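
As a worked example of the payload built above, ``close(1000, "bye")``
packs the status code as a big-endian unsigned short followed by the
UTF-8 reason::

    >>> import struct
    >>> struct.pack('>H', 1000) + u'bye'.encode('utf-8')
    b'\x03\xe8bye'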

@ -762,9 +631,13 @@ class WebSocketProtocol13(WebSocketProtocol):


class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    """WebSocket client connection."""
    """WebSocket client connection.

    This class should not be instantiated directly; use the
    `websocket_connect` function instead.
    """
    def __init__(self, io_loop, request):
        self.connect_future = Future()
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

@ -779,14 +652,31 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
            'Sec-WebSocket-Version': '13',
        })

        self.resolver = Resolver(io_loop=io_loop)
        self.tcp_client = TCPClient(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.resolver)
            104857600, self.tcp_client, 65536)

    def close(self, code=None, reason=None):
        """Closes the websocket connection.

        ``code`` and ``reason`` are documented under
        `WebSocketHandler.close`.

        .. versionadded:: 3.2

        .. versionchanged:: 4.0

           Added the ``code`` and ``reason`` arguments.
        """
        if self.protocol is not None:
            self.protocol.close(code, reason)
            self.protocol = None

    def _on_close(self):
        self.on_message(None)
        self.resolver.close()
        super(WebSocketClientConnection, self)._on_close()

    def _on_http_response(self, response):
        if not self.connect_future.done():

@ -796,8 +686,12 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
            self.connect_future.set_exception(WebSocketError(
                "Non-websocket response"))

    def _handle_1xx(self, code):
        assert code == 101
    def headers_received(self, start_line, headers):
        if start_line.code != 101:
            return super(WebSocketClientConnection, self).headers_received(
                start_line, headers)

        self.headers = headers
        assert self.headers['Upgrade'].lower() == 'websocket'
        assert self.headers['Connection'].lower() == 'upgrade'
        accept = WebSocketProtocol13.compute_accept_value(self.key)
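
For reference, ``compute_accept_value`` is the standard RFC 6455
handshake digest; an equivalent standalone sketch::

    import base64
    import hashlib

    def accept_value(key):
        # Sec-WebSocket-Accept = base64(sha1(key + fixed GUID)), RFC 6455
        sha1 = hashlib.sha1()
        sha1.update(key)  # the base64-encoded Sec-WebSocket-Key bytes
        sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
        return base64.b64encode(sha1.digest())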

@ -810,6 +704,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

        self.stream = self.connection.detach()
        self.stream.set_close_callback(self._on_close)

        self.connect_future.set_result(self)

    def write_message(self, message, binary=False):

@ -825,7 +722,7 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection):
        ready.
        """
        assert self.read_future is None
        future = Future()
        future = TracebackFuture()
        if self.read_queue:
            future.set_result(self.read_queue.popleft())
        else:

@ -850,10 +747,20 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None):

    Takes a url and returns a Future whose result is a
    `WebSocketClientConnection`.

    .. versionchanged:: 3.2
       Also accepts ``HTTPRequest`` objects in place of urls.
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    if isinstance(url, httpclient.HTTPRequest):
        assert connect_timeout is None
        request = url
        # Copy and convert the headers dict/object (see comments in
        # AsyncHTTPClient.fetch)
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    request = httpclient._RequestProxy(
        request, httpclient.HTTPRequest._DEFAULTS)
    conn = WebSocketClientConnection(io_loop, request)
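
Typical client-side usage with the coroutine interface (the url and
messages are illustrative)::

    from tornado import gen
    from tornado.websocket import websocket_connect

    @gen.coroutine
    def talk():
        conn = yield websocket_connect("ws://localhost:8888/chat")
        conn.write_message("hello")
        msg = yield conn.read_message()  # resolves to None once closed
        print(msg)
        conn.close()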


@ -20,9 +20,9 @@ WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers.  This module
provides WSGI support in two ways:

* `WSGIApplication` is a version of `tornado.web.Application` that can run
  inside a WSGI server.  This is useful for running a Tornado app on another
  HTTP server, such as Google App Engine.  See the `WSGIApplication` class
* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
  interface.  This is useful for running a Tornado app on another
  HTTP server, such as Google App Engine.  See the `WSGIAdapter` class
  documentation for limitations that apply.
* `WSGIContainer` lets you run other WSGI applications and frameworks on the
  Tornado HTTP server.  For example, with this class you can mix Django

@ -32,14 +32,14 @@ provides WSGI support in two ways:
from __future__ import absolute_import, division, print_function, with_statement

import sys
import time
import tornado

from tornado.concurrent import Future
from tornado import escape
from tornado import httputil
from tornado.log import access_log
from tornado import web
from tornado.escape import native_str, parse_qs_bytes
from tornado.escape import native_str
from tornado.util import bytes_type, unicode_type

try:

@ -47,11 +47,6 @@ try:
except ImportError:
    from cStringIO import StringIO as BytesIO  # python 2

try:
    import Cookie  # py2
except ImportError:
    import http.cookies as Cookie  # py3

try:
    import urllib.parse as urllib_parse  # py3
except ImportError:

@ -82,11 +77,84 @@ else:
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.

    `WSGIApplication` is very similar to `tornado.web.Application`,
    except no asynchronous methods are supported (since WSGI does not
    support non-blocking requests properly).  If you call
    ``self.flush()`` or other asynchronous methods in your request
    handlers running in a `WSGIApplication`, we throw an exception.
    .. deprecated:: 4.0

       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    """
    def __call__(self, environ, start_response):
        return WSGIAdapter(self)(environ, start_response)


# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it.
_dummy_future = Future()
_dummy_future.set_result(None)


class _WSGIConnection(httputil.HTTPConnection):
    def __init__(self, method, start_response, context):
        self.method = method
        self.start_response = start_response
        self.context = context
        self._write_buffer = []
        self._finished = False
        self._expected_content_remaining = None
        self._error = None

    def set_close_callback(self, callback):
        # WSGI has no facility for detecting a closed connection mid-request,
        # so we can simply ignore the callback.
        pass

    def write_headers(self, start_line, headers, chunk=None, callback=None):
        if self.method == 'HEAD':
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        self.start_response(
            '%s %s' % (start_line.code, start_line.reason),
            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
        if chunk is not None:
            self.write(chunk, callback)
        elif callback is not None:
            callback()
        return _dummy_future

    def write(self, chunk, callback=None):
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                self._error = httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
                raise self._error
        self._write_buffer.append(chunk)
        if callback is not None:
            callback()
        return _dummy_future

    def finish(self):
        if (self._expected_content_remaining is not None and
                self._expected_content_remaining != 0):
            self._error = httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
            raise self._error
        self._finished = True


class _WSGIRequestContext(object):
    def __init__(self, remote_ip, protocol):
        self.remote_ip = remote_ip
        self.protocol = protocol

    def __str__(self):
        return self.remote_ip


class WSGIAdapter(object):
    """Converts a `tornado.web.Application` instance into a WSGI application.

    Example usage::

@ -99,115 +167,83 @@ class WSGIApplication(web.Application):
                self.write("Hello, world")

        if __name__ == "__main__":
            application = tornado.wsgi.WSGIApplication([
            application = tornado.web.Application([
                (r"/", MainHandler),
            ])
            server = wsgiref.simple_server.make_server('', 8888, application)
            wsgi_app = tornado.wsgi.WSGIAdapter(application)
            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
            server.serve_forever()

    See the `appengine demo
    <https://github.com/facebook/tornado/tree/master/demos/appengine>`_
    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    for an example of using this module to run a Tornado app on Google
    App Engine.

    WSGI applications use the same `.RequestHandler` class, but not
    ``@asynchronous`` methods or ``flush()``.  This means that it is
    not possible to use `.AsyncHTTPClient`, or the `tornado.auth` or
    `tornado.websocket` modules.
    In WSGI mode asynchronous methods are not supported.  This means
    that it is not possible to use `.AsyncHTTPClient`, or the
    `tornado.auth` or `tornado.websocket` modules.

    .. versionadded:: 4.0
    """
    def __init__(self, handlers=None, default_host="", **settings):
        web.Application.__init__(self, handlers, default_host, transforms=[],
                                 wsgi=True, **settings)
    def __init__(self, application):
        if isinstance(application, WSGIApplication):
            self.application = lambda request: web.Application.__call__(
                application, request)
        else:
            self.application = application

    def __call__(self, environ, start_response):
        handler = web.Application.__call__(self, HTTPRequest(environ))
        assert handler._finished
        reason = handler._reason
        status = str(handler._status_code) + " " + reason
        headers = list(handler._headers.get_all())
        if hasattr(handler, "_new_cookie"):
            for cookie in handler._new_cookie.values():
                headers.append(("Set-Cookie", cookie.OutputString(None)))
        start_response(status,
                       [(native_str(k), native_str(v)) for (k, v) in headers])
        return handler._write_buffer


class HTTPRequest(object):
    """Mimics `tornado.httpserver.HTTPRequest` for WSGI applications."""
    def __init__(self, environ):
        """Parses the given WSGI environment to construct the request."""
        self.method = environ["REQUEST_METHOD"]
        self.path = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        self.path += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
        self.uri = self.path
        self.arguments = {}
        self.query = environ.get("QUERY_STRING", "")
        if self.query:
            self.uri += "?" + self.query
            self.arguments = parse_qs_bytes(native_str(self.query),
                                            keep_blank_values=True)
        self.version = "HTTP/1.1"
        self.headers = httputil.HTTPHeaders()
        method = environ["REQUEST_METHOD"]
        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
        if environ.get("QUERY_STRING"):
            uri += "?" + environ["QUERY_STRING"]
        headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
            self.headers["Content-Type"] = environ["CONTENT_TYPE"]
            headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            self.headers["Content-Length"] = environ["CONTENT_LENGTH"]
            headers["Content-Length"] = environ["CONTENT_LENGTH"]
        for key in environ:
            if key.startswith("HTTP_"):
                self.headers[key[5:].replace("_", "-")] = environ[key]
        if self.headers.get("Content-Length"):
            self.body = environ["wsgi.input"].read(
                int(self.headers["Content-Length"]))
                headers[key[5:].replace("_", "-")] = environ[key]
        if headers.get("Content-Length"):
            body = environ["wsgi.input"].read(
                int(headers["Content-Length"]))
        else:
            self.body = ""
        self.protocol = environ["wsgi.url_scheme"]
        self.remote_ip = environ.get("REMOTE_ADDR", "")
            body = ""
        protocol = environ["wsgi.url_scheme"]
        remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            self.host = environ["HTTP_HOST"]
            host = environ["HTTP_HOST"]
        else:
            self.host = environ["SERVER_NAME"]

        # Parse request body
        self.files = {}
        httputil.parse_body_arguments(self.headers.get("Content-Type", ""),
                                      self.body, self.arguments, self.files)

        self._start_time = time.time()
        self._finish_time = None

    def supports_http_1_1(self):
        """Returns True if this request supports HTTP/1.1 semantics"""
        return self.version == "HTTP/1.1"

    @property
    def cookies(self):
        """A dictionary of Cookie.Morsel objects."""
        if not hasattr(self, "_cookies"):
            self._cookies = Cookie.SimpleCookie()
            if "Cookie" in self.headers:
                try:
                    self._cookies.load(
                        native_str(self.headers["Cookie"]))
                except Exception:
                    self._cookies = None
        return self._cookies

    def full_url(self):
        """Reconstructs the full URL for this request."""
        return self.protocol + "://" + self.host + self.uri

    def request_time(self):
        """Returns the amount of time it took for this request to execute."""
        if self._finish_time is None:
            return time.time() - self._start_time
        else:
            return self._finish_time - self._start_time
            host = environ["SERVER_NAME"]
        connection = _WSGIConnection(method, start_response,
                                     _WSGIRequestContext(remote_ip, protocol))
        request = httputil.HTTPServerRequest(
            method, uri, "HTTP/1.1", headers=headers, body=body,
            host=host, connection=connection)
        request._parse_body()
        self.application(request)
        if connection._error:
            raise connection._error
        if not connection._finished:
            raise Exception("request did not finish synchronously")
        return connection._write_buffer


class WSGIContainer(object):
    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.

    .. warning::

       WSGI is a *synchronous* interface, while Tornado's concurrency model
       is based on single-threaded asynchronous execution.  This means that
       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
       than running the same app in a multi-threaded WSGI server like
       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
       benefits to combining Tornado and WSGI in the same process that
       outweigh the reduced scalability.

    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
    run it.  For example::

@ -242,10 +278,12 @@ class WSGIContainer(object):
            return response.append
        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        response.extend(app_response)
        body = b"".join(response)
        if hasattr(app_response, "close"):
            app_response.close()
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")
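
The try/finally added here matters because PEP 3333 obliges the gateway
to call ``close()`` on the response iterable even when iteration fails
partway; a tiny illustration of why that hook exists::

    def app_iter():
        try:
            yield b"partial body"
            raise RuntimeError("application error mid-response")
        finally:
            # runs on normal exhaustion *and* on generator .close()
            print("cleanup still runs")  # e.g. release a file or DB handle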

@ -272,7 +310,7 @@ class WSGIContainer(object):

    @staticmethod
    def environ(request):
        """Converts a `tornado.httpserver.HTTPRequest` to a WSGI environment.
        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
        """
        hostport = request.host.split(":")
        if len(hostport) == 2:

@ -285,7 +323,7 @@ class WSGIContainer(object):
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": to_wsgi_str(escape.url_unescape(
                request.path, encoding=None, plus=False)),
                request.path, encoding=None, plus=False)),
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,

@ -318,3 +356,6 @@ class WSGIContainer(object):
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)


HTTPRequest = httputil.HTTPServerRequest