Add 64/32-bit Linux split
154
Linux_x86_64/lib/python2.7/site-packages/werkzeug/__init__.py
Normal file
@@ -0,0 +1,154 @@
# -*- coding: utf-8 -*-
"""
    werkzeug
    ~~~~~~~~

    Werkzeug is the Swiss Army knife of Python web development.

    It provides useful classes and functions for any WSGI application to make
    the life of a python web developer much easier.  All of the provided
    classes are independent from each other so you can mix it with any other
    library.


    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys

from werkzeug._compat import iteritems

# the version.  Usually set automatically by a script.
__version__ = '0.9.4'


# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer.  There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented.  The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within.  Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.


# import mapping to objects in other modules
all_by_module = {
    'werkzeug.debug':    ['DebuggedApplication'],
    'werkzeug.local':    ['Local', 'LocalManager', 'LocalProxy',
                          'LocalStack', 'release_local'],
    'werkzeug.serving':  ['run_simple'],
    'werkzeug.test':     ['Client', 'EnvironBuilder', 'create_environ',
                          'run_wsgi_app'],
    'werkzeug.testapp':  ['test_app'],
    'werkzeug.exceptions': ['abort', 'Aborter'],
    'werkzeug.urls':     ['url_decode', 'url_encode', 'url_quote',
                          'url_quote_plus', 'url_unquote',
                          'url_unquote_plus', 'url_fix', 'Href',
                          'iri_to_uri', 'uri_to_iri'],
    'werkzeug.formparser': ['parse_form_data'],
    'werkzeug.utils':    ['escape', 'environ_property',
                          'append_slash_redirect', 'redirect',
                          'cached_property', 'import_string',
                          'dump_cookie', 'parse_cookie', 'unescape',
                          'format_string', 'find_modules', 'header_property',
                          'html', 'xhtml', 'HTMLBuilder',
                          'validate_arguments', 'ArgumentValidationError',
                          'bind_arguments', 'secure_filename'],
    'werkzeug.wsgi':     ['get_current_url', 'get_host', 'pop_path_info',
                          'peek_path_info', 'SharedDataMiddleware',
                          'DispatcherMiddleware', 'ClosingIterator',
                          'FileWrapper', 'make_line_iter', 'LimitedStream',
                          'responder', 'wrap_file', 'extract_path_info'],
    'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
                          'EnvironHeaders', 'ImmutableList',
                          'ImmutableDict', 'ImmutableMultiDict',
                          'TypeConversionDict', 'ImmutableTypeConversionDict',
                          'Accept', 'MIMEAccept', 'CharsetAccept',
                          'LanguageAccept', 'RequestCacheControl',
                          'ResponseCacheControl', 'ETags', 'HeaderSet',
                          'WWWAuthenticate', 'Authorization',
                          'FileMultiDict', 'CallbackDict', 'FileStorage',
                          'OrderedMultiDict', 'ImmutableOrderedMultiDict'],
    'werkzeug.useragents': ['UserAgent'],
    'werkzeug.http':     ['parse_etags', 'parse_date', 'http_date',
                          'cookie_date', 'parse_cache_control_header',
                          'is_resource_modified', 'parse_accept_header',
                          'parse_set_header', 'quote_etag', 'unquote_etag',
                          'generate_etag', 'dump_header',
                          'parse_list_header', 'parse_dict_header',
                          'parse_authorization_header',
                          'parse_www_authenticate_header',
                          'remove_entity_headers', 'is_entity_header',
                          'remove_hop_by_hop_headers', 'parse_options_header',
                          'dump_options_header', 'is_hop_by_hop_header',
                          'unquote_header_value',
                          'quote_header_value', 'HTTP_STATUS_CODES'],
    'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request',
                          'Response', 'AcceptMixin', 'ETagRequestMixin',
                          'ETagResponseMixin', 'ResponseStreamMixin',
                          'CommonResponseDescriptorsMixin',
                          'UserAgentMixin', 'AuthorizationMixin',
                          'WWWAuthenticateMixin',
                          'CommonRequestDescriptorsMixin'],
    'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
    # the undocumented easteregg ;-)
    'werkzeug._internal': ['_easteregg']
}

# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])


object_origins = {}
for module, items in iteritems(all_by_module):
    for item in items:
        object_origins[item] = module


class module(ModuleType):
    """Automatically import objects from the modules."""

    def __getattr__(self, name):
        if name in object_origins:
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            __import__('werkzeug.' + name)
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        """Just show what we want to show."""
        result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__path__',
                       '__package__', '__version__'))
        return result

# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']


# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
    '__file__':       __file__,
    '__package__':    'werkzeug',
    '__path__':       __path__,
    '__doc__':        __doc__,
    '__version__':    __version__,
    '__all__':        tuple(object_origins) + tuple(attribute_modules),
    '__docformat__':  'restructuredtext en'
})


# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
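The lazy-import machinery above is easiest to see from the caller's side. A minimal sketch of the behaviour it produces (not part of the diff; the before/after check is illustrative and may vary with what other modules have already been imported):

# Hedged sketch: what the replacement module class buys the caller.
import sys
import werkzeug

print('werkzeug.wrappers' in sys.modules)   # usually False right after import
werkzeug.Response                           # attribute access triggers __getattr__,
                                            # which imports werkzeug.wrappers and
                                            # caches every name it maps
print('werkzeug.wrappers' in sys.modules)   # True once the attribute was touched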
202
Linux_x86_64/lib/python2.7/site-packages/werkzeug/_compat.py
Normal file
@@ -0,0 +1,202 @@
import sys
import operator
import functools
try:
    import builtins
except ImportError:
    import __builtin__ as builtins


PY2 = sys.version_info[0] == 2

_identity = lambda x: x


if PY2:
    unichr = unichr
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    int_to_byte = chr

    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)

    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)

    iter_bytes = lambda x: iter(x)

    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    def fix_tuple_repr(obj):
        def __repr__(self):
            cls = self.__class__
            return '%s(%s)' % (cls.__name__, ', '.join(
                '%s=%r' % (field, self[index])
                for index, field in enumerate(cls._fields)
            ))
        obj.__repr__ = __repr__
        return obj

    def implements_iterator(cls):
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    def native_string_result(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs).encode('utf-8')
        return functools.update_wrapper(wrapper, func)

    def implements_bool(cls):
        cls.__nonzero__ = cls.__bool__
        del cls.__bool__
        return cls

    from itertools import imap, izip, ifilter
    range_type = xrange

    from StringIO import StringIO
    from cStringIO import StringIO as BytesIO
    NativeStringIO = BytesIO

    def make_literal_wrapper(reference):
        return lambda x: x

    def normalize_string_tuple(tup):
        """Normalizes a string tuple to a common type. Following Python 2
        rules, upgrades to unicode are implicit.
        """
        if any(isinstance(x, text_type) for x in tup):
            return tuple(to_unicode(x) for x in tup)
        return tup

    def try_coerce_native(s):
        """Try to coerce a unicode string to native if possible. Otherwise,
        leave it as unicode.
        """
        try:
            return str(s)
        except UnicodeError:
            return s

    wsgi_get_bytes = _identity

    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        return s.decode(charset, errors)

    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        if isinstance(s, bytes):
            return s
        return s.encode(charset, errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, buffer)):
            return bytes(x)
        if isinstance(x, unicode):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None or isinstance(x, str):
            return x
        return x.encode(charset, errors)

else:
    unichr = chr
    text_type = str
    string_types = (str, )
    integer_types = (int, )

    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))

    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))

    int_to_byte = operator.methodcaller('to_bytes', 1, 'big')

    def iter_bytes(b):
        return map(int_to_byte, b)

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    fix_tuple_repr = _identity
    implements_iterator = _identity
    implements_to_string = _identity
    implements_bool = _identity
    native_string_result = _identity
    imap = map
    izip = zip
    ifilter = filter
    range_type = range

    from io import StringIO, BytesIO
    NativeStringIO = StringIO

    def make_literal_wrapper(reference):
        if isinstance(reference, text_type):
            return lambda x: x
        return lambda x: x.encode('latin1')

    def normalize_string_tuple(tup):
        """Ensures that all types in the tuple are either strings
        or bytes.
        """
        tupiter = iter(tup)
        is_text = isinstance(next(tupiter, None), text_type)
        for arg in tupiter:
            if isinstance(arg, text_type) != is_text:
                raise TypeError('Cannot mix str and bytes arguments (got %s)'
                                % repr(tup))
        return tup

    try_coerce_native = _identity

    def wsgi_get_bytes(s):
        return s.encode('latin1')

    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        return s.encode('latin1').decode(charset, errors)

    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        if isinstance(s, bytes):
            return s.decode('latin1', errors)
        return s.encode(charset).decode('latin1', errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, memoryview)):
            return bytes(x)
        if isinstance(x, str):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None or isinstance(x, str):
            return x
        return x.decode(charset, errors)


def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
               allow_none_charset=False):
    if x is None:
        return None
    if not isinstance(x, bytes):
        return text_type(x)
    if charset is None and allow_none_charset:
        return x
    return x.decode(charset, errors)
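A small sketch of the compat helpers above from the caller's side (illustration only, not part of the diff; runs unchanged on Python 2 and 3):

# Hedged sketch of the _compat helpers.
from werkzeug._compat import to_bytes, to_native, iteritems

raw = to_bytes(u'caf\xe9', charset='utf-8')    # always bytes
native = to_native(raw, charset='utf-8')       # native str on both major versions
assert isinstance(native, str)

for key, value in iteritems({'a': 1}):         # d.iteritems() on 2, iter(d.items()) on 3
    print(key, value)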
412
Linux_x86_64/lib/python2.7/site-packages/werkzeug/_internal.py
Normal file
@@ -0,0 +1,412 @@
# -*- coding: utf-8 -*-
"""
    werkzeug._internal
    ~~~~~~~~~~~~~~~~~~

    This module provides internally used helpers and constants.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import re
import string
import inspect
from weakref import WeakKeyDictionary
from datetime import datetime, date
from itertools import chain

from werkzeug._compat import iter_bytes, text_type, BytesIO, int_to_byte, \
    range_type, to_native


_logger = None
_empty_stream = BytesIO()
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
_cookie_params = set((b'expires', b'path', b'comment',
                      b'max-age', b'secure', b'httponly',
                      b'version'))
_legal_cookie_chars = (string.ascii_letters +
                       string.digits +
                       u"!#$%&'*+-.^_`|~:").encode('ascii')

_cookie_quoting_map = {
    b',' : b'\\054',
    b';' : b'\\073',
    b'"' : b'\\"',
    b'\\' : b'\\\\',
}
for _i in chain(range_type(32), range_type(127, 256)):
    _cookie_quoting_map[int_to_byte(_i)] = ('\\%03o' % _i).encode('latin1')


_octal_re = re.compile(b'\\\\[0-3][0-7][0-7]')
_quote_re = re.compile(b'[\\\\].')
_legal_cookie_chars_re = b'[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]'
_cookie_re = re.compile(b"""(?x)
    (?P<key>[^=]+)
    \s*=\s*
    (?P<val>
        "(?:[^\\\\"]|\\\\.)*" |
        (?:.*?)
    )
    \s*;
""")


class _Missing(object):

    def __repr__(self):
        return 'no value'

    def __reduce__(self):
        return '_missing'

_missing = _Missing()


def _get_environ(obj):
    env = getattr(obj, 'environ', obj)
    assert isinstance(env, dict), \
        '%r is not a WSGI environment (has to be a dict)' % type(obj).__name__
    return env


def _log(type, message, *args, **kwargs):
    """Log into the internal werkzeug logger."""
    global _logger
    if _logger is None:
        import logging
        _logger = logging.getLogger('werkzeug')
        # Only set up a default log handler if the
        # end-user application didn't set anything up.
        if not logging.root.handlers and _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)
            handler = logging.StreamHandler()
            _logger.addHandler(handler)
    getattr(_logger, type)(message.rstrip(), *args, **kwargs)


def _parse_signature(func):
    """Return a signature object for the function."""
    if hasattr(func, 'im_func'):
        func = func.im_func

    # if we have a cached validator for this function, return it
    parse = _signature_cache.get(func)
    if parse is not None:
        return parse

    # inspect the function signature and collect all the information
    positional, vararg_var, kwarg_var, defaults = inspect.getargspec(func)
    defaults = defaults or ()
    arg_count = len(positional)
    arguments = []
    for idx, name in enumerate(positional):
        if isinstance(name, list):
            raise TypeError('cannot parse functions that unpack tuples '
                            'in the function signature')
        try:
            default = defaults[idx - arg_count]
        except IndexError:
            param = (name, False, None)
        else:
            param = (name, True, default)
        arguments.append(param)
    arguments = tuple(arguments)

    def parse(args, kwargs):
        new_args = []
        missing = []
        extra = {}

        # consume as many arguments as positional as possible
        for idx, (name, has_default, default) in enumerate(arguments):
            try:
                new_args.append(args[idx])
            except IndexError:
                try:
                    new_args.append(kwargs.pop(name))
                except KeyError:
                    if has_default:
                        new_args.append(default)
                    else:
                        missing.append(name)
            else:
                if name in kwargs:
                    extra[name] = kwargs.pop(name)

        # handle extra arguments
        extra_positional = args[arg_count:]
        if vararg_var is not None:
            new_args.extend(extra_positional)
            extra_positional = ()
        if kwargs and kwarg_var is None:
            extra.update(kwargs)
            kwargs = {}

        return new_args, kwargs, missing, extra, extra_positional, \
               arguments, vararg_var, kwarg_var
    _signature_cache[func] = parse
    return parse


def _date_to_unix(arg):
    """Converts a timetuple, integer or datetime object into the seconds from
    epoch in utc.
    """
    if isinstance(arg, datetime):
        arg = arg.utctimetuple()
    elif isinstance(arg, (int, long, float)):
        return int(arg)
    year, month, day, hour, minute, second = arg[:6]
    days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
    hours = days * 24 + hour
    minutes = hours * 60 + minute
    seconds = minutes * 60 + second
    return seconds


class _DictAccessorProperty(object):
    """Baseclass for `environ_property` and `header_property`."""
    read_only = False

    def __init__(self, name, default=None, load_func=None, dump_func=None,
                 read_only=None, doc=None):
        self.name = name
        self.default = default
        self.load_func = load_func
        self.dump_func = dump_func
        if read_only is not None:
            self.read_only = read_only
        self.__doc__ = doc

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        storage = self.lookup(obj)
        if self.name not in storage:
            return self.default
        rv = storage[self.name]
        if self.load_func is not None:
            try:
                rv = self.load_func(rv)
            except (ValueError, TypeError):
                rv = self.default
        return rv

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError('read only property')
        if self.dump_func is not None:
            value = self.dump_func(value)
        self.lookup(obj)[self.name] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError('read only property')
        self.lookup(obj).pop(self.name, None)

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            self.name
        )


def _cookie_quote(b):
    buf = bytearray()
    all_legal = True
    _lookup = _cookie_quoting_map.get
    _push = buf.extend

    for char in iter_bytes(b):
        if char not in _legal_cookie_chars:
            all_legal = False
            char = _lookup(char, char)
        _push(char)

    if all_legal:
        return bytes(buf)
    return bytes(b'"' + buf + b'"')


def _cookie_unquote(b):
    if len(b) < 2:
        return b
    if b[:1] != b'"' or b[-1:] != b'"':
        return b

    b = b[1:-1]

    i = 0
    n = len(b)
    rv = bytearray()
    _push = rv.extend

    while 0 <= i < n:
        o_match = _octal_re.search(b, i)
        q_match = _quote_re.search(b, i)
        if not o_match and not q_match:
            rv.extend(b[i:])
            break
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):
            _push(b[i:k])
            _push(b[k + 1:k + 2])
            i = k + 2
        else:
            _push(b[i:j])
            rv.append(int(b[j + 1:j + 4], 8))
            i = j + 4

    return bytes(rv)


def _cookie_parse_impl(b):
    """Lowlevel cookie parsing facility that operates on bytes."""
    i = 0
    n = len(b)

    while i < n:
        match = _cookie_re.search(b + b';', i)
        if not match:
            break

        key = match.group('key').strip()
        value = match.group('val')
        i = match.end(0)

        # Ignore parameters.  We have no interest in them.
        if key.lower() not in _cookie_params:
            yield _cookie_unquote(key), _cookie_unquote(value)


def _encode_idna(domain):
    # If we're given bytes, make sure they fit into ASCII
    if not isinstance(domain, text_type):
        domain.decode('ascii')
        return domain

    # Otherwise check if it's already ascii, then return
    try:
        return domain.encode('ascii')
    except UnicodeError:
        pass

    # Otherwise encode each part separately
    parts = domain.split('.')
    for idx, part in enumerate(parts):
        parts[idx] = part.encode('idna')
    return b'.'.join(parts)


def _decode_idna(domain):
    # If the input is a string try to encode it to ascii to
    # do the idna decoding.  if that fails because of an
    # unicode error, then we already have a decoded idna domain
    if isinstance(domain, text_type):
        try:
            domain = domain.encode('ascii')
        except UnicodeError:
            return domain

    # Decode each part separately.  If a part fails, try to
    # decode it with ascii and silently ignore errors.  This makes
    # most sense because the idna codec does not have error handling
    parts = domain.split(b'.')
    for idx, part in enumerate(parts):
        try:
            parts[idx] = part.decode('idna')
        except UnicodeError:
            parts[idx] = part.decode('ascii', 'ignore')

    return '.'.join(parts)


def _make_cookie_domain(domain):
    if domain is None:
        return None
    domain = _encode_idna(domain)
    if b':' in domain:
        domain = domain.split(b':', 1)[0]
    if b'.' in domain:
        return domain
    raise ValueError(
        'Setting \'domain\' for a cookie on a server running locally (ex: '
        'localhost) is not supported by complying browsers. You should '
        'have something like: \'127.0.0.1 localhost dev.localhost\' on '
        'your hosts file and then point your server to run on '
        '\'dev.localhost\' and also set \'domain\' for \'dev.localhost\''
    )


def _easteregg(app=None):
    """Like the name says.  But who knows how it works?"""
    def bzzzzzzz(gyver):
        import base64
        import zlib
        return zlib.decompress(base64.b64decode(gyver)).decode('ascii')
    gyver = u'\n'.join([x + (77 - len(x)) * u' ' for x in bzzzzzzz(b'''
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t''').splitlines()])
    def easteregged(environ, start_response):
        def injecting_start_response(status, headers, exc_info=None):
            headers.append(('X-Powered-By', 'Werkzeug'))
            return start_response(status, headers, exc_info)
        if app is not None and environ.get('QUERY_STRING') != 'macgybarchakku':
            return app(environ, injecting_start_response)
        injecting_start_response('200 OK', [('Content-Type', 'text/html')])
        return [(u'''
<!DOCTYPE html>
<html>
  <head>
    <title>About Werkzeug</title>
    <style type="text/css">
      body { font: 15px Georgia, serif; text-align: center; }
      a { color: #333; text-decoration: none; }
      h1 { font-size: 30px; margin: 20px 0 10px 0; }
      p { margin: 0 0 30px 0; }
      pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
    </style>
  </head>
  <body>
    <h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
    <p>the Swiss Army knife of Python web development.</p>
    <pre>%s\n\n\n</pre>
  </body>
</html>''' % gyver).encode('latin1')]
    return easteregged
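The three private cookie helpers above compose into a round trip. A hedged sketch (illustration only; these are underscore-prefixed internals, not public API):

# Hedged sketch: round-tripping a value through the private cookie helpers.
from werkzeug._internal import _cookie_quote, _cookie_unquote, _cookie_parse_impl

quoted = _cookie_quote(b'a value; with "specials"')
print(quoted)                       # escaped and wrapped in double quotes
assert _cookie_unquote(quoted) == b'a value; with "specials"'

# _cookie_parse_impl yields (key, value) pairs and skips parameters
# listed in _cookie_params such as path= or expires=.
for key, value in _cookie_parse_impl(b'name=' + quoted + b'; path=/'):
    print(key, value)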
16
Linux_x86_64/lib/python2.7/site-packages/werkzeug/contrib/__init__.py
Normal file
@@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib
    ~~~~~~~~~~~~~~~~

    Contains user-submitted code that other users may find useful, but which
    is not part of the Werkzeug core.  Anyone can write code for inclusion in
    the `contrib` package.  All modules in this package are distributed as an
    add-on library and thus are not part of Werkzeug itself.

    This file itself is mostly for informational purposes and to tell the
    Python interpreter that `contrib` is a package.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
347
Linux_x86_64/lib/python2.7/site-packages/werkzeug/contrib/atom.py
Normal file
@@ -0,0 +1,347 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.atom
    ~~~~~~~~~~~~~~~~~~~~~

    This module provides a class called :class:`AtomFeed` which can be
    used to generate feeds in the Atom syndication format (see :rfc:`4287`).

    Example::

        def atom_feed(request):
            feed = AtomFeed("My Blog", feed_url=request.url,
                            url=request.host_url,
                            subtitle="My example blog for a feed test.")
            for post in Post.query.limit(10).all():
                feed.add(post.title, post.body, content_type='html',
                         author=post.author, url=post.url, id=post.uid,
                         updated=post.last_update, published=post.pub_date)
            return feed.get_response()

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from datetime import datetime

from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types


XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'


def _make_text_block(name, content, content_type=None):
    """Helper function for the builder that creates an XML text block."""
    if content_type == 'xhtml':
        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
               (name, XHTML_NAMESPACE, content, name)
    if not content_type:
        return u'<%s>%s</%s>\n' % (name, escape(content), name)
    return u'<%s type="%s">%s</%s>\n' % (name, content_type,
                                         escape(content), name)


def format_iso8601(obj):
    """Format a datetime object for iso8601"""
    return obj.strftime('%Y-%m-%dT%H:%M:%SZ')


@implements_to_string
class AtomFeed(object):
    """A helper class that creates Atom feeds.

    :param title: the title of the feed. Required.
    :param title_type: the type attribute for the title element.  One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed.  Must be an URI.  If
               not present the `feed_url` is used, but one of both is
               required.
    :param updated: the time the feed was modified the last time.  Must
                    be a :class:`datetime.datetime` object.  If not
                    present the latest entry's `updated` is used.
    :param feed_url: the URL to the feed.  Should be the URL that was
                     requested.
    :param author: the author of the feed.  Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional).  Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors.  Required if not every entry has an
                   author element.
    :param icon: an icon for the feed.
    :param logo: a logo for the feed.
    :param rights: copyright information for the feed.
    :param rights_type: the type attribute for the rights element.  One of
                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
                        ``'text'``.
    :param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'`` or ``'xhtml'``.
                          Default is ``'text'``.
    :param links: additional links.  Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param generator: the software that generated this feed.  This must be
                      a tuple in the form ``(name, url, version)``.  If
                      you don't want to specify one of them, set the item
                      to `None`.
    :param entries: a list with the entries for the feed.  Entries can also
                    be added later with :meth:`add`.

    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/

    Everywhere where a list is demanded, any iterable can be used.
    """

    default_generator = ('Werkzeug', None, None)

    def __init__(self, title=None, entries=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.url = kwargs.get('url')
        self.feed_url = kwargs.get('feed_url', self.url)
        self.id = kwargs.get('id', self.feed_url)
        self.updated = kwargs.get('updated')
        self.author = kwargs.get('author', ())
        self.icon = kwargs.get('icon')
        self.logo = kwargs.get('logo')
        self.rights = kwargs.get('rights')
        self.rights_type = kwargs.get('rights_type')
        self.subtitle = kwargs.get('subtitle')
        self.subtitle_type = kwargs.get('subtitle_type', 'text')
        self.generator = kwargs.get('generator')
        if self.generator is None:
            self.generator = self.default_generator
        self.links = kwargs.get('links', [])
        self.entries = entries and list(entries) or []

        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, string_types + (dict,)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}

        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        for author in self.author:
            if 'name' not in author:
                raise TypeError('author must contain at least a name')

    def add(self, *args, **kwargs):
        """Add a new entry to the feed.  This function can either be called
        with a :class:`FeedEntry` or some keyword and positional arguments
        that are forwarded to the :class:`FeedEntry` constructor.
        """
        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
            self.entries.append(args[0])
        else:
            kwargs['feed_url'] = self.feed_url
            self.entries.append(FeedEntry(*args, **kwargs))

    def __repr__(self):
        return '<%s %r (%d entries)>' % (
            self.__class__.__name__,
            self.title,
            len(self.entries)
        )

    def generate(self):
        """Return a generator that yields pieces of XML."""
        # atom demands either an author element in every entry or a global one
        if not self.author:
            if False in map(lambda e: bool(e.author), self.entries):
                self.author = ({'name': 'Unknown author'},)

        if not self.updated:
            dates = sorted([entry.updated for entry in self.entries])
            self.updated = dates and dates[-1] or datetime.utcnow()

        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
        yield '  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        if self.feed_url:
            yield u'  <link href="%s" rel="self" />\n' % \
                escape(self.feed_url)
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' %
                (k, escape(link[k])) for k in link)
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield '    <email>%s</email>\n' % escape(author['email'])
            yield '  </author>\n'
        if self.subtitle:
            yield '  ' + _make_text_block('subtitle', self.subtitle,
                                          self.subtitle_type)
        if self.icon:
            yield u'  <icon>%s</icon>\n' % escape(self.icon)
        if self.logo:
            yield u'  <logo>%s</logo>\n' % escape(self.logo)
        if self.rights:
            yield '  ' + _make_text_block('rights', self.rights,
                                          self.rights_type)
        generator_name, generator_url, generator_version = self.generator
        if generator_name or generator_url or generator_version:
            tmp = [u'  <generator']
            if generator_url:
                tmp.append(u' uri="%s"' % escape(generator_url))
            if generator_version:
                tmp.append(u' version="%s"' % escape(generator_version))
            tmp.append(u'>%s</generator>\n' % escape(generator_name))
            yield u''.join(tmp)
        for entry in self.entries:
            for line in entry.generate():
                yield u'  ' + line
        yield u'</feed>\n'

    def to_string(self):
        """Convert the feed into a string."""
        return u''.join(self.generate())

    def get_response(self):
        """Return a response object for the feed."""
        return BaseResponse(self.to_string(), mimetype='application/atom+xml')

    def __call__(self, environ, start_response):
        """Use the class as WSGI response object."""
        return self.get_response()(environ, start_response)

    def __str__(self):
        return self.to_string()


@implements_to_string
class FeedEntry(object):
    """Represents a single entry in a feed.

    :param title: the title of the entry. Required.
    :param title_type: the type attribute for the title element.  One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param content: the content of the entry.
    :param content_type: the type attribute for the content element.  One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param summary: a summary of the entry's content.
    :param summary_type: the type attribute for the summary element.  One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the entry.
    :param id: a globally unique id for the entry.  Must be an URI.  If
               not present the URL is used, but one of both is required.
    :param updated: the time the entry was modified the last time.  Must
                    be a :class:`datetime.datetime` object.  Required.
    :param author: the author of the entry.  Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional).  Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors.  Required if the feed does not have an
                   author element.
    :param published: the time the entry was initially published.  Must
                      be a :class:`datetime.datetime` object.
    :param rights: copyright information for the entry.
    :param rights_type: the type attribute for the rights element.  One of
                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
                        ``'text'``.
    :param links: additional links.  Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param categories: categories for the entry.  Must be a list of dictionaries
                       with term (required), scheme and label (all optional)
    :param xml_base: The xml base (url) for this feed item.  If not provided
                     it will default to the item url.

    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/

    Everywhere where a list is demanded, any iterable can be used.
    """

    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.content = content
        self.content_type = kwargs.get('content_type', 'html')
        self.url = kwargs.get('url')
        self.id = kwargs.get('id', self.url)
        self.updated = kwargs.get('updated')
        self.summary = kwargs.get('summary')
        self.summary_type = kwargs.get('summary_type', 'html')
        self.author = kwargs.get('author', ())
        self.published = kwargs.get('published')
        self.rights = kwargs.get('rights')
        self.links = kwargs.get('links', [])
        self.categories = kwargs.get('categories', [])
        self.xml_base = kwargs.get('xml_base', feed_url)

        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, string_types + (dict,)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}

        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        if not self.updated:
            raise ValueError('updated is required')

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.title
        )

    def generate(self):
        """Yields pieces of ATOM XML."""
        base = ''
        if self.xml_base:
            base = ' xml:base="%s"' % escape(self.xml_base)
        yield u'<entry%s>\n' % base
        yield u'  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.published:
            yield u'  <published>%s</published>\n' % \
                format_iso8601(self.published)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield u'    <email>%s</email>\n' % escape(author['email'])
            yield u'  </author>\n'
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' %
                (k, escape(link[k])) for k in link)
        for category in self.categories:
            yield u'  <category %s/>\n' % ''.join('%s="%s" ' %
                (k, escape(category[k])) for k in category)
        if self.summary:
            yield u'  ' + _make_text_block('summary', self.summary,
                                           self.summary_type)
        if self.content:
            yield u'  ' + _make_text_block('content', self.content,
                                           self.content_type)
        yield u'</entry>\n'

    def to_string(self):
        """Convert the feed item into a unicode object."""
        return u''.join(self.generate())

    def __str__(self):
        return self.to_string()
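A standalone sketch of the two classes above (illustration only; URLs, titles and dates are made-up example values):

# Hedged sketch: building a feed with AtomFeed and FeedEntry.
from datetime import datetime
from werkzeug.contrib.atom import AtomFeed, FeedEntry

feed = AtomFeed('Example Feed', feed_url='http://example.com/feed.atom',
                url='http://example.com/')
feed.add(FeedEntry('First post', 'Hello, world.', content_type='text',
                   url='http://example.com/posts/1',
                   updated=datetime(2013, 1, 1)))
print(feed.to_string())   # the serialized <feed> document
# feed.get_response() wraps the same string in a BaseResponse with the
# application/atom+xml mimetype, and the feed object itself is a WSGI callable.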
679
Linux_x86_64/lib/python2.7/site-packages/werkzeug/contrib/cache.py
Normal file
@@ -0,0 +1,679 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.cache
    ~~~~~~~~~~~~~~~~~~~~~~

    The main problem with dynamic Web sites is, well, they're dynamic.  Each
    time a user requests a page, the webserver executes a lot of code, queries
    the database, renders templates until the visitor gets the page he sees.

    This is a lot more expensive than just loading a file from the file system
    and sending it to the visitor.

    For most Web applications, this overhead isn't a big deal, but once it
    becomes one, you will be glad to have a cache system in place.

    How Caching Works
    =================

    Caching is pretty simple.  Basically you have a cache object lurking around
    somewhere that is connected to a remote cache or the file system or
    something else.  When the request comes in you check if the current page
    is already in the cache and if so, you're returning it from the cache.
    Otherwise you generate the page and put it into the cache.  (Or a fragment
    of the page, you don't have to cache the full thing.)

    Here is a simple example of how to cache a sidebar for a template::

        def get_sidebar(user):
            identifier = 'sidebar_for/user%d' % user.id
            value = cache.get(identifier)
            if value is not None:
                return value
            value = generate_sidebar_for(user=user)
            cache.set(identifier, value, timeout=60 * 5)
            return value

    Creating a Cache Object
    =======================

    To create a cache object you just import the cache system of your choice
    from the cache module and instantiate it.  Then you can start working
    with that object:

    >>> from werkzeug.contrib.cache import SimpleCache
    >>> c = SimpleCache()
    >>> c.set("foo", "value")
    >>> c.get("foo")
    'value'
    >>> c.get("missing") is None
    True

    Please keep in mind that you have to create the cache and put it somewhere
    you have access to it (either as a module global you can import or you just
    put it into your WSGI application).

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import os
import re
import tempfile
from hashlib import md5
from time import time
try:
    import cPickle as pickle
except ImportError:
    import pickle

from werkzeug._compat import iteritems, string_types, text_type, \
    integer_types, to_bytes
from werkzeug.posixemulation import rename


def _items(mappingorseq):
    """Wrapper for efficient iteration over mappings represented by dicts
    or sequences::

        >>> for k, v in _items((i, i*i) for i in xrange(5)):
        ...    assert k*k == v

        >>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
        ...    assert k*k == v

    """
    if hasattr(mappingorseq, "iteritems"):
        return mappingorseq.iteritems()
    elif hasattr(mappingorseq, "items"):
        return mappingorseq.items()
    return mappingorseq


class BaseCache(object):
    """Baseclass for the cache systems.  All the cache systems implement this
    API or a superset of it.

    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`set`.
    """

    def __init__(self, default_timeout=300):
        self.default_timeout = default_timeout

    def get(self, key):
        """Looks up key in the cache and returns the value for it.
        If the key does not exist `None` is returned instead.

        :param key: the key to be looked up.
        """
        return None

    def delete(self, key):
        """Deletes `key` from the cache.  If it does not exist in the cache
        nothing happens.

        :param key: the key to delete.
        """
        pass

    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key an item in the list is created.  Example::

            foo, bar = cache.get_many("foo", "bar")

        If a key can't be looked up `None` is returned for that key
        instead.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return map(self.get, keys)

    def get_dict(self, *keys):
        """Works like :meth:`get_many` but returns a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return dict(zip(keys, self.get_many(*keys)))

    def set(self, key, value, timeout=None):
        """Adds a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout).
        """
        pass

    def add(self, key, value, timeout=None):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key or the default
                        timeout if not specified.
        """
        pass

    def set_many(self, mapping, timeout=None):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key (if not specified,
                        it uses the default timeout).
        """
        for key, value in _items(mapping):
            self.set(key, value, timeout)

    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        for key in keys:
            self.delete(key)

    def clear(self):
        """Clears the cache.  Keep in mind that not all caches support
        completely clearing the cache.
        """
        pass

    def inc(self, key, delta=1):
        """Increments the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        """
        self.set(key, (self.get(key) or 0) + delta)

    def dec(self, key, delta=1):
        """Decrements the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to subtract.
        """
        self.set(key, (self.get(key) or 0) - delta)

||||
class NullCache(BaseCache):
|
||||
"""A cache that doesn't cache. This can be useful for unit testing.
|
||||
|
||||
:param default_timeout: a dummy parameter that is ignored but exists
|
||||
for API compatibility with other caches.
|
||||
"""
|
||||
|
||||
|
||||
class SimpleCache(BaseCache):
|
||||
"""Simple memory cache for single process environments. This class exists
|
||||
mainly for the development server and is not 100% thread safe. It tries
|
||||
to use as many atomic operations as possible and no locks for simplicity
|
||||
but it could happen under heavy load that keys are added multiple times.
|
||||
|
||||
:param threshold: the maximum number of items the cache stores before
|
||||
it starts deleting some.
|
||||
:param default_timeout: the default timeout that is used if no timeout is
|
||||
specified on :meth:`~BaseCache.set`.
|
||||
"""
|
||||
|
||||
def __init__(self, threshold=500, default_timeout=300):
|
||||
BaseCache.__init__(self, default_timeout)
|
||||
self._cache = {}
|
||||
self.clear = self._cache.clear
|
||||
self._threshold = threshold
|
||||
|
||||
def _prune(self):
|
||||
if len(self._cache) > self._threshold:
|
||||
now = time()
|
||||
for idx, (key, (expires, _)) in enumerate(self._cache.items()):
|
||||
if expires <= now or idx % 3 == 0:
|
||||
self._cache.pop(key, None)
|
||||
|
||||
def get(self, key):
|
||||
expires, value = self._cache.get(key, (0, None))
|
||||
if expires > time():
|
||||
return pickle.loads(value)
|
||||
|
||||
def set(self, key, value, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.default_timeout
|
||||
self._prune()
|
||||
self._cache[key] = (time() + timeout, pickle.dumps(value,
|
||||
pickle.HIGHEST_PROTOCOL))
|
||||
|
||||
def add(self, key, value, timeout=None):
|
||||
if timeout is None:
|
||||
timeout = self.default_timeout
|
||||
if len(self._cache) > self._threshold:
|
||||
self._prune()
|
||||
item = (time() + timeout, pickle.dumps(value,
|
||||
pickle.HIGHEST_PROTOCOL))
|
||||
self._cache.setdefault(key, item)
|
||||
|
||||
def delete(self, key):
|
||||
self._cache.pop(key, None)
|
||||
|
||||
|
||||
_test_memcached_key = re.compile(br'[^\x00-\x21\xff]{1,250}$').match
|
||||
|
||||
class MemcachedCache(BaseCache):
|
||||
"""A cache that uses memcached as backend.
|
||||
|
||||
The first argument can either be an object that resembles the API of a
|
||||
:class:`memcache.Client` or a tuple/list of server addresses. In the
|
||||
event that a tuple/list is passed, Werkzeug tries to import the best
|
||||
available memcache library.
|
||||
|
||||
Implementation notes: This cache backend works around some limitations in
|
||||
memcached to simplify the interface. For example unicode keys are encoded
|
||||
to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return
|
||||
the keys in the same format as passed. Furthermore all get methods
|
||||
silently ignore key errors to not cause problems when untrusted user data
|
||||
is passed to the get methods which is often the case in web applications.
|
||||
|
||||
    :param servers: a list or tuple of server addresses or alternatively
                    a :class:`memcache.Client` or a compatible client.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`.
    :param key_prefix: a prefix that is added before all keys.  This makes it
                       possible to use the same memcached server for different
                       applications.  Keep in mind that
                       :meth:`~BaseCache.clear` will also clear keys with a
                       different prefix.
    """

    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ['127.0.0.1:11211']
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError('no memcache module found')
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers

        self.key_prefix = to_bytes(key_prefix)

    def get(self, key):
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        # memcached doesn't support keys longer than that.  Because such
        # over-long keys often come from user submitted data we fail
        # silently on get instead of raising an error.
        if _test_memcached_key(key):
            return self._client.get(key)

    def get_dict(self, *keys):
        key_mapping = {}
        have_encoded_keys = False
        for key in keys:
            if isinstance(key, text_type):
                encoded_key = key.encode('utf-8')
                have_encoded_keys = True
            else:
                encoded_key = key
            if self.key_prefix:
                encoded_key = self.key_prefix + encoded_key
            if _test_memcached_key(key):
                key_mapping[encoded_key] = key
        d = rv = self._client.get_multi(key_mapping.keys())
        if have_encoded_keys or self.key_prefix:
            rv = {}
            for key, value in iteritems(d):
                rv[key_mapping[key]] = value
        if len(rv) < len(keys):
            for key in keys:
                if key not in rv:
                    rv[key] = None
        return rv

    def add(self, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        self._client.add(key, value, timeout)

    def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        self._client.set(key, value, timeout)

    def get_many(self, *keys):
        d = self.get_dict(*keys)
        return [d[key] for key in keys]

    def set_many(self, mapping, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        new_mapping = {}
        for key, value in _items(mapping):
            if isinstance(key, text_type):
                key = key.encode('utf-8')
            if self.key_prefix:
                key = self.key_prefix + key
            new_mapping[key] = value
        self._client.set_multi(new_mapping, timeout)

    def delete(self, key):
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        if _test_memcached_key(key):
            self._client.delete(key)

    def delete_many(self, *keys):
        new_keys = []
        for key in keys:
            if isinstance(key, text_type):
                key = key.encode('utf-8')
            if self.key_prefix:
                key = self.key_prefix + key
            if _test_memcached_key(key):
                new_keys.append(key)
        self._client.delete_multi(new_keys)

    def clear(self):
        self._client.flush_all()

    def inc(self, key, delta=1):
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        self._client.incr(key, delta)

    def dec(self, key, delta=1):
        if isinstance(key, text_type):
            key = key.encode('utf-8')
        if self.key_prefix:
            key = self.key_prefix + key
        self._client.decr(key, delta)

    def import_preferred_memcache_lib(self, servers):
        """Returns an initialized memcache client.  Used by the constructor."""
        try:
            import pylibmc
        except ImportError:
            pass
        else:
            return pylibmc.Client(servers)

        try:
            from google.appengine.api import memcache
        except ImportError:
            pass
        else:
            return memcache.Client()

        try:
            import memcache
        except ImportError:
            pass
        else:
            return memcache.Client(servers)


# backwards compatibility
GAEMemcachedCache = MemcachedCache
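
# Illustrative usage sketch (not part of the original module): how the
# memcached backend above is typically wired up.  The server address, key
# prefix and values are assumptions for the example only, and a running
# memcached server is required for the assertions to hold.
def _memcached_cache_example():
    cache = MemcachedCache(servers=['127.0.0.1:11211'], default_timeout=300,
                           key_prefix='myapp-')
    cache.set('user-count', 42, timeout=60)   # expires after one minute
    assert cache.get('user-count') == 42
    # get_many preserves the order of the requested keys and fills
    # missing entries with None
    assert cache.get_many('user-count', 'missing') == [42, None]
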
class RedisCache(BaseCache):
    """Uses the Redis key-value store as a cache backend.

    The first argument can be either a string denoting the address of the
    Redis server or an object resembling an instance of a redis.Redis class.

    Note: the Python Redis API already takes care of encoding unicode
    strings on the fly.

    .. versionadded:: 0.7

    .. versionadded:: 0.8
       `key_prefix` was added.

    .. versionchanged:: 0.8
       This cache backend now properly serializes objects.

    .. versionchanged:: 0.8.3
       This cache backend now supports password authentication.

    :param host: address of the Redis server or an object whose API is
                 compatible with the official Python Redis client (redis-py).
    :param port: port number on which the Redis server listens for
                 connections.
    :param password: password authentication for the Redis server.
    :param db: db (zero-based numeric index) on the Redis server to connect
               to.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`.
    :param key_prefix: A prefix that should be added to all keys.
    """

    def __init__(self, host='localhost', port=6379, password=None,
                 db=0, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if isinstance(host, string_types):
            try:
                import redis
            except ImportError:
                raise RuntimeError('no redis module found')
            self._client = redis.Redis(host=host, port=port,
                                       password=password, db=db)
        else:
            self._client = host
        self.key_prefix = key_prefix or ''

    def dump_object(self, value):
        """Dumps an object into a string for redis.  By default it serializes
        integers as a regular string and pickle dumps everything else.
        """
        t = type(value)
        if t in integer_types:
            return str(value).encode('ascii')
        return b'!' + pickle.dumps(value)

    def load_object(self, value):
        """The reversal of :meth:`dump_object`.  This might be called with
        None.
        """
        if value is None:
            return None
        if value.startswith(b'!'):
            return pickle.loads(value[1:])
        try:
            return int(value)
        except ValueError:
            # before 0.8 we did not have serialization.  Still support that.
            return value

    def get(self, key):
        return self.load_object(self._client.get(self.key_prefix + key))

    def get_many(self, *keys):
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return [self.load_object(x) for x in self._client.mget(keys)]

    def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        dump = self.dump_object(value)
        self._client.setex(self.key_prefix + key, dump, timeout)

    def add(self, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        dump = self.dump_object(value)
        added = self._client.setnx(self.key_prefix + key, dump)
        if added:
            self._client.expire(self.key_prefix + key, timeout)

    def set_many(self, mapping, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        pipe = self._client.pipeline()
        for key, value in _items(mapping):
            dump = self.dump_object(value)
            pipe.setex(self.key_prefix + key, dump, timeout)
        pipe.execute()

    def delete(self, key):
        self._client.delete(self.key_prefix + key)

    def delete_many(self, *keys):
        if not keys:
            return
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        self._client.delete(*keys)

    def clear(self):
        if self.key_prefix:
            keys = self._client.keys(self.key_prefix + '*')
            if keys:
                self._client.delete(*keys)
        else:
            self._client.flushdb()

    def inc(self, key, delta=1):
        return self._client.incr(self.key_prefix + key, delta)

    def dec(self, key, delta=1):
        return self._client.decr(self.key_prefix + key, delta)
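
# Illustrative sketch (not part of the original module) of the serialization
# scheme used above: integers are stored as plain ASCII digits, everything
# else is pickled and prefixed with ``!`` so load_object can tell the two
# apart.  The host, prefix and values are assumptions for the example only;
# dump_object/load_object never touch the server.
def _redis_serialization_example():
    cache = RedisCache(host='localhost', port=6379, key_prefix='myapp:')
    assert cache.dump_object(42) == b'42'
    dump = cache.dump_object({'a': 1})
    assert dump.startswith(b'!')
    assert cache.load_object(dump) == {'a': 1}
    assert cache.load_object(b'42') == 42
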
class FileSystemCache(BaseCache):
    """A cache that stores the items on the file system.  This cache depends
    on being the only user of the `cache_dir`.  Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`.
    :param mode: the file mode wanted for the cache files, default 0600
    """

    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = '.__wz_cache'

    def __init__(self, cache_dir, threshold=500, default_timeout=300,
                 mode=0o600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode
        if not os.path.exists(self._path):
            os.makedirs(self._path)

    def _list_dir(self):
        """return a list of (fully qualified) cache filenames
        """
        return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
                if not fn.endswith(self._fs_transaction_suffix)]

    def _prune(self):
        entries = self._list_dir()
        if len(entries) > self._threshold:
            now = time()
            for idx, fname in enumerate(entries):
                remove = False
                f = None
                try:
                    try:
                        f = open(fname, 'rb')
                        expires = pickle.load(f)
                        remove = expires <= now or idx % 3 == 0
                    finally:
                        if f is not None:
                            f.close()
                except Exception:
                    pass
                if remove:
                    try:
                        os.remove(fname)
                    except (IOError, OSError):
                        pass

    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                pass

    def _get_filename(self, key):
        if isinstance(key, text_type):
            key = key.encode('utf-8')  # XXX unicode review
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)

    def get(self, key):
        filename = self._get_filename(key)
        try:
            f = open(filename, 'rb')
            try:
                # the expiry timestamp is pickled in front of the value;
                # an expired file is removed and treated as a miss
                if pickle.load(f) >= time():
                    return pickle.load(f)
            finally:
                f.close()
            os.remove(filename)
        except Exception:
            return None

    def add(self, key, value, timeout=None):
        filename = self._get_filename(key)
        if not os.path.exists(filename):
            self.set(key, value, timeout)

    def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        filename = self._get_filename(key)
        self._prune()
        try:
            fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                       dir=self._path)
            f = os.fdopen(fd, 'wb')
            try:
                pickle.dump(int(time() + timeout), f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            finally:
                f.close()
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            pass

    def delete(self, key):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            pass
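
# Illustrative sketch (not part of the original module): the file system
# backend stores one pickle file per key, named after the MD5 hex digest of
# the key, with the expiry timestamp pickled in front of the value.  The
# temporary directory is an assumption for the example only.
def _filesystem_cache_example():
    import tempfile as _tempfile
    cache = FileSystemCache(_tempfile.mkdtemp(), threshold=500,
                            default_timeout=300)
    cache.set('answer', {'value': 42})
    assert cache.get('answer') == {'value': 42}
    cache.delete('answer')
    assert cache.get('answer') is None
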
@@ -0,0 +1,244 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.fixers
    ~~~~~~~~~~~~~~~~~~~~~~~

    .. versionadded:: 0.5

    This module includes various helpers that fix bugs in web servers.  They
    may be necessary for some versions of a buggy web server but not others.
    We try to stay up to date with the status of the bugs as well as
    possible, but you have to check yourself whether a fixer solves the
    problem you encounter.

    If you notice bugs in webservers not fixed in this module consider
    contributing a patch.

    :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more
                details.
    :license: BSD, see LICENSE for more details.
"""
try:
    from urllib import unquote
except ImportError:
    from urllib.parse import unquote

from werkzeug.http import parse_options_header, parse_cache_control_header, \
     parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl


class CGIRootFix(object):
    """Wrap the application in this middleware if you are using FastCGI or
    CGI and you have problems with your app root being set to the cgi
    script's path instead of the path users are going to visit.

    .. versionchanged:: 0.9
       Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.

    :param app: the WSGI application
    :param app_root: Defaulting to ``'/'``, you can set this to something
                     else if your app is mounted somewhere else.
    """

    def __init__(self, app, app_root='/'):
        self.app = app
        self.app_root = app_root

    def __call__(self, environ, start_response):
        # only set PATH_INFO for older versions of Lighty or if no
        # server software is provided.  That's because the test was
        # added in newer Werkzeug versions and we don't want to break
        # people's code if they are using this fixer in a test that
        # does not set the SERVER_SOFTWARE key.
        if 'SERVER_SOFTWARE' not in environ or \
           environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28':
            environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
                environ.get('PATH_INFO', '')
        environ['SCRIPT_NAME'] = self.app_root.strip('/')
        return self.app(environ, start_response)

# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
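
# Illustrative sketch (not part of the original module): mounting an
# application at /myapp by wrapping it in the fixer above.  ``my_wsgi_app``
# and the mount point are placeholder assumptions.
def _cgi_root_fix_example(my_wsgi_app):
    # requests will now see SCRIPT_NAME='myapp' instead of the cgi
    # script's own path
    return CGIRootFix(my_wsgi_app, app_root='/myapp')
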
class PathInfoFromRequestUriFix(object):
    """On Windows environment variables are limited to the system charset
    which makes it impossible to store the `PATH_INFO` variable in the
    environment without loss of information on some systems.

    This is for example a problem for CGI scripts on a Windows Apache.

    This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
    `REQUEST_URL`, or `UNENCODED_URL` (whatever is available).  Thus the
    fix can only be applied if the webserver supports either of these
    variables.

    :param app: the WSGI application
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
            if key not in environ:
                continue
            request_uri = unquote(environ[key])
            script_name = unquote(environ.get('SCRIPT_NAME', ''))
            if request_uri.startswith(script_name):
                environ['PATH_INFO'] = request_uri[len(script_name):] \
                    .split('?', 1)[0]
                break
        return self.app(environ, start_response)


class ProxyFix(object):
    """This middleware can be applied to add HTTP proxy support to an
    application that was not designed with HTTP proxies in mind.  It
    sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers.

    If you have more than one proxy server in front of your app, set
    `num_proxies` accordingly.

    Do not use this middleware in non-proxy setups for security reasons.

    The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
    the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
    `werkzeug.proxy_fix.orig_http_host`.

    :param app: the WSGI application
    :param num_proxies: the number of proxy servers in front of the app.
    """

    def __init__(self, app, num_proxies=1):
        self.app = app
        self.num_proxies = num_proxies

    def get_remote_addr(self, forwarded_for):
        """Selects the new remote addr from the given list of ips in
        X-Forwarded-For.  By default it picks the one that the `num_proxies`
        proxy server provides.  Before 0.9 it would always pick the first.

        .. versionadded:: 0.8
        """
        if len(forwarded_for) >= self.num_proxies:
            return forwarded_for[-1 * self.num_proxies]

    def __call__(self, environ, start_response):
        getter = environ.get
        forwarded_proto = getter('HTTP_X_FORWARDED_PROTO', '')
        forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
        forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
        environ.update({
            'werkzeug.proxy_fix.orig_wsgi_url_scheme':
                getter('wsgi.url_scheme'),
            'werkzeug.proxy_fix.orig_remote_addr': getter('REMOTE_ADDR'),
            'werkzeug.proxy_fix.orig_http_host': getter('HTTP_HOST')
        })
        forwarded_for = [x for x in [x.strip() for x in forwarded_for] if x]
        remote_addr = self.get_remote_addr(forwarded_for)
        if remote_addr is not None:
            environ['REMOTE_ADDR'] = remote_addr
        if forwarded_host:
            environ['HTTP_HOST'] = forwarded_host
        if forwarded_proto:
            environ['wsgi.url_scheme'] = forwarded_proto
        return self.app(environ, start_response)
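
# Illustrative sketch (not part of the original module): with two proxies
# in front of the application, the real client address is the second entry
# from the right in X-Forwarded-For, so num_proxies=2 makes
# get_remote_addr pick it.  Addresses are assumptions for the example.
def _proxy_fix_example(my_wsgi_app):
    app = ProxyFix(my_wsgi_app, num_proxies=2)
    fix = ProxyFix(None, num_proxies=2)
    # ['client', 'inner proxy'] -> picks the entry num_proxies from the end
    assert fix.get_remote_addr(['203.0.113.7', '10.0.0.1']) == '203.0.113.7'
    return app
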
class HeaderRewriterFix(object):
    """This middleware can remove response headers and add others.  This
    is for example useful to remove the `Date` header from responses if you
    are using a server that adds that header, no matter if it's present or
    not, or to add `X-Powered-By` headers::

        app = HeaderRewriterFix(app, remove_headers=['Date'],
                                add_headers=[('X-Powered-By', 'WSGI')])

    :param app: the WSGI application
    :param remove_headers: a sequence of header keys that should be
                           removed.
    :param add_headers: a sequence of ``(key, value)`` tuples that should
                        be added.
    """

    def __init__(self, app, remove_headers=None, add_headers=None):
        self.app = app
        self.remove_headers = set(x.lower() for x in (remove_headers or ()))
        self.add_headers = list(add_headers or ())

    def __call__(self, environ, start_response):
        def rewriting_start_response(status, headers, exc_info=None):
            new_headers = []
            for key, value in headers:
                if key.lower() not in self.remove_headers:
                    new_headers.append((key, value))
            new_headers += self.add_headers
            return start_response(status, new_headers, exc_info)
        return self.app(environ, rewriting_start_response)


class InternetExplorerFix(object):
    """This middleware fixes a couple of bugs with Microsoft Internet
    Explorer.  Currently the following fixes are applied:

    -   removing of `Vary` headers for unsupported mimetypes which
        causes trouble with caching.  Can be disabled by passing
        ``fix_vary=False`` to the constructor.
        see: http://support.microsoft.com/kb/824847/en-us

    -   removes offending headers to work around caching bugs in
        Internet Explorer if `Content-Disposition` is set.  Can be
        disabled by passing ``fix_attach=False`` to the constructor.

    If it does not detect affected Internet Explorer versions it won't touch
    the request / response.
    """

    # This code was inspired by Django fixers for the same bugs.  The
    # fix_vary and fix_attach fixers were originally implemented in Django
    # by Michael Axiak and are available as part of the Django project:
    # http://code.djangoproject.com/ticket/4148

    def __init__(self, app, fix_vary=True, fix_attach=True):
        self.app = app
        self.fix_vary = fix_vary
        self.fix_attach = fix_attach

    def fix_headers(self, environ, headers, status=None):
        if self.fix_vary:
            header = headers.get('content-type', '')
            mimetype, options = parse_options_header(header)
            if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
                headers.pop('vary', None)

        if self.fix_attach and 'content-disposition' in headers:
            pragma = parse_set_header(headers.get('pragma', ''))
            pragma.discard('no-cache')
            header = pragma.to_header()
            if not header:
                headers.pop('pragma', '')
            else:
                headers['Pragma'] = header
            header = headers.get('cache-control', '')
            if header:
                cc = parse_cache_control_header(header,
                                                cls=ResponseCacheControl)
                cc.no_cache = None
                cc.no_store = False
                header = cc.to_header()
                if not header:
                    headers.pop('cache-control', '')
                else:
                    headers['Cache-Control'] = header

    def run_fixed(self, environ, start_response):
        def fixing_start_response(status, headers, exc_info=None):
            headers = Headers(headers)
            self.fix_headers(environ, headers, status)
            return start_response(status, headers.to_wsgi_list(), exc_info)
        return self.app(environ, fixing_start_response)

    def __call__(self, environ, start_response):
        ua = UserAgent(environ)
        if ua.browser != 'msie':
            return self.app(environ, start_response)
        return self.run_fixed(environ, start_response)
@@ -0,0 +1,346 @@
# -*- coding: utf-8 -*-
r"""
    werkzeug.contrib.iterio
    ~~~~~~~~~~~~~~~~~~~~~~~

    This module implements a :class:`IterIO` that converts an iterator into
    a stream object and the other way round.  Converting streams into
    iterators requires the `greenlet`_ module.

    To convert an iterator into a stream all you have to do is to pass it
    directly to the :class:`IterIO` constructor.  In this example we pass it
    a newly created generator::

        def foo():
            yield "something\n"
            yield "otherthings"
        stream = IterIO(foo())
        print stream.read()         # read the whole iterator

    The other way round works a bit differently because we have to ensure
    that the code execution doesn't take place yet.  An :class:`IterIO` call
    with a callable as first argument does two things.  The function itself
    is passed an :class:`IterIO` stream it can feed.  The object returned by
    the :class:`IterIO` constructor on the other hand is not a stream object
    but an iterator::

        def foo(stream):
            stream.write("some")
            stream.write("thing")
            stream.flush()
            stream.write("otherthing")
        iterator = IterIO(foo)
        print iterator.next()       # prints something
        print iterator.next()       # prints otherthing
        iterator.next()             # raises StopIteration

    .. _greenlet: http://codespeak.net/py/dist/greenlet.html

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
try:
    import greenlet
except ImportError:
    greenlet = None

from werkzeug._compat import implements_iterator


def _mixed_join(iterable, sentinel):
    """concatenate any string type in an intelligent way."""
    iterator = iter(iterable)
    first_item = next(iterator, sentinel)
    if isinstance(first_item, bytes):
        return first_item + b''.join(iterator)
    return first_item + u''.join(iterator)


def _newline(reference_string):
    if isinstance(reference_string, bytes):
        return b'\n'
    return u'\n'


@implements_iterator
class IterIO(object):
    """Instances of this object implement an interface compatible with the
    standard Python :class:`file` object.  Streams are either read-only or
    write-only depending on how the object is created.

    If the first argument is an iterable a file like object is returned that
    returns the contents of the iterable.  In case the iterable is empty
    read operations will return the sentinel value.

    If the first argument is a callable then the stream object will be
    created and passed to that function.  The caller itself however will
    not receive a stream but an iterable.  The function will be executed
    step by step as something iterates over the returned iterable.  Each
    call to :meth:`flush` will create an item for the iterable.  If
    :meth:`flush` is called without any writes in-between the sentinel
    value will be yielded.

    Note for Python 3: due to the incompatible interface of bytes and
    streams you should set the sentinel value explicitly to an empty
    bytestring (``b''``) if you are expecting to deal with bytes as
    otherwise the end of the stream is marked with the wrong sentinel
    value.

    .. versionadded:: 0.9
       `sentinel` parameter was added.
    """

    def __new__(cls, obj, sentinel=''):
        try:
            iterator = iter(obj)
        except TypeError:
            return IterI(obj, sentinel)
        return IterO(iterator, sentinel)

    def __iter__(self):
        return self

    def tell(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return self.pos

    def isatty(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return False

    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def truncate(self, size=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def writelines(self, list):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def read(self, n=-1):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def readlines(self, sizehint=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def readline(self, length=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def __next__(self):
        if self.closed:
            raise StopIteration()
        line = self.readline()
        if not line:
            raise StopIteration()
        return line


class IterI(IterIO):
    """Convert a stream into an iterator."""

    def __new__(cls, func, sentinel=''):
        if greenlet is None:
            raise RuntimeError('IterI requires greenlet support')
        stream = object.__new__(cls)
        stream._parent = greenlet.getcurrent()
        stream._buffer = []
        stream.closed = False
        stream.sentinel = sentinel
        stream.pos = 0

        def run():
            func(stream)
            stream.close()

        g = greenlet.greenlet(run, stream._parent)
        while 1:
            rv = g.switch()
            if not rv:
                return
            yield rv[0]

    def close(self):
        if not self.closed:
            self.closed = True
            self._flush_impl()

    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if s:
            self.pos += len(s)
            self._buffer.append(s)

    def writelines(self, list):
        for item in list:
            self.write(item)

    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        self._flush_impl()

    def _flush_impl(self):
        data = _mixed_join(self._buffer, self.sentinel)
        self._buffer = []
        if not data and self.closed:
            self._parent.switch()
        else:
            self._parent.switch((data,))


class IterO(IterIO):
    """Iter output.  Wrap an iterator and give it a stream like interface."""

    def __new__(cls, gen, sentinel=''):
        self = object.__new__(cls)
        self._gen = gen
        self._buf = None
        self.sentinel = sentinel
        self.closed = False
        self.pos = 0
        return self

    def __iter__(self):
        return self

    def _buf_append(self, string):
        '''Replace string directly without appending to an empty string,
        avoiding type issues.'''
        if not self._buf:
            self._buf = string
        else:
            self._buf += string

    def close(self):
        if not self.closed:
            self.closed = True
            if hasattr(self._gen, 'close'):
                self._gen.close()

    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            self.read()
            self.pos = min(self.pos, self.pos + pos)
            return
        elif mode != 0:
            raise IOError('Invalid argument')
        buf = []
        try:
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            while pos > tmp_end_pos:
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        self.pos = max(0, pos)

    def read(self, n=-1):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if n < 0:
            self._buf_append(_mixed_join(self._gen, self.sentinel))
            result = self._buf[self.pos:]
            self.pos += len(result)
            return result
        new_pos = self.pos + n
        buf = []
        try:
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            while new_pos > tmp_end_pos or (self._buf is None and not buf):
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))

        if self._buf is None:
            return self.sentinel

        new_pos = max(0, new_pos)
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readline(self, length=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')

        nl_pos = -1
        if self._buf:
            nl_pos = self._buf.find(_newline(self._buf), self.pos)
        buf = []
        try:
            pos = self.pos
            while nl_pos < 0:
                item = next(self._gen)
                local_pos = item.find(_newline(item))
                buf.append(item)
                if local_pos >= 0:
                    nl_pos = pos + local_pos
                    break
                pos += len(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))

        if self._buf is None:
            return self.sentinel

        if nl_pos < 0:
            new_pos = len(self._buf)
        else:
            new_pos = nl_pos + 1
        if length is not None and self.pos + length < new_pos:
            new_pos = self.pos + length
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readlines(self, sizehint=0):
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
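
# Illustrative sketch (not part of the original module): reading a byte
# iterator through the read-only side of IterIO.  As the class docstring
# notes, the sentinel must be set to b'' when working with bytes, otherwise
# read() at EOF would return a value of the wrong type.
def _iterio_example():
    stream = IterIO(iter([b'some\n', b'other']), sentinel=b'')
    assert stream.readline() == b'some\n'
    assert stream.read() == b'other'
    assert stream.read() == b''   # the sentinel marks the end of the stream
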
@@ -0,0 +1,262 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.jsrouting
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Addon module that allows to create a JavaScript function from a map
    that generates rules.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
try:
    from simplejson import dumps
except ImportError:
    try:
        from json import dumps
    except ImportError:
        def dumps(*args):
            raise RuntimeError('simplejson required for jsrouting')

from inspect import getmro
from werkzeug.routing import NumberConverter
from werkzeug._compat import iteritems


def render_template(name_parts, rules, converters):
    result = u''
    if name_parts:
        for idx in xrange(0, len(name_parts) - 1):
            name = u'.'.join(name_parts[:idx + 1])
            result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name)
        result += '%s = ' % '.'.join(name_parts)
    result += """(function (server_name, script_name, subdomain, url_scheme) {
    var converters = [%(converters)s];
    var rules = %(rules)s;
    function in_array(array, value) {
        if (array.indexOf != undefined) {
            return array.indexOf(value) != -1;
        }
        for (var i = 0; i < array.length; i++) {
            if (array[i] == value) {
                return true;
            }
        }
        return false;
    }
    function array_diff(array1, array2) {
        array1 = array1.slice();
        for (var i = array1.length-1; i >= 0; i--) {
            if (in_array(array2, array1[i])) {
                array1.splice(i, 1);
            }
        }
        return array1;
    }
    function split_obj(obj) {
        var names = [];
        var values = [];
        for (var name in obj) {
            if (typeof(obj[name]) != 'function') {
                names.push(name);
                values.push(obj[name]);
            }
        }
        return {names: names, values: values, original: obj};
    }
    function suitable(rule, args) {
        var default_args = split_obj(rule.defaults || {});
        var diff_arg_names = array_diff(rule.arguments, default_args.names);

        for (var i = 0; i < diff_arg_names.length; i++) {
            if (!in_array(args.names, diff_arg_names[i])) {
                return false;
            }
        }

        if (array_diff(rule.arguments, args.names).length == 0) {
            if (rule.defaults == null) {
                return true;
            }
            for (var i = 0; i < default_args.names.length; i++) {
                var key = default_args.names[i];
                var value = default_args.values[i];
                if (value != args.original[key]) {
                    return false;
                }
            }
        }

        return true;
    }
    function build(rule, args) {
        var tmp = [];
        var processed = rule.arguments.slice();
        for (var i = 0; i < rule.trace.length; i++) {
            var part = rule.trace[i];
            if (part.is_dynamic) {
                var converter = converters[rule.converters[part.data]];
                var data = converter(args.original[part.data]);
                if (data == null) {
                    return null;
                }
                tmp.push(data);
                processed.push(part.name);
            } else {
                tmp.push(part.data);
            }
        }
        tmp = tmp.join('');
        var pipe = tmp.indexOf('|');
        var subdomain = tmp.substring(0, pipe);
        var url = tmp.substring(pipe+1);

        var unprocessed = array_diff(args.names, processed);
        var first_query_var = true;
        for (var i = 0; i < unprocessed.length; i++) {
            if (first_query_var) {
                url += '?';
            } else {
                url += '&';
            }
            first_query_var = false;
            url += encodeURIComponent(unprocessed[i]);
            url += '=';
            url += encodeURIComponent(args.original[unprocessed[i]]);
        }
        return {subdomain: subdomain, path: url};
    }
    function lstrip(s, c) {
        while (s && s.substring(0, 1) == c) {
            s = s.substring(1);
        }
        return s;
    }
    function rstrip(s, c) {
        while (s && s.substring(s.length-1, s.length) == c) {
            s = s.substring(0, s.length-1);
        }
        return s;
    }
    return function(endpoint, args, force_external) {
        args = split_obj(args);
        var rv = null;
        for (var i = 0; i < rules.length; i++) {
            var rule = rules[i];
            if (rule.endpoint != endpoint) continue;
            if (suitable(rule, args)) {
                rv = build(rule, args);
                if (rv != null) {
                    break;
                }
            }
        }
        if (rv == null) {
            return null;
        }
        if (!force_external && rv.subdomain == subdomain) {
            return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
        } else {
            return url_scheme + '://'
                   + (rv.subdomain ? rv.subdomain + '.' : '')
                   + server_name + rstrip(script_name, '/')
                   + '/' + lstrip(rv.path, '/');
        }
    };
})""" % {'converters': u', '.join(converters), 'rules': rules}
    return result


def generate_map(map, name='url_map'):
    """
    Generates a JavaScript function containing the rules defined in
    this map, to be used with a MapAdapter's generate_javascript
    method.  If you don't pass a name the returned JavaScript code is
    an expression that returns a function.  Otherwise it's a standalone
    script that assigns the function with that name.  Dotted names are
    resolved (so you can use a name like 'obj.url_for').

    In order to use JavaScript generation, simplejson must be installed.

    Note that using this feature will expose the rules
    defined in your map to users.  If your rules contain sensitive
    information, don't use JavaScript generation!
    """
    from warnings import warn
    warn(DeprecationWarning('This module is deprecated'))
    map.update()
    rules = []
    converters = []
    for rule in map.iter_rules():
        trace = [{
            'is_dynamic': is_dynamic,
            'data': data
        } for is_dynamic, data in rule._trace]
        rule_converters = {}
        for key, converter in iteritems(rule._converters):
            js_func = js_to_url_function(converter)
            try:
                index = converters.index(js_func)
            except ValueError:
                converters.append(js_func)
                index = len(converters) - 1
            rule_converters[key] = index
        rules.append({
            u'endpoint': rule.endpoint,
            u'arguments': list(rule.arguments),
            u'converters': rule_converters,
            u'trace': trace,
            u'defaults': rule.defaults
        })

    return render_template(name_parts=name and name.split('.') or [],
                           rules=dumps(rules),
                           converters=converters)


def generate_adapter(adapter, name='url_for', map_name='url_map'):
    """Generates the url building function for a map."""
    values = {
        u'server_name': dumps(adapter.server_name),
        u'script_name': dumps(adapter.script_name),
        u'subdomain': dumps(adapter.subdomain),
        u'url_scheme': dumps(adapter.url_scheme),
        u'name': name,
        u'map_name': map_name
    }
    return u'''\
var %(name)s = %(map_name)s(
    %(server_name)s,
    %(script_name)s,
    %(subdomain)s,
    %(url_scheme)s
);''' % values


def js_to_url_function(converter):
    """Get the JavaScript converter function from a rule."""
    if hasattr(converter, 'js_to_url_function'):
        data = converter.js_to_url_function()
    else:
        for cls in getmro(type(converter)):
            if cls in js_to_url_functions:
                data = js_to_url_functions[cls](converter)
                break
        else:
            return 'encodeURIComponent'
    return '(function(value) { %s })' % data


def NumberConverter_js_to_url(conv):
    if conv.fixed_digits:
        return u'''\
var result = value.toString();
while (result.length < %s)
    result = '0' + result;
return result;''' % conv.fixed_digits
    return u'return value.toString();'


js_to_url_functions = {
    NumberConverter: NumberConverter_js_to_url
}
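
# Illustrative sketch (not part of the original module): generating a
# client side url_for from a routing map.  The endpoint and rule are
# assumptions for the example only; the function emits a deprecation
# warning because the whole module is deprecated.
def _jsrouting_example():
    from werkzeug.routing import Map, Rule
    url_map = Map([Rule('/user/<int:user_id>', endpoint='user')])
    js = generate_map(url_map, name='url_map')
    # embed ``js`` in a page, then in the browser (illustrative):
    #   var url_for = url_map('example.com', '/', null, 'http');
    #   url_for('user', {user_id: 23});
    return js
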
@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.limiter
    ~~~~~~~~~~~~~~~~~~~~~~~~

    A middleware that limits incoming data.  This works around problems with
    Trac_ or Django_ because those stream request data directly into memory.

    .. _Trac: http://trac.edgewall.org/
    .. _Django: http://www.djangoproject.com/

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from warnings import warn

from werkzeug.wsgi import LimitedStream


class StreamLimitMiddleware(object):
    """Limits the input stream to a given number of bytes.  This is useful if
    you have a WSGI application that reads form data into memory (django for
    example) and you don't want users to harm the server by uploading tons of
    data.

    The default maximum size is 10MB.

    .. versionchanged:: 0.9
       Deprecated middleware.
    """

    def __init__(self, app, maximum_size=1024 * 1024 * 10):
        warn(DeprecationWarning('This middleware is deprecated'))
        self.app = app
        self.maximum_size = maximum_size

    def __call__(self, environ, start_response):
        limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
        environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
        return self.app(environ, start_response)
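
# Illustrative sketch (not part of the original module): capping request
# bodies at one megabyte instead of the 10MB default.  ``my_wsgi_app`` is a
# placeholder name.
def _stream_limit_example(my_wsgi_app):
    return StreamLimitMiddleware(my_wsgi_app, maximum_size=1024 * 1024)
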
@@ -0,0 +1,334 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.lint
    ~~~~~~~~~~~~~~~~~~~~~

    .. versionadded:: 0.5

    This module provides a middleware that performs sanity checks of the WSGI
    application.  It checks that :pep:`333` is properly implemented and warns
    on some common HTTP errors such as non-empty responses for 304 status
    codes.

    This module provides a middleware, the :class:`LintMiddleware`.  Wrap
    your application with it and it will warn about common problems with
    WSGI and HTTP while your application is running.

    It's strongly recommended to use it during development.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn

from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types


class WSGIWarning(Warning):
    """Warning class for WSGI warnings."""


class HTTPWarning(Warning):
    """Warning class for HTTP warnings."""


def check_string(context, obj, stacklevel=3):
    if type(obj) is not str:
        warn(WSGIWarning('%s requires bytestrings, got %s' %
                         (context, obj.__class__.__name__)))


class InputStream(object):

    def __init__(self, stream):
        self._stream = stream

    def read(self, *args):
        if len(args) == 0:
            warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
                             'input stream, thus making calls to '
                             'wsgi.input.read() unsafe.  Conforming servers '
                             'may never return from this call.'),
                 stacklevel=2)
        elif len(args) != 1:
            warn(WSGIWarning('too many parameters passed to '
                             'wsgi.input.read()'), stacklevel=2)
        return self._stream.read(*args)

    def readline(self, *args):
        if len(args) == 0:
            warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
                             ' are unsafe.  Use wsgi.input.read() instead.'),
                 stacklevel=2)
        elif len(args) == 1:
            warn(WSGIWarning('wsgi.input.readline() was called with a size '
                             'hint.  WSGI does not support this, although '
                             'it\'s available on all major servers.'),
                 stacklevel=2)
        else:
            raise TypeError('too many arguments passed to '
                            'wsgi.input.readline()')
        return self._stream.readline(*args)

    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
            return iter(())

    def close(self):
        warn(WSGIWarning('application closed the input stream!'),
             stacklevel=2)
        self._stream.close()


class ErrorStream(object):

    def __init__(self, stream):
        self._stream = stream

    def write(self, s):
        check_string('wsgi.error.write()', s)
        self._stream.write(s)

    def flush(self):
        self._stream.flush()

    def writelines(self, seq):
        for line in seq:
            self.write(line)

    def close(self):
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()


class GuardedWrite(object):

    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks

    def __call__(self, s):
        check_string('write()', s)
        self._write(s)
        self._chunks.append(len(s))


class GuardedIterator(object):

    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        self._next = iter(iterator).next
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'),
                 stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)

            if status_code == 304:
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))

    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                pass


class LintMiddleware(object):
    """This middleware wraps an application and warns on common errors.
    Among other things it currently checks for the following problems:

    -   invalid status codes
    -   non-bytestrings sent to the WSGI server
    -   strings returned from the WSGI application
    -   non-empty conditional responses
    -   unquoted etags
    -   relative URLs in the Location header
    -   unsafe calls to wsgi.input
    -   unclosed iterators

    Detected errors are emitted using the standard Python :mod:`warnings`
    system and usually end up on :data:`stderr`.

    ::

        from werkzeug.contrib.lint import LintMiddleware
        app = LintMiddleware(app)

    :param app: the application to wrap
    """

    def __init__(self, app):
        self.app = app

    def check_environ(self, environ):
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard '
                             'python dict.'), stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                                 % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)

        script_name = environ.get('SCRIPT_NAME', '')
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                             % script_name), stacklevel=3)
        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                             % path_info), stacklevel=3)

    def check_start_response(self, status, headers, exc_info):
        check_string('status', status)
        status_code = status.split(None, 1)[0]
        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning('Status code must be three digits'),
                 stacklevel=3)
        if len(status) < 4 or status[3] != ' ':
            warn(WSGIWarning('Invalid value for status %r.  Valid '
                             'status strings are three digits, a space '
                             'and a status explanation'), stacklevel=3)
        status_code = int(status_code)
        if status_code < 100:
            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)

        if type(headers) is not list:
            warn(WSGIWarning('header list is not a list'), stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('header items must be 2-item tuples'),
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning('header items must be strings'),
                     stacklevel=3)
            if name.lower() == 'status':
                warn(WSGIWarning('The status header is not supported due to '
                                 'conflicts with the CGI spec.'),
                     stacklevel=3)

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)

        headers = Headers(headers)
        self.check_headers(headers)

        return status_code, headers

    def check_headers(self, headers):
        etag = headers.get('etag')
        if etag is not None:
            if etag.startswith('w/'):
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)

        location = headers.get('location')
        if location is not None:
            if not urlparse(location).netloc:
                warn(HTTPWarning('absolute URLs required for location '
                                 'header'), stacklevel=4)

    def check_iterator(self, app_iter):
        if isinstance(app_iter, string_types):
            warn(WSGIWarning('application returned string.  Response will '
                             'send character for character to the client '
                             'which will kill the performance.  Return a '
                             'list or iterable instead.'), stacklevel=3)

    def __call__(self, *args, **kwargs):
        if len(args) != 2:
            warn(WSGIWarning('Two arguments to WSGI app required'),
                 stacklevel=2)
        if kwargs:
            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
                 stacklevel=2)
        environ, start_response = args

        self.check_environ(environ)
        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])

        # hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length
        environ['wsgi.file_wrapper'] = FileWrapper

        headers_set = []
        chunks = []

        def checking_start_response(*args, **kwargs):
            if len(args) not in (2, 3):
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
            if kwargs:
                warn(WSGIWarning('no keyword arguments allowed.'))

            status, headers = args[:2]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None

            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)

        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
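
# Illustrative sketch (not part of the original module): running a request
# through the lint middleware and collecting the warnings it emits instead
# of letting them go to stderr.  The application argument is a placeholder.
def _lint_example(my_wsgi_app):
    import warnings
    from werkzeug.test import Client
    client = Client(LintMiddleware(my_wsgi_app))
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        client.get('/')
    return [str(w.message) for w in caught]
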
@@ -0,0 +1,142 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.profiler
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    This module provides a simple WSGI profiler middleware for finding
    bottlenecks in web applications.  It uses the :mod:`profile` or
    :mod:`cProfile` module to do the profiling and writes the stats to the
    stream provided (defaults to stdout).

    Example usage::

        from werkzeug.contrib.profiler import ProfilerMiddleware
        app = ProfilerMiddleware(app)

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import sys
import time
import os.path
try:
    try:
        from cProfile import Profile
    except ImportError:
        from profile import Profile
    from pstats import Stats
    available = True
except ImportError:
    available = False


class MergeStream(object):
    """An object that redirects `write` calls to multiple streams.
    Use this to log to both `sys.stdout` and a file::

        f = open('profiler.log', 'w')
        stream = MergeStream(sys.stdout, f)
        profiler = ProfilerMiddleware(app, stream)
    """

    def __init__(self, *streams):
        if not streams:
            raise TypeError('at least one stream must be given')
        self.streams = streams

    def write(self, data):
        for stream in self.streams:
            stream.write(data)


class ProfilerMiddleware(object):
    """Simple profiler middleware.  Wraps a WSGI application and profiles
    a request.  This intentionally buffers the response so that timings are
    more exact.

    By giving the `profile_dir` argument, pstat.Stats files are saved to that
    directory, one file per request.  Without it, a summary is printed to
    `stream` instead.

    For the exact meaning of `sort_by` and `restrictions` consult the
    :mod:`profile` documentation.

    .. versionadded:: 0.9
       Added support for `restrictions` and `profile_dir`.

    :param app: the WSGI application to profile.
    :param stream: the stream for the profiled stats.  defaults to stdout.
    :param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling restrictions, not used if
                         dumping to `profile_dir`.
    :param profile_dir: directory name to save pstat files
    """

    def __init__(self, app, stream=None,
                 sort_by=('time', 'calls'), restrictions=(),
                 profile_dir=None):
        if not available:
            raise RuntimeError('the profiler is not available because '
                               'profile or pstat is not installed.')
        self._app = app
        self._stream = stream or sys.stdout
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir

    def __call__(self, environ, start_response):
        response_body = []

        def catching_start_response(status, headers, exc_info=None):
            start_response(status, headers, exc_info)
            return response_body.append

        def runapp():
            appiter = self._app(environ, catching_start_response)
            response_body.extend(appiter)
            if hasattr(appiter, 'close'):
                appiter.close()

        p = Profile()
        start = time.time()
        p.runcall(runapp)
        body = ''.join(response_body)
        elapsed = time.time() - start

        if self._profile_dir is not None:
            prof_filename = os.path.join(self._profile_dir,
                    '%s.%s.%06dms.%d.prof' % (
                environ['REQUEST_METHOD'],
                environ.get('PATH_INFO').strip('/').replace('/', '.') or 'root',
                elapsed * 1000.0,
                time.time()
            ))
            p.dump_stats(prof_filename)

        else:
            stats = Stats(p, stream=self._stream)
            stats.sort_stats(*self._sort_by)

            self._stream.write('-' * 80)
            self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
            stats.print_stats(*self._restrictions)
            self._stream.write('-' * 80 + '\n\n')

        return [body]


def make_action(app_factory, hostname='localhost', port=5000,
                threaded=False, processes=1, stream=None,
                sort_by=('time', 'calls'), restrictions=()):
    """Return a new callback for :mod:`werkzeug.script` that starts a local
    server with the profiler enabled.

    ::

        from werkzeug.contrib import profiler
        action_profile = profiler.make_action(make_app)
    """
    def action(hostname=('h', hostname), port=('p', port),
               threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
        run_simple(hostname, port, app, False, None, threaded, processes)
    return action
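
# Illustrative sketch (not part of the original module): the two output
# modes of the middleware above.  The directory name and sort columns are
# assumptions for the example only; the profile directory must already
# exist, since dump_stats does not create it.
def _profiler_example(my_wsgi_app):
    # mode 1: print a per-request summary to stdout
    summary = ProfilerMiddleware(my_wsgi_app, stream=sys.stdout,
                                 sort_by=('cumulative', 'calls'))
    # mode 2: write one pstats file per request for later inspection
    per_request = ProfilerMiddleware(my_wsgi_app, profile_dir='./profiles')
    return summary, per_request
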
@@ -0,0 +1,321 @@
# -*- coding: utf-8 -*-
r"""
    werkzeug.contrib.securecookie
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    This module implements a cookie that is not alterable from the client
    because it adds a checksum the server checks for.  You can use it as a
    session replacement if all you have is a user id or something to mark
    a logged in user.

    Keep in mind that the data is still readable from the client as a
    normal cookie is.  However you don't have to store and flush the
    sessions you have at the server.

    Example usage:

    >>> from werkzeug.contrib.securecookie import SecureCookie
    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")

    Dumping into a string so that one can store it in a cookie:

    >>> value = x.serialize()

    Loading from that string again:

    >>> x = SecureCookie.unserialize(value, "deadbeef")
    >>> x["baz"]
    (1, 2, 3)

    If someone modifies the cookie and the checksum is wrong the unserialize
    method will fail silently and return a new empty `SecureCookie` object.

    Keep in mind that the values will be visible in the cookie so do not
    store data in a cookie you don't want the user to see.

    Application Integration
    =======================

    If you are using the werkzeug request objects you could integrate the
    secure cookie into your application like this::

        from werkzeug.utils import cached_property
        from werkzeug.wrappers import BaseRequest
        from werkzeug.contrib.securecookie import SecureCookie

        # don't use this key but a different one; you could just use
        # os.urandom(20) to get something random
        SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                data = self.cookies.get('session_data')
                if not data:
                    return SecureCookie(secret_key=SECRET_KEY)
                return SecureCookie.unserialize(data, SECRET_KEY)

        def application(environ, start_response):
            request = Request(environ, start_response)

            # get a response object here
            response = ...

            if request.client_session.should_save:
                session_data = request.client_session.serialize()
                response.set_cookie('session_data', session_data,
                                    httponly=True)
            return response(environ, start_response)

    A less verbose integration can be achieved by using shorthand methods::

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)

        def application(environ, start_response):
            request = Request(environ, start_response)

            # get a response object here
            response = ...

            request.client_session.save_cookie(response)
            return response(environ, start_response)

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash

from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native


class UnquoteError(Exception):
    """Internal exception used to signal failures on quoting."""


class SecureCookie(ModificationTrackingDict):
|
||||
"""Represents a secure cookie. You can subclass this class and provide
|
||||
an alternative mac method. The import thing is that the mac method
|
||||
is a function with a similar interface to the hashlib. Required
|
||||
methods are update() and digest().
|
||||
|
||||
Example usage:
|
||||
|
||||
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
|
||||
>>> x["foo"]
|
||||
42
|
||||
>>> x["baz"]
|
||||
(1, 2, 3)
|
||||
>>> x["blafasel"] = 23
|
||||
>>> x.should_save
|
||||
True
|
||||
|
||||
:param data: the initial data. Either a dict, list of tuples or `None`.
|
||||
:param secret_key: the secret key. If not set `None` or not specified
|
||||
it has to be set before :meth:`serialize` is called.
|
||||
:param new: The initial value of the `new` flag.
|
||||
"""
|
||||
|
||||
#: The hash method to use. This has to be a module with a new function
|
||||
#: or a function that creates a hashlib object. Such as `hashlib.md5`
|
||||
#: Subclasses can override this attribute. The default hash is sha1.
|
||||
#: Make sure to wrap this in staticmethod() if you store an arbitrary
|
||||
#: function there such as hashlib.sha1 which might be implemented
|
||||
#: as a function.
|
||||
hash_method = staticmethod(_default_hash)
|
||||
|
||||
#: the module used for serialization. Unless overriden by subclasses
|
||||
#: the standard pickle module is used.
|
||||
serialization_method = pickle
|
||||
|
||||
#: if the contents should be base64 quoted. This can be disabled if the
|
||||
#: serialization process returns cookie safe strings only.
|
||||
quote_base64 = True
|
||||
|
||||
def __init__(self, data=None, secret_key=None, new=True):
|
||||
ModificationTrackingDict.__init__(self, data or ())
|
||||
# explicitly convert it into a bytestring because python 2.6
|
||||
# no longer performs an implicit string conversion on hmac
|
||||
if secret_key is not None:
|
||||
secret_key = bytes(secret_key)
|
||||
self.secret_key = secret_key
|
||||
self.new = new
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s %s%s>' % (
|
||||
self.__class__.__name__,
|
||||
dict.__repr__(self),
|
||||
self.should_save and '*' or ''
|
||||
)
|
||||
|
||||
@property
|
||||
def should_save(self):
|
||||
"""True if the session should be saved. By default this is only true
|
||||
for :attr:`modified` cookies, not :attr:`new`.
|
||||
"""
|
||||
return self.modified
|
||||
|
||||
@classmethod
|
||||
def quote(cls, value):
|
||||
"""Quote the value for the cookie. This can be any object supported
|
||||
by :attr:`serialization_method`.
|
||||
|
||||
:param value: the value to quote.
|
||||
"""
|
||||
if cls.serialization_method is not None:
|
||||
value = cls.serialization_method.dumps(value)
|
||||
if cls.quote_base64:
|
||||
value = b''.join(base64.b64encode(value).splitlines()).strip()
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def unquote(cls, value):
|
||||
"""Unquote the value for the cookie. If unquoting does not work a
|
||||
:exc:`UnquoteError` is raised.
|
||||
|
||||
:param value: the value to unquote.
|
||||
"""
|
||||
try:
|
||||
if cls.quote_base64:
|
||||
value = base64.b64decode(value)
|
||||
if cls.serialization_method is not None:
|
||||
value = cls.serialization_method.loads(value)
|
||||
return value
|
||||
except Exception:
|
||||
# unfortunately pickle and other serialization modules can
|
||||
# cause pretty every error here. if we get one we catch it
|
||||
# and convert it into an UnquoteError
|
||||
raise UnquoteError()
|
||||
|
||||
def serialize(self, expires=None):
|
||||
"""Serialize the secure cookie into a string.
|
||||
|
||||
If expires is provided, the session will be automatically invalidated
|
||||
after expiration when you unseralize it. This provides better
|
||||
protection against session cookie theft.
|
||||
|
||||
:param expires: an optional expiration date for the cookie (a
|
||||
:class:`datetime.datetime` object)
|
||||
"""
|
||||
if self.secret_key is None:
|
||||
raise RuntimeError('no secret key defined')
|
||||
if expires:
|
||||
self['_expires'] = _date_to_unix(expires)
|
||||
result = []
|
||||
mac = hmac(self.secret_key, None, self.hash_method)
|
||||
for key, value in sorted(self.items()):
|
||||
result.append(('%s=%s' % (
|
||||
url_quote_plus(key),
|
||||
self.quote(value).decode('ascii')
|
||||
)).encode('ascii'))
|
||||
mac.update(b'|' + result[-1])
|
||||
return b'?'.join([
|
||||
base64.b64encode(mac.digest()).strip(),
|
||||
b'&'.join(result)
|
||||
])
|
||||
|
||||
@classmethod
|
||||
def unserialize(cls, string, secret_key):
|
||||
"""Load the secure cookie from a serialized string.
|
||||
|
||||
:param string: the cookie value to unserialize.
|
||||
:param secret_key: the secret key used to serialize the cookie.
|
||||
:return: a new :class:`SecureCookie`.
|
||||
"""
|
||||
if isinstance(string, text_type):
|
||||
string = string.encode('utf-8', 'replace')
|
||||
if isinstance(secret_key, text_type):
|
||||
secret_key = secret_key.encode('utf-8', 'replace')
|
||||
try:
|
||||
base64_hash, data = string.split(b'?', 1)
|
||||
except (ValueError, IndexError):
|
||||
items = ()
|
||||
else:
|
||||
items = {}
|
||||
mac = hmac(secret_key, None, cls.hash_method)
|
||||
for item in data.split(b'&'):
|
||||
mac.update(b'|' + item)
|
||||
if not b'=' in item:
|
||||
items = None
|
||||
break
|
||||
key, value = item.split(b'=', 1)
|
||||
# try to make the key a string
|
||||
key = url_unquote_plus(key.decode('ascii'))
|
||||
try:
|
||||
key = to_native(key)
|
||||
except UnicodeError:
|
||||
pass
|
||||
items[key] = value
|
||||
|
||||
# no parsing error and the mac looks okay, we can now
|
||||
# sercurely unpickle our cookie.
|
||||
try:
|
||||
client_hash = base64.b64decode(base64_hash)
|
||||
except TypeError:
|
||||
items = client_hash = None
|
||||
if items is not None and safe_str_cmp(client_hash, mac.digest()):
|
||||
try:
|
||||
for key, value in iteritems(items):
|
||||
items[key] = cls.unquote(value)
|
||||
except UnquoteError:
|
||||
items = ()
|
||||
else:
|
||||
if '_expires' in items:
|
||||
if time() > items['_expires']:
|
||||
items = ()
|
||||
else:
|
||||
del items['_expires']
|
||||
else:
|
||||
items = ()
|
||||
return cls(items, secret_key, False)
|
||||
|
||||
@classmethod
|
||||
def load_cookie(cls, request, key='session', secret_key=None):
|
||||
"""Loads a :class:`SecureCookie` from a cookie in request. If the
|
||||
cookie is not set, a new :class:`SecureCookie` instanced is
|
||||
returned.
|
||||
|
||||
:param request: a request object that has a `cookies` attribute
|
||||
which is a dict of all cookie values.
|
||||
:param key: the name of the cookie.
|
||||
:param secret_key: the secret key used to unquote the cookie.
|
||||
Always provide the value even though it has
|
||||
no default!
|
||||
"""
|
||||
data = request.cookies.get(key)
|
||||
if not data:
|
||||
return cls(secret_key=secret_key)
|
||||
return cls.unserialize(data, secret_key)
|
||||
|
||||
def save_cookie(self, response, key='session', expires=None,
|
||||
session_expires=None, max_age=None, path='/', domain=None,
|
||||
secure=None, httponly=False, force=False):
|
||||
"""Saves the SecureCookie in a cookie on response object. All
|
||||
parameters that are not described here are forwarded directly
|
||||
to :meth:`~BaseResponse.set_cookie`.
|
||||
|
||||
:param response: a response object that has a
|
||||
:meth:`~BaseResponse.set_cookie` method.
|
||||
:param key: the name of the cookie.
|
||||
:param session_expires: the expiration date of the secure cookie
|
||||
stored information. If this is not provided
|
||||
the cookie `expires` date is used instead.
|
||||
"""
|
||||
if force or self.should_save:
|
||||
data = self.serialize(session_expires or expires)
|
||||
response.set_cookie(key, data, expires=expires, max_age=max_age,
|
||||
path=path, domain=domain, secure=secure,
|
||||
httponly=httponly)
|
||||
|
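
# A minimal round-trip sketch for the SecureCookie class defined above;
# the secret key is a throwaway example value.
from werkzeug.contrib.securecookie import SecureCookie

SECRET = 'example secret, do not reuse'
cookie = SecureCookie({'user_id': 42}, SECRET)
data = cookie.serialize()                      # signed, base64-quoted payload
restored = SecureCookie.unserialize(data, SECRET)
assert restored['user_id'] == 42
# tampering breaks the HMAC check, so an empty cookie comes back
assert not SecureCookie.unserialize(data + 'x', SECRET)
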
@@ -0,0 +1,348 @@
# -*- coding: utf-8 -*-
r"""
    werkzeug.contrib.sessions
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    This module contains some helper classes that help one to add session
    support to a python WSGI application.  For full client-side session
    storage see :mod:`~werkzeug.contrib.securecookie` which implements a
    secure, client-side session storage.


    Application Integration
    =======================

    ::

        from werkzeug.contrib.sessions import SessionMiddleware, \
             FilesystemSessionStore

        app = SessionMiddleware(app, FilesystemSessionStore())

    The current session will then appear in the WSGI environment as
    `werkzeug.session`.  However it's recommended to not use the middleware
    but the stores directly in the application.  That said, for very simple
    scripts a middleware for sessions could be sufficient.

    This module does not implement methods or ways to check if a session is
    expired.  That should be done by a cronjob and is storage specific.  For
    example to prune unused filesystem sessions one could check the modified
    time of the files.  If sessions are stored in the database the new()
    method should add an expiration timestamp for the session.

    For better flexibility it's recommended to not use the middleware but the
    store and session object directly in the application dispatching::

        session_store = FilesystemSessionStore()

        def application(environ, start_response):
            request = Request(environ)
            sid = request.cookies.get('cookie_name')
            if sid is None:
                request.session = session_store.new()
            else:
                request.session = session_store.get(sid)
            response = get_the_response_object(request)
            if request.session.should_save:
                session_store.save(request.session)
                response.set_cookie('cookie_name', request.session.sid)
            return response(environ, start_response)

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import tempfile
from os import path
from time import time
from random import random
from hashlib import sha1
from pickle import dump, load, HIGHEST_PROTOCOL

from werkzeug.datastructures import CallbackDict
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.wsgi import ClosingIterator
from werkzeug.posixemulation import rename
from werkzeug._compat import PY2, text_type


_sha1_re = re.compile(r'^[a-f0-9]{40}$')


def _urandom():
    if hasattr(os, 'urandom'):
        return os.urandom(30)
    # fallback for platforms without os.urandom(); encode the float so
    # that the b''.join() in generate_key() keeps working
    return str(random()).encode('ascii')


def generate_key(salt=None):
    if salt is None:
        salt = repr(salt).encode('ascii')
    return sha1(b''.join([
        salt,
        str(time()).encode('ascii'),
        _urandom()
    ])).hexdigest()


class ModificationTrackingDict(CallbackDict):
    __slots__ = ('modified',)

    def __init__(self, *args, **kwargs):
        def on_update(self):
            self.modified = True
        self.modified = False
        CallbackDict.__init__(self, on_update=on_update)
        dict.update(self, *args, **kwargs)

    def copy(self):
        """Create a flat copy of the dict."""
        missing = object()
        result = object.__new__(self.__class__)
        for name in self.__slots__:
            val = getattr(self, name, missing)
            if val is not missing:
                setattr(result, name, val)
        return result

    def __copy__(self):
        return self.copy()


class Session(ModificationTrackingDict):
    """Subclass of a dict that keeps track of direct object changes.  Changes
    in mutable structures are not tracked, for those you have to set
    `modified` to `True` by hand.
    """
    __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')

    def __init__(self, data, sid, new=False):
        ModificationTrackingDict.__init__(self, data)
        self.sid = sid
        self.new = new

    def __repr__(self):
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )

    @property
    def should_save(self):
        """True if the session should be saved.

        .. versionchanged:: 0.6
           By default the session is now only saved if the session is
           modified, not if it is new like it was before.
        """
        return self.modified


class SessionStore(object):
    """Baseclass for all session stores.  The Werkzeug contrib module does
    not implement any useful stores besides the filesystem store; application
    developers are encouraged to create their own stores.

    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    """

    def __init__(self, session_class=None):
        if session_class is None:
            session_class = Session
        self.session_class = session_class

    def is_valid_key(self, key):
        """Check if a key has the correct format."""
        return _sha1_re.match(key) is not None

    def generate_key(self, salt=None):
        """Simple function that generates a new session key."""
        return generate_key(salt)

    def new(self):
        """Generate a new session."""
        return self.session_class({}, self.generate_key(), True)

    def save(self, session):
        """Save a session."""

    def save_if_modified(self, session):
        """Save if a session class wants an update."""
        if session.should_save:
            self.save(session)

    def delete(self, session):
        """Delete a session."""

    def get(self, sid):
        """Get a session for this sid or a new session object.  This method
        has to check if the session key is valid and create a new session if
        that isn't the case.
        """
        return self.session_class({}, sid, True)


#: used for temporary files by the filesystem session store
_fs_transaction_suffix = '.__wz_sess'


class FilesystemSessionStore(SessionStore):
    """Simple example session store that saves sessions on the filesystem.
    This store works best on POSIX systems and Windows Vista / Windows
    Server 2008 and newer.

    .. versionchanged:: 0.6
       `renew_missing` was added.  Previously this was considered `True`;
       now it defaults to `False` and can be explicitly enabled.

    :param path: the path to the folder used for storing the sessions.
                 If not provided the default temporary directory is used.
    :param filename_template: a string template used to give the session
                              a filename.  ``%s`` is replaced with the
                              session id.
    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    :param renew_missing: set to `True` if you want the store to
                          give the user a new sid if the session was
                          not yet saved.
    """

    def __init__(self, path=None, filename_template='werkzeug_%s.sess',
                 session_class=None, renew_missing=False, mode=0o644):
        SessionStore.__init__(self, session_class)
        if path is None:
            path = tempfile.gettempdir()
        self.path = path
        if isinstance(filename_template, text_type) and PY2:
            filename_template = filename_template.encode(
                sys.getfilesystemencoding() or 'utf-8')
        assert not filename_template.endswith(_fs_transaction_suffix), \
            'filename templates may not end with %s' % _fs_transaction_suffix
        self.filename_template = filename_template
        self.renew_missing = renew_missing
        self.mode = mode

    def get_session_filename(self, sid):
        # out of the box, this should be a strict ASCII subset but
        # you might reconfigure the session object to have a more
        # arbitrary string.
        if isinstance(sid, text_type) and PY2:
            sid = sid.encode(sys.getfilesystemencoding() or 'utf-8')
        return path.join(self.path, self.filename_template % sid)

    def save(self, session):
        fn = self.get_session_filename(session.sid)
        fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
                                   dir=self.path)
        f = os.fdopen(fd, 'wb')
        try:
            dump(dict(session), f, HIGHEST_PROTOCOL)
        finally:
            f.close()
        try:
            rename(tmp, fn)
            os.chmod(fn, self.mode)
        except (IOError, OSError):
            pass

    def delete(self, session):
        fn = self.get_session_filename(session.sid)
        try:
            os.unlink(fn)
        except OSError:
            pass

    def get(self, sid):
        if not self.is_valid_key(sid):
            return self.new()
        try:
            f = open(self.get_session_filename(sid), 'rb')
        except IOError:
            if self.renew_missing:
                return self.new()
            data = {}
        else:
            try:
                try:
                    data = load(f)
                except Exception:
                    data = {}
            finally:
                f.close()
        return self.session_class(data, sid, False)

    def list(self):
        """Lists all sessions in the store.

        .. versionadded:: 0.6
        """
        before, after = self.filename_template.split('%s', 1)
        filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
                                                    re.escape(after)))
        result = []
        for filename in os.listdir(self.path):
            #: this is a session that is still being saved.
            if filename.endswith(_fs_transaction_suffix):
                continue
            match = filename_re.match(filename)
            if match is not None:
                result.append(match.group(1))
        return result


class SessionMiddleware(object):
    """A simple middleware that puts the session object of a provided store
    into the WSGI environ.  It automatically sets cookies and restores
    sessions.

    However a middleware is not the preferred solution because it won't be
    as fast as sessions managed by the application itself, and it puts a key
    into the WSGI environment that is only relevant for the application,
    which is against the concept of WSGI.

    The cookie parameters are the same as for the :func:`~dump_cookie`
    function just prefixed with ``cookie_``.  Additionally `max_age` is
    called `cookie_age` and not `cookie_max_age` because of backwards
    compatibility.
    """

    def __init__(self, app, store, cookie_name='session_id',
                 cookie_age=None, cookie_expires=None, cookie_path='/',
                 cookie_domain=None, cookie_secure=None,
                 cookie_httponly=False, environ_key='werkzeug.session'):
        self.app = app
        self.store = store
        self.cookie_name = cookie_name
        self.cookie_age = cookie_age
        self.cookie_expires = cookie_expires
        self.cookie_path = cookie_path
        self.cookie_domain = cookie_domain
        self.cookie_secure = cookie_secure
        self.cookie_httponly = cookie_httponly
        self.environ_key = environ_key

    def __call__(self, environ, start_response):
        cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
        sid = cookie.get(self.cookie_name, None)
        if sid is None:
            session = self.store.new()
        else:
            session = self.store.get(sid)
        environ[self.environ_key] = session

        def injecting_start_response(status, headers, exc_info=None):
            if session.should_save:
                self.store.save(session)
                headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
                                session.sid, self.cookie_age,
                                self.cookie_expires, self.cookie_path,
                                self.cookie_domain, self.cookie_secure,
                                self.cookie_httponly)))
            return start_response(status, headers, exc_info)
        return ClosingIterator(self.app(environ, injecting_start_response),
                               lambda: self.store.save_if_modified(session))
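
# A minimal sketch of the session store API defined above, using a
# temporary directory so the example does not touch real session data.
import tempfile
from werkzeug.contrib.sessions import FilesystemSessionStore

store = FilesystemSessionStore(path=tempfile.mkdtemp())
session = store.new()            # fresh session with a random sid
session['visits'] = 1            # any change flips session.modified
store.save_if_modified(session)  # persisted because should_save is True
reloaded = store.get(session.sid)
assert reloaded['visits'] == 1
store.delete(session)
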
@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.testtools
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    This module implements extended wrappers for simplified testing.

    `TestResponse`
        A response wrapper which adds various cached attributes for
        simplified assertions on various content types.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from werkzeug.utils import cached_property, import_string
from werkzeug.wrappers import Response

from warnings import warn
warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
                        'will be removed with Werkzeug 1.0'))


class ContentAccessors(object):
    """
    A mixin class for response objects that provides a couple of useful
    accessors for unittesting.
    """

    def xml(self):
        """Get an etree if possible."""
        if 'xml' not in self.mimetype:
            raise AttributeError(
                'Not an XML response (Content-Type: %s)'
                % self.mimetype)
        for module in ['xml.etree.ElementTree', 'ElementTree',
                       'elementtree.ElementTree']:
            etree = import_string(module, silent=True)
            if etree is not None:
                return etree.XML(self.data)
        raise RuntimeError('You must have ElementTree installed '
                           'to use TestResponse.xml')
    xml = cached_property(xml)

    def lxml(self):
        """Get an lxml etree if possible."""
        if ('html' not in self.mimetype and 'xml' not in self.mimetype):
            raise AttributeError('Not an HTML/XML response')
        from lxml import etree
        try:
            from lxml.html import fromstring
        except ImportError:
            fromstring = etree.HTML
        if self.mimetype == 'text/html':
            return fromstring(self.data)
        return etree.XML(self.data)
    lxml = cached_property(lxml)

    def json(self):
        """Get the result of simplejson.loads if possible."""
        if 'json' not in self.mimetype:
            raise AttributeError('Not a JSON response')
        try:
            from simplejson import loads
        except ImportError:
            from json import loads
        return loads(self.data)
    json = cached_property(json)


class TestResponse(Response, ContentAccessors):
    """Pass this to `werkzeug.test.Client` for easier unittesting."""
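
# A minimal sketch showing TestResponse plugged into werkzeug.test.Client;
# `json_app` is a throwaway WSGI app used only for illustration.
from werkzeug.test import Client
from werkzeug.contrib.testtools import TestResponse

def json_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'application/json')])
    return ['{"ok": true}']

client = Client(json_app, response_wrapper=TestResponse)
response = client.get('/')
assert response.json == {'ok': True}  # parsed through the json accessor
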
@@ -0,0 +1,278 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.wrappers
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Extra wrappers or mixins contributed by the community.  These wrappers
    can be mixed into request objects to add extra functionality.

    Example::

        from werkzeug.wrappers import Request as RequestBase
        from werkzeug.contrib.wrappers import JSONRequestMixin

        class Request(RequestBase, JSONRequestMixin):
            pass

    Afterwards this request object provides the extra functionality of the
    :class:`JSONRequestMixin`.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import codecs
try:
    from simplejson import loads
except ImportError:
    from json import loads

from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance


def is_known_charset(charset):
    """Checks if the given charset is known to Python."""
    try:
        codecs.lookup(charset)
    except LookupError:
        return False
    return True


class JSONRequestMixin(object):
    """Add json method to a request object.  This will parse the input data
    through simplejson if possible.

    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not json or if the data itself cannot be parsed as json.
    """

    @cached_property
    def json(self):
        """Get the result of simplejson.loads if possible."""
        if 'json' not in self.environ.get('CONTENT_TYPE', ''):
            raise BadRequest('Not a JSON request')
        try:
            return loads(self.data)
        except Exception:
            raise BadRequest('Unable to read JSON request')


class ProtobufRequestMixin(object):
    """Add protobuf parsing method to a request object.  This will parse the
    input data through `protobuf`_ if possible.

    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not protobuf or if the data itself cannot be parsed properly.

    .. _protobuf: http://code.google.com/p/protobuf/
    """

    #: by default the :class:`ProtobufRequestMixin` will raise a
    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
    #: initialized.  You can bypass that check by setting this
    #: attribute to `False`.
    protobuf_check_initialization = True

    def parse_protobuf(self, proto_type):
        """Parse the data into an instance of proto_type."""
        if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
            raise BadRequest('Not a Protobuf request')

        obj = proto_type()
        try:
            obj.ParseFromString(self.data)
        except Exception:
            raise BadRequest("Unable to parse Protobuf request")

        # Fail if not all required fields are set
        if self.protobuf_check_initialization and not obj.IsInitialized():
            raise BadRequest("Partial Protobuf request")

        return obj


class RoutingArgsRequestMixin(object):
    """This request mixin adds support for the wsgiorg routing args
    `specification`_.

    .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
    """

    def _get_routing_args(self):
        # default to an empty (positional, keyword) pair so the subscript
        # below cannot fail when the key is missing
        return self.environ.get('wsgiorg.routing_args', ((), {}))[0]

    def _set_routing_args(self, value):
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment.  If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)

    routing_args = property(_get_routing_args, _set_routing_args, doc='''
        The positional URL arguments as `tuple`.''')
    del _get_routing_args, _set_routing_args

    def _get_routing_vars(self):
        rv = self.environ.get('wsgiorg.routing_args')
        if rv is not None:
            return rv[1]
        rv = {}
        if not self.shallow:
            self.routing_vars = rv
        return rv

    def _set_routing_vars(self, value):
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment.  If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (self.routing_args, value)

    routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
        The keyword URL arguments as `dict`.''')
    del _get_routing_vars, _set_routing_vars


class ReverseSlashBehaviorRequestMixin(object):
    """This mixin reverses the trailing slash behavior of :attr:`script_root`
    and :attr:`path`.  This makes it possible to use :func:`~urlparse.urljoin`
    directly on the paths.

    Because it changes the behavior of :class:`Request` this class has to be
    mixed in *before* the actual request class::

        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
            pass

    This example shows the differences (for an application mounted on
    `/application` and the request going to `/application/foo/bar`):

    +---------------+-------------------+---------------------+
    |               | normal behavior   | reverse behavior    |
    +===============+===================+=====================+
    | `script_root` | ``/application``  | ``/application/``   |
    +---------------+-------------------+---------------------+
    | `path`        | ``/foo/bar``      | ``foo/bar``         |
    +---------------+-------------------+---------------------+
    """

    @cached_property
    def path(self):
        """Requested path as unicode.  This works a bit like the regular path
        info in the WSGI environment but will not include a leading slash.
        """
        path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
                                   self.charset, self.encoding_errors)
        return path.lstrip('/')

    @cached_property
    def script_root(self):
        """The root path of the script including a trailing slash."""
        path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
                                   self.charset, self.encoding_errors)
        return path.rstrip('/') + '/'


class DynamicCharsetRequestMixin(object):
    """If this mixin is mixed into a request class it will provide
    a dynamic `charset` attribute.  This means that if the charset is
    transmitted in the content type headers it's used from there.

    Because it changes the behavior of :class:`Request` this class has
    to be mixed in *before* the actual request class::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            pass

    By default the request object assumes that the URL charset is the
    same as the data charset.  If the charset varies on each request
    based on the transmitted data it's not a good idea to let the URLs
    change based on that.  Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out.  It's strongly
    recommended to set the URL charset to utf-8::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            url_charset = 'utf-8'

    .. versionadded:: 0.6
    """

    #: the default charset that is assumed if the content type header
    #: is missing or does not contain a charset parameter.  The default
    #: is latin1 which is what HTTP specifies as the default charset.
    #: You may however want to set this to utf-8 to better support
    #: browsers that do not transmit a charset for incoming data.
    default_charset = 'latin1'

    def unknown_charset(self, charset):
        """Called if a charset was provided but is not supported by
        the Python codecs module.  By default latin1 is assumed so as
        not to lose any information; you may override this method to
        change the behavior.

        :param charset: the charset that was not found.
        :return: the replacement charset.
        """
        return 'latin1'

    @cached_property
    def charset(self):
        """The charset from the content type."""
        header = self.environ.get('CONTENT_TYPE')
        if header:
            ct, options = parse_options_header(header)
            charset = options.get('charset')
            if charset:
                if is_known_charset(charset):
                    return charset
                return self.unknown_charset(charset)
        return self.default_charset


class DynamicCharsetResponseMixin(object):
    """If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute.  This means that the charset is
    looked up in and stored in the `Content-Type` header and updates
    itself automatically.  This also means a small performance hit but
    it can be useful if you're working with different charsets on
    responses.

    Because the charset attribute is now a property at class-level, the
    default value is stored in `default_charset`.

    Because it changes the behavior of :class:`Response` this class has
    to be mixed in *before* the actual response class::

        class MyResponse(DynamicCharsetResponseMixin, Response):
            pass

    .. versionadded:: 0.6
    """

    #: the default charset.
    default_charset = 'utf-8'

    def _get_charset(self):
        header = self.headers.get('content-type')
        if header:
            charset = parse_options_header(header)[1].get('charset')
            if charset:
                return charset
        return self.default_charset

    def _set_charset(self, charset):
        header = self.headers.get('content-type')
        ct, options = parse_options_header(header)
        if not ct:
            raise TypeError('Cannot set charset if Content-Type '
                            'header is missing.')
        options['charset'] = charset
        self.headers['Content-Type'] = dump_options_header(ct, options)

    charset = property(_get_charset, _set_charset, doc="""
        The charset for the response.  It's stored inside the
        Content-Type header as a parameter.""")
    del _get_charset, _set_charset
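
# A minimal sketch of the JSONRequestMixin above, exercised against a
# synthetic WSGI environment built with werkzeug's EnvironBuilder.
from werkzeug.test import EnvironBuilder
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin

class Request(RequestBase, JSONRequestMixin):
    pass

builder = EnvironBuilder(method='POST', data='{"name": "werkzeug"}',
                         content_type='application/json')
request = Request(builder.get_environ())
assert request.json['name'] == 'werkzeug'  # raises BadRequest for non-JSON
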
2612
Linux_x86_64/lib/python2.7/site-packages/werkzeug/datastructures.py
Normal file
@@ -0,0 +1,185 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.debug
    ~~~~~~~~~~~~~~

    WSGI application traceback debugger.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt


#: import this here because it once was documented as being available
#: from this module.  In case there are users left ...
from werkzeug.debug.repr import debug_repr


class _ConsoleFrame(object):
    """Helper class so that we can reuse the frame console code for the
    standalone console.
    """

    def __init__(self, namespace):
        self.console = Console(namespace)
        self.id = 0


class DebuggedApplication(object):
    """Enables debugging support for a given application::

        from werkzeug.debug import DebuggedApplication
        from myapp import app
        app = DebuggedApplication(app, evalex=True)

    The `evalex` keyword argument allows evaluating expressions in a
    traceback's frame context.

    .. versionadded:: 0.9
       The `lodgeit_url` parameter was deprecated.

    :param app: the WSGI application to run debugged.
    :param evalex: enable exception evaluation feature (interactive
                   debugging).  This requires a non-forking server.
    :param request_key: The key that points to the request object in the
                        environment.  This parameter is ignored in current
                        versions.
    :param console_path: the URL for a general purpose console.
    :param console_init_func: the function that is executed before starting
                              the general purpose console.  The return value
                              is used as initial namespace.
    :param show_hidden_frames: by default hidden traceback frames are skipped.
                               You can show them by setting this parameter
                               to `True`.
    """

    # this class is public
    __module__ = 'werkzeug'

    def __init__(self, app, evalex=False, request_key='werkzeug.request',
                 console_path='/console', console_init_func=None,
                 show_hidden_frames=False, lodgeit_url=None):
        if lodgeit_url is not None:
            from warnings import warn
            warn(DeprecationWarning('Werkzeug now pastes into gists.'))
        if not console_init_func:
            console_init_func = dict
        self.app = app
        self.evalex = evalex
        self.frames = {}
        self.tracebacks = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        self.secret = gen_salt(20)

    def debug_application(self, environ, start_response):
        """Run the application and conserve the traceback frames."""
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            for item in app_iter:
                yield item
            if hasattr(app_iter, 'close'):
                app_iter.close()
        except Exception:
            if hasattr(app_iter, 'close'):
                app_iter.close()
            traceback = get_current_traceback(skip=1, show_hidden_frames=
                                              self.show_hidden_frames,
                                              ignore_system_exceptions=True)
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback

            try:
                start_response('500 INTERNAL SERVER ERROR', [
                    ('Content-Type', 'text/html; charset=utf-8'),
                    # Disable Chrome's XSS protection, the debug
                    # output can cause false-positives.
                    ('X-XSS-Protection', '0'),
                ])
            except Exception:
                # if we end up here there has been output but an error
                # occurred.  in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ['wsgi.errors'].write(
                    'Debugging middleware caught exception in streamed '
                    'response at a point where response headers were already '
                    'sent.\n')
            else:
                yield traceback.render_full(evalex=self.evalex,
                                            secret=self.secret) \
                    .encode('utf-8', 'replace')

            traceback.log(environ['wsgi.errors'])

    def execute_command(self, request, command, frame):
        """Execute a command in a console."""
        return Response(frame.console.eval(command), mimetype='text/html')

    def display_console(self, request):
        """Display a standalone shell."""
        if 0 not in self.frames:
            self.frames[0] = _ConsoleFrame(self.console_init_func())
        return Response(render_console_html(secret=self.secret),
                        mimetype='text/html')

    def paste_traceback(self, request, traceback):
        """Paste the traceback and return a JSON response."""
        rv = traceback.paste()
        return Response(json.dumps(rv), mimetype='application/json')

    def get_source(self, request, frame):
        """Render the source viewer."""
        return Response(frame.render_source(), mimetype='text/html')

    def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        filename = join(dirname(__file__), 'shared', basename(filename))
        if isfile(filename):
            mimetype = mimetypes.guess_type(filename)[0] \
                or 'application/octet-stream'
            f = open(filename, 'rb')
            try:
                return Response(f.read(), mimetype=mimetype)
            finally:
                f.close()
        return Response('Not Found', status=404)

    def __call__(self, environ, start_response):
        """Dispatch the requests."""
        # important: don't ever access a function here that reads the incoming
        # form data!  Otherwise the application won't have access to that data
        # any more!
        request = Request(environ)
        response = self.debug_application
        if request.args.get('__debugger__') == 'yes':
            cmd = request.args.get('cmd')
            arg = request.args.get('f')
            secret = request.args.get('s')
            traceback = self.tracebacks.get(request.args.get('tb', type=int))
            frame = self.frames.get(request.args.get('frm', type=int))
            if cmd == 'resource' and arg:
                response = self.get_resource(request, arg)
            elif cmd == 'paste' and traceback is not None and \
                 secret == self.secret:
                response = self.paste_traceback(request, traceback)
            elif cmd == 'source' and frame and self.secret == secret:
                response = self.get_source(request, frame)
            elif self.evalex and cmd is not None and frame is not None and \
                 self.secret == secret:
                response = self.execute_command(request, cmd, frame)
        elif self.evalex and self.console_path is not None and \
             request.path == self.console_path:
            response = self.display_console(request)
        return response(environ, start_response)
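
# A minimal sketch of the DebuggedApplication middleware above;
# `failing_app` is a throwaway app that always raises so the traceback
# page can be seen in a browser.
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_simple

def failing_app(environ, start_response):
    raise RuntimeError('intentional error to demonstrate the debugger')

# evalex=True enables the interactive console inside traceback frames;
# never expose this on a production server.
run_simple('localhost', 5000, DebuggedApplication(failing_app, evalex=True))
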
@@ -0,0 +1,211 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.debug.console
    ~~~~~~~~~~~~~~~~~~~~~~

    Interactive console support.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD.
"""
import sys
import code
from types import CodeType

from werkzeug.utils import escape
from werkzeug.local import Local
from werkzeug.debug.repr import debug_repr, dump, helper


_local = Local()


class HTMLStringO(object):
    """A StringO version that HTML escapes on write."""

    def __init__(self):
        self._buffer = []

    def isatty(self):
        return False

    def close(self):
        pass

    def flush(self):
        pass

    def seek(self, n, mode=0):
        pass

    def readline(self):
        if len(self._buffer) == 0:
            return ''
        ret = self._buffer[0]
        del self._buffer[0]
        return ret

    def reset(self):
        val = ''.join(self._buffer)
        del self._buffer[:]
        return val

    def _write(self, x):
        if isinstance(x, bytes):
            x = x.decode('utf-8', 'replace')
        self._buffer.append(x)

    def write(self, x):
        self._write(escape(x))

    def writelines(self, x):
        self._write(escape(''.join(x)))


class ThreadedStream(object):
    """Thread-local wrapper for sys.stdout for the interactive console."""

    def push():
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = ThreadedStream()
        _local.stream = HTMLStringO()
    push = staticmethod(push)

    def fetch():
        try:
            stream = _local.stream
        except AttributeError:
            return ''
        return stream.reset()
    fetch = staticmethod(fetch)

    def displayhook(obj):
        try:
            stream = _local.stream
        except AttributeError:
            return _displayhook(obj)
        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            _local._current_ipy.locals['_'] = obj
            stream._write(debug_repr(obj))
    displayhook = staticmethod(displayhook)

    def __setattr__(self, name, value):
        raise AttributeError('read only attribute %s' % name)

    def __dir__(self):
        return dir(sys.__stdout__)

    def __getattribute__(self, name):
        if name == '__members__':
            return dir(sys.__stdout__)
        try:
            stream = _local.stream
        except AttributeError:
            stream = sys.__stdout__
        return getattr(stream, name)

    def __repr__(self):
        return repr(sys.__stdout__)


# add the threaded stream as display hook
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook


class _ConsoleLoader(object):

    def __init__(self):
        self._storage = {}

    def register(self, code, source):
        self._storage[id(code)] = source
        # register code objects of wrapped functions too.
        for var in code.co_consts:
            if isinstance(var, CodeType):
                self._storage[id(var)] = source

    def get_source_by_code(self, code):
        try:
            return self._storage[id(code)]
        except KeyError:
            pass


def _wrap_compiler(console):
    compile = console.compile
    def func(source, filename, symbol):
        code = compile(source, filename, symbol)
        console.loader.register(code, source)
        return code
    console.compile = func


class _InteractiveConsole(code.InteractiveInterpreter):

    def __init__(self, globals, locals):
        code.InteractiveInterpreter.__init__(self, locals)
        self.globals = dict(globals)
        self.globals['dump'] = dump
        self.globals['help'] = helper
        self.globals['__loader__'] = self.loader = _ConsoleLoader()
        self.more = False
        self.buffer = []
        _wrap_compiler(self)

    def runsource(self, source):
        source = source.rstrip() + '\n'
        ThreadedStream.push()
        prompt = self.more and '... ' or '>>> '
        try:
            source_to_eval = ''.join(self.buffer + [source])
            if code.InteractiveInterpreter.runsource(self,
               source_to_eval, '<debugger>', 'single'):
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            output = ThreadedStream.fetch()
        return prompt + source + output

    def runcode(self, code):
        try:
            eval(code, self.globals, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self):
        from werkzeug.debug.tbtools import get_current_traceback
        tb = get_current_traceback(skip=1)
        sys.stdout._write(tb.render_summary())

    def showsyntaxerror(self, filename=None):
        from werkzeug.debug.tbtools import get_current_traceback
        tb = get_current_traceback(skip=4)
        sys.stdout._write(tb.render_summary())

    def write(self, data):
        sys.stdout.write(data)


class Console(object):
    """An interactive console."""

    def __init__(self, globals=None, locals=None):
        if locals is None:
            locals = {}
        if globals is None:
            globals = {}
        self._ipy = _InteractiveConsole(globals, locals)

    def eval(self, code):
        _local._current_ipy = self._ipy
        old_sys_stdout = sys.stdout
        try:
            return self._ipy.runsource(code)
        finally:
            sys.stdout = old_sys_stdout
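
# A minimal sketch of the Console class above; eval() returns the
# HTML-rendered prompt, echoed source and output as one string.
from werkzeug.debug.console import Console

console = Console()
html = console.eval('6 * 7')
assert html.startswith('>>> 6 * 7')
assert '42' in html  # the result is rendered via debug_repr
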
280
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/repr.py
Normal file
@@ -0,0 +1,280 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.debug.repr
    ~~~~~~~~~~~~~~~~~~~

    This module implements object representations for debugging purposes.
    Unlike the default repr these reprs expose a lot more information and
    produce HTML instead of ASCII.

    Together with the CSS and JavaScript files of the debugger this gives
    a colorful and more compact output.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD.
"""
import sys
import re
import codecs
from traceback import format_exception_only
try:
    from collections import deque
except ImportError:  # pragma: no cover
    deque = None
from werkzeug.utils import escape
from werkzeug._compat import iteritems, PY2, text_type, integer_types, \
    string_types


missing = object()
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
RegexType = type(_paragraph_re)


HELP_HTML = '''\
<div class=box>
  <h3>%(title)s</h3>
  <pre class=help>%(text)s</pre>
</div>\
'''
OBJECT_DUMP_HTML = '''\
<div class=box>
  <h3>%(title)s</h3>
  %(repr)s
  <table>%(items)s</table>
</div>\
'''


def debug_repr(obj):
    """Creates a debug repr of an object as an HTML unicode string."""
    return DebugReprGenerator().repr(obj)


def dump(obj=missing):
    """Print the object details to stdout._write (for the interactive
    console of the web debugger).
    """
    gen = DebugReprGenerator()
    if obj is missing:
        rv = gen.dump_locals(sys._getframe(1).f_locals)
    else:
        rv = gen.dump_object(obj)
    sys.stdout._write(rv)


class _Helper(object):
    """Displays an HTML version of the normal help, for the interactive
    debugger only because it requires a patched sys.stdout.
    """

    def __repr__(self):
        return 'Type help(object) for help about object.'

    def __call__(self, topic=None):
        if topic is None:
            sys.stdout._write('<span class=help>%s</span>' % repr(self))
            return
        import pydoc
        pydoc.help(topic)
        rv = sys.stdout.reset()
        if isinstance(rv, bytes):
            rv = rv.decode('utf-8', 'ignore')
        paragraphs = _paragraph_re.split(rv)
        if len(paragraphs) > 1:
            title = paragraphs[0]
            text = '\n\n'.join(paragraphs[1:])
        else:  # pragma: no cover
            title = 'Help'
            text = paragraphs[0]
        sys.stdout._write(HELP_HTML % {'title': title, 'text': text})


helper = _Helper()


def _add_subclass_info(inner, obj, base):
    if isinstance(base, tuple):
        for base in base:
            if type(obj) is base:
                return inner
    elif type(obj) is base:
        return inner
    module = ''
    if obj.__class__.__module__ not in ('__builtin__', 'exceptions'):
        module = '<span class="module">%s.</span>' % obj.__class__.__module__
    return '%s%s(%s)' % (module, obj.__class__.__name__, inner)


class DebugReprGenerator(object):

    def __init__(self):
        self._stack = []

    def _sequence_repr_maker(left, right, base=object(), limit=8):
        def proxy(self, obj, recursive):
            if recursive:
                return _add_subclass_info(left + '...' + right, obj, base)
            buf = [left]
            have_extended_section = False
            for idx, item in enumerate(obj):
                if idx:
                    buf.append(', ')
                if idx == limit:
                    buf.append('<span class="extended">')
                    have_extended_section = True
                buf.append(self.repr(item))
            if have_extended_section:
                buf.append('</span>')
            buf.append(right)
            return _add_subclass_info(u''.join(buf), obj, base)
        return proxy

    list_repr = _sequence_repr_maker('[', ']', list)
    tuple_repr = _sequence_repr_maker('(', ')', tuple)
    set_repr = _sequence_repr_maker('set([', '])', set)
    frozenset_repr = _sequence_repr_maker('frozenset([', '])', frozenset)
    if deque is not None:
        deque_repr = _sequence_repr_maker('<span class="module">collections.'
                                          '</span>deque([', '])', deque)
    del _sequence_repr_maker

    def regex_repr(self, obj):
        pattern = repr(obj.pattern)
        if PY2:
            pattern = pattern.decode('string-escape', 'ignore')
        else:
            pattern = codecs.decode(pattern, 'unicode-escape', 'ignore')
        if pattern[:1] == 'u':
            pattern = 'ur' + pattern[1:]
        else:
            pattern = 'r' + pattern
        return u're.compile(<span class="string regex">%s</span>)' % pattern

    def string_repr(self, obj, limit=70):
        buf = ['<span class="string">']
        escaped = escape(obj)
        a = repr(escaped[:limit])
        b = repr(escaped[limit:])
        if isinstance(obj, text_type) and PY2:
            buf.append('u')
            a = a[1:]
            b = b[1:]
        if b != "''":
            buf.extend((a[:-1], '<span class="extended">', b[1:], '</span>'))
        else:
            buf.append(a)
        buf.append('</span>')
        return _add_subclass_info(u''.join(buf), obj, (bytes, text_type))

    def dict_repr(self, d, recursive, limit=5):
        if recursive:
            return _add_subclass_info(u'{...}', d, dict)
        buf = ['{']
        have_extended_section = False
        for idx, (key, value) in enumerate(iteritems(d)):
            if idx:
                buf.append(', ')
            if idx == limit - 1:
                buf.append('<span class="extended">')
                have_extended_section = True
            buf.append('<span class="pair"><span class="key">%s</span>: '
                       '<span class="value">%s</span></span>' %
                       (self.repr(key), self.repr(value)))
        if have_extended_section:
            buf.append('</span>')
        buf.append('}')
        return _add_subclass_info(u''.join(buf), d, dict)

    def object_repr(self, obj):
        r = repr(obj)
        if PY2:
            r = r.decode('utf-8', 'replace')
        return u'<span class="object">%s</span>' % escape(r)

    def dispatch_repr(self, obj, recursive):
        if obj is helper:
            return u'<span class="help">%r</span>' % helper
        if isinstance(obj, (integer_types, float, complex)):
            return u'<span class="number">%r</span>' % obj
        if isinstance(obj, string_types):
            return self.string_repr(obj)
        if isinstance(obj, RegexType):
            return self.regex_repr(obj)
        if isinstance(obj, list):
            return self.list_repr(obj, recursive)
        if isinstance(obj, tuple):
            return self.tuple_repr(obj, recursive)
        if isinstance(obj, set):
            return self.set_repr(obj, recursive)
        if isinstance(obj, frozenset):
            return self.frozenset_repr(obj, recursive)
        if isinstance(obj, dict):
            return self.dict_repr(obj, recursive)
        if deque is not None and isinstance(obj, deque):
            return self.deque_repr(obj, recursive)
        return self.object_repr(obj)

    def fallback_repr(self):
        try:
            info = ''.join(format_exception_only(*sys.exc_info()[:2]))
        except Exception:  # pragma: no cover
            info = '?'
        if PY2:
            info = info.decode('utf-8', 'ignore')
        return u'<span class="brokenrepr">&lt;broken repr (%s)&gt;' \
               u'</span>' % escape(info.strip())

    def repr(self, obj):
        recursive = False
        for item in self._stack:
            if item is obj:
                recursive = True
                break
        self._stack.append(obj)
        try:
            try:
                return self.dispatch_repr(obj, recursive)
            except Exception:
                return self.fallback_repr()
        finally:
            self._stack.pop()

    def dump_object(self, obj):
        repr = items = None
        if isinstance(obj, dict):
            title = 'Contents of'
            items = []
            for key, value in iteritems(obj):
                if not isinstance(key, string_types):
                    items = None
                    break
                items.append((key, self.repr(value)))
        if items is None:
            items = []
            repr = self.repr(obj)
            for key in dir(obj):
                try:
                    items.append((key, self.repr(getattr(obj, key))))
                except Exception:
                    pass
            title = 'Details for'
        title += ' ' + object.__repr__(obj)[1:-1]
        return self.render_object_dump(items, title, repr)

    def dump_locals(self, d):
        items = [(key, self.repr(value)) for key, value in d.items()]
        return self.render_object_dump(items, 'Local variables in frame')

    def render_object_dump(self, items, title, repr=None):
        html_items = []
        for key, value in items:
            html_items.append('<tr><th>%s<td><pre class=repr>%s</pre>' %
                              (escape(key), value))
        if not html_items:
            html_items.append('<tr><td><em>Nothing</em>')
        return OBJECT_DUMP_HTML % {
            'title': escape(title),
            'repr': repr and '<pre class=repr>%s</pre>' % repr or '',
            'items': '\n'.join(html_items)
        }
|
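# Illustrative sketch (an addition, not part of the original module): how the
# recursion guard in DebugReprGenerator.repr() behaves. Each object is pushed
# onto self._stack before dispatching, so a container that contains itself is
# dispatched with recursive=True and rendered as a placeholder instead of
# recursing forever. The names below are hypothetical:
#
#     gen = DebugReprGenerator()
#     lst = [1, 2, 3]
#     lst.append(lst)    # the list now contains itself
#     gen.repr(lst)      # the inner occurrence collapses to '[...]'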
@@ -0,0 +1,96 @@
-------------------------------
UBUNTU FONT LICENCE Version 1.0
-------------------------------

PREAMBLE
This licence allows the licensed fonts to be used, studied, modified and
redistributed freely. The fonts, including any derivative works, can be
bundled, embedded, and redistributed provided the terms of this licence
are met. The fonts and derivatives, however, cannot be released under
any other licence. The requirement for fonts to remain under this
licence does not require any document created using the fonts or their
derivatives to be published under this licence, as long as the primary
purpose of the document is not to be a vehicle for the distribution of
the fonts.

DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this licence and clearly marked as such. This may
include source files, build scripts and documentation.

"Original Version" refers to the collection of Font Software components
as received under this licence.

"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole -- any of the components of the
Original Version, by changing formats or by porting the Font Software to
a new environment.

"Copyright Holder(s)" refers to all individuals and companies who have a
copyright ownership of the Font Software.

"Substantially Changed" refers to Modified Versions which can be easily
identified as dissimilar to the Font Software by users of the Font
Software comparing the Original Version with the Modified Version.

To "Propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification and with or without charging
a redistribution fee), making available to the public, and in some
countries other activities as well.

PERMISSION & CONDITIONS
This licence does not grant any rights under trademark law and all such
rights are reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of the Font Software, to propagate the Font Software, subject to
the below conditions:

1) Each copy of the Font Software must contain the above copyright
notice and this licence. These can be included either as stand-alone
text files, human-readable headers or in the appropriate machine-
readable metadata fields within text or binary files as long as those
fields can be easily viewed by the user.

2) The font name complies with the following:
(a) The Original Version must retain its name, unmodified.
(b) Modified Versions which are Substantially Changed must be renamed to
avoid use of the name of the Original Version or similar names entirely.
(c) Modified Versions which are not Substantially Changed must be
renamed to both (i) retain the name of the Original Version and (ii) add
additional naming elements to distinguish the Modified Version from the
Original Version. The name of such Modified Versions must be the name of
the Original Version, with "derivative X" where X represents the name of
the new work, appended to that name.

3) The name(s) of the Copyright Holder(s) and any contributor to the
Font Software shall not be used to promote, endorse or advertise any
Modified Version, except (i) as required by this licence, (ii) to
acknowledge the contribution(s) of the Copyright Holder(s) or (iii) with
their explicit written permission.

4) The Font Software, modified or unmodified, in part or in whole, must
be distributed entirely under this licence, and must not be distributed
under any other licence. The requirement for fonts to remain under this
licence does not affect any document created using the Font Software,
except any version of the Font Software extracted from a document
created using the Font Software may only be distributed under this
licence.

TERMINATION
This licence becomes null and void if any of the above conditions are
not met.

DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER
DEALINGS IN THE FONT SOFTWARE.
BIN
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/shared/console.png
Executable file
Size: 507 B
@@ -0,0 +1,201 @@
$(function() {
  var sourceView = null;

  /**
   * if we are in console mode, show the console.
   */
  if (CONSOLE_MODE && EVALEX) {
    openShell(null, $('div.console div.inner').empty(), 0);
  }

  $('div.traceback div.frame').each(function() {
    var
      target = $('pre', this)
        .click(function() {
          sourceButton.click();
        }),
      consoleNode = null, source = null,
      frameID = this.id.substring(6);

    /**
     * Add an interactive console to the frames
     */
    if (EVALEX)
      $('<img src="?__debugger__=yes&cmd=resource&f=console.png">')
        .attr('title', 'Open an interactive python shell in this frame')
        .click(function() {
          consoleNode = openShell(consoleNode, target, frameID);
          return false;
        })
        .prependTo(target);

    /**
     * Show sourcecode
     */
    var sourceButton = $('<img src="?__debugger__=yes&cmd=resource&f=source.png">')
      .attr('title', 'Display the sourcecode for this frame')
      .click(function() {
        if (!sourceView)
          $('h2', sourceView =
            $('<div class="box"><h2>View Source</h2><div class="sourceview">' +
              '<table></table></div>')
              .insertBefore('div.explanation'))
            .css('cursor', 'pointer')
            .click(function() {
              sourceView.slideUp('fast');
            });
        $.get('', {__debugger__: 'yes', cmd:
          'source', frm: frameID, s: SECRET}, function(data) {
          $('table', sourceView)
            .replaceWith(data);
          if (!sourceView.is(':visible'))
            sourceView.slideDown('fast', function() {
              focusSourceBlock();
            });
          else
            focusSourceBlock();
        });
        return false;
      })
      .prependTo(target);
  });

  /**
   * toggle traceback types on click.
   */
  $('h2.traceback').click(function() {
    $(this).next().slideToggle('fast');
    $('div.plain').slideToggle('fast');
  }).css('cursor', 'pointer');
  $('div.plain').hide();

  /**
   * Add extra info (this is here so that only users with JavaScript
   * enabled see it.)
   */
  $('span.nojavascript')
    .removeClass('nojavascript')
    .html('<p>To switch between the interactive traceback and the plaintext ' +
          'one, you can click on the "Traceback" headline. From the text ' +
          'traceback you can also create a paste of it. ' + (!EVALEX ? '' :
          'For code execution mouse-over the frame you want to debug and ' +
          'click on the console icon on the right side.' +
          '<p>You can execute arbitrary Python code in the stack frames and ' +
          'there are some extra helpers available for introspection:' +
          '<ul><li><code>dump()</code> shows all variables in the frame' +
          '<li><code>dump(obj)</code> dumps all that\'s known about the object</ul>'));

  /**
   * Add the pastebin feature
   */
  $('div.plain form')
    .submit(function() {
      var label = $('input[type="submit"]', this);
      var old_val = label.val();
      label.val('submitting...');
      $.ajax({
        dataType: 'json',
        url: document.location.pathname,
        data: {__debugger__: 'yes', tb: TRACEBACK, cmd: 'paste',
               s: SECRET},
        success: function(data) {
          $('div.plain span.pastemessage')
            .removeClass('pastemessage')
            .text('Paste created: ')
            .append($('<a>#' + data.id + '</a>').attr('href', data.url));
        },
        error: function() {
          alert('Error: Could not submit paste. No network connection?');
          label.val(old_val);
        }
      });
      return false;
    });

  // if we have javascript we submit by ajax anyways, so no need for the
  // not scaling textarea.
  var plainTraceback = $('div.plain textarea');
  plainTraceback.replaceWith($('<pre>').text(plainTraceback.text()));
});


/**
 * Helper function for shell initialization
 */
function openShell(consoleNode, target, frameID) {
  if (consoleNode)
    return consoleNode.slideToggle('fast');
  consoleNode = $('<pre class="console">')
    .appendTo(target.parent())
    .hide();
  var historyPos = 0, history = [''];
  var output = $('<div class="output">[console ready]</div>')
    .appendTo(consoleNode);
  var form = $('<form>&gt;&gt;&gt; </form>')
    .submit(function() {
      var cmd = command.val();
      $.get('', {
          __debugger__: 'yes', cmd: cmd, frm: frameID, s: SECRET}, function(data) {
        var tmp = $('<div>').html(data);
        $('span.extended', tmp).each(function() {
          var hidden = $(this).wrap('<span>').hide();
          hidden
            .parent()
            .append($('<a href="#" class="toggle">&nbsp;&nbsp;</a>')
              .click(function() {
                hidden.toggle();
                $(this).toggleClass('open');
                return false;
              }));
        });
        output.append(tmp);
        command.focus();
        consoleNode.scrollTop(consoleNode.get(0).scrollHeight);
        var old = history.pop();
        history.push(cmd);
        if (typeof old != 'undefined')
          history.push(old);
        historyPos = history.length - 1;
      });
      command.val('');
      return false;
    }).
    appendTo(consoleNode);

  var command = $('<input type="text">')
    .appendTo(form)
    .keydown(function(e) {
      if (e.charCode == 100 && e.ctrlKey) {
        output.text('--- screen cleared ---');
        return false;
      }
      else if (e.charCode == 0 && (e.keyCode == 38 || e.keyCode == 40)) {
        if (e.keyCode == 38 && historyPos > 0)
          historyPos--;
        else if (e.keyCode == 40 && historyPos < history.length)
          historyPos++;
        command.val(history[historyPos]);
        return false;
      }
    });

  return consoleNode.slideDown('fast', function() {
    command.focus();
  });
}

/**
 * Focus the current block in the source view.
 */
function focusSourceBlock() {
  var tmp, line = $('table.source tr.current');
  for (var i = 0; i < 7; i++) {
    tmp = line.prev();
    if (!(tmp && tmp.is('.in-frame')))
      break;
    line = tmp;
  }
  var container = $('div.sourceview');

  container.scrollTop(line.offset().top - container.offset().top + container.scrollTop());
}
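/**
 * Illustrative sketch (an addition, not part of the original file): the
 * history push/pop sequence in openShell() keeps an editable "current"
 * slot at the end of the array, with historyPos pointing at it. The
 * function name below is hypothetical:
 *
 *   var history = [''], historyPos = 0;
 *   function remember(cmd) {
 *     var current = history.pop();      // take off the editable slot
 *     history.push(cmd);                // record the submitted command
 *     if (typeof current != 'undefined')
 *       history.push(current);          // restore the editable slot last
 *     historyPos = history.length - 1;  // arrow keys start from here
 *   }
 */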
167
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/shared/jquery.js
vendored
Normal file
@@ -0,0 +1,167 @@
/*!
 * jQuery JavaScript Library v1.4.4
 * http://jquery.com/
 *
 * Copyright 2010, John Resig
 * Dual licensed under the MIT or GPL Version 2 licenses.
 * http://jquery.org/license
 *
 * Includes Sizzle.js
 * http://sizzlejs.com/
 * Copyright 2010, The Dojo Foundation
 * Released under the MIT, BSD, and GPL Licenses.
 *
 * Date: Thu Nov 11 19:04:53 2010 -0500
 */
(function(E,B){function ka(a,b,d){if(d===B&&a.nodeType===1){d=a.getAttribute("data-"+b);if(typeof d==="string"){try{d=d==="true"?true:d==="false"?false:d==="null"?null:!c.isNaN(d)?parseFloat(d):Ja.test(d)?c.parseJSON(d):d}catch(e){}c.data(a,b,d)}else d=B}return d}function U(){return false}function ca(){return true}function la(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function Ka(a){var b,d,e,f,h,l,k,o,x,r,A,C=[];f=[];h=c.data(this,this.nodeType?"events":"__events__");if(typeof h==="function")h=
h.events;if(!(a.liveFired===this||!h||!h.live||a.button&&a.type==="click")){if(a.namespace)A=RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)");a.liveFired=this;var J=h.live.slice(0);for(k=0;k<J.length;k++){h=J[k];h.origType.replace(X,"")===a.type?f.push(h.selector):J.splice(k--,1)}f=c(a.target).closest(f,a.currentTarget);o=0;for(x=f.length;o<x;o++){r=f[o];for(k=0;k<J.length;k++){h=J[k];if(r.selector===h.selector&&(!A||A.test(h.namespace))){l=r.elem;e=null;if(h.preType==="mouseenter"||
h.preType==="mouseleave"){a.type=h.preType;e=c(a.relatedTarget).closest(h.selector)[0]}if(!e||e!==l)C.push({elem:l,handleObj:h,level:r.level})}}}o=0;for(x=C.length;o<x;o++){f=C[o];if(d&&f.level>d)break;a.currentTarget=f.elem;a.data=f.handleObj.data;a.handleObj=f.handleObj;A=f.handleObj.origHandler.apply(f.elem,arguments);if(A===false||a.isPropagationStopped()){d=f.level;if(A===false)b=false;if(a.isImmediatePropagationStopped())break}}return b}}function Y(a,b){return(a&&a!=="*"?a+".":"")+b.replace(La,
"`").replace(Ma,"&")}function ma(a,b,d){if(c.isFunction(b))return c.grep(a,function(f,h){return!!b.call(f,h,f)===d});else if(b.nodeType)return c.grep(a,function(f){return f===b===d});else if(typeof b==="string"){var e=c.grep(a,function(f){return f.nodeType===1});if(Na.test(b))return c.filter(b,e,!d);else b=c.filter(b,e)}return c.grep(a,function(f){return c.inArray(f,b)>=0===d})}function na(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var e=c.data(a[d++]),f=c.data(this,
e);if(e=e&&e.events){delete f.handle;f.events={};for(var h in e)for(var l in e[h])c.event.add(this,h,e[h][l],e[h][l].data)}}})}function Oa(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}function oa(a,b,d){var e=b==="width"?a.offsetWidth:a.offsetHeight;if(d==="border")return e;c.each(b==="width"?Pa:Qa,function(){d||(e-=parseFloat(c.css(a,"padding"+this))||0);if(d==="margin")e+=parseFloat(c.css(a,
"margin"+this))||0;else e-=parseFloat(c.css(a,"border"+this+"Width"))||0});return e}function da(a,b,d,e){if(c.isArray(b)&&b.length)c.each(b,function(f,h){d||Ra.test(a)?e(a,h):da(a+"["+(typeof h==="object"||c.isArray(h)?f:"")+"]",h,d,e)});else if(!d&&b!=null&&typeof b==="object")c.isEmptyObject(b)?e(a,""):c.each(b,function(f,h){da(a+"["+f+"]",h,d,e)});else e(a,b)}function S(a,b){var d={};c.each(pa.concat.apply([],pa.slice(0,b)),function(){d[this]=a});return d}function qa(a){if(!ea[a]){var b=c("<"+
a+">").appendTo("body"),d=b.css("display");b.remove();if(d==="none"||d==="")d="block";ea[a]=d}return ea[a]}function fa(a){return c.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var t=E.document,c=function(){function a(){if(!b.isReady){try{t.documentElement.doScroll("left")}catch(j){setTimeout(a,1);return}b.ready()}}var b=function(j,s){return new b.fn.init(j,s)},d=E.jQuery,e=E.$,f,h=/^(?:[^<]*(<[\w\W]+>)[^>]*$|#([\w\-]+)$)/,l=/\S/,k=/^\s+/,o=/\s+$/,x=/\W/,r=/\d/,A=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,
C=/^[\],:{}\s]*$/,J=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,w=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,I=/(?:^|:|,)(?:\s*\[)+/g,L=/(webkit)[ \/]([\w.]+)/,g=/(opera)(?:.*version)?[ \/]([\w.]+)/,i=/(msie) ([\w.]+)/,n=/(mozilla)(?:.*? rv:([\w.]+))?/,m=navigator.userAgent,p=false,q=[],u,y=Object.prototype.toString,F=Object.prototype.hasOwnProperty,M=Array.prototype.push,N=Array.prototype.slice,O=String.prototype.trim,D=Array.prototype.indexOf,R={};b.fn=b.prototype={init:function(j,
s){var v,z,H;if(!j)return this;if(j.nodeType){this.context=this[0]=j;this.length=1;return this}if(j==="body"&&!s&&t.body){this.context=t;this[0]=t.body;this.selector="body";this.length=1;return this}if(typeof j==="string")if((v=h.exec(j))&&(v[1]||!s))if(v[1]){H=s?s.ownerDocument||s:t;if(z=A.exec(j))if(b.isPlainObject(s)){j=[t.createElement(z[1])];b.fn.attr.call(j,s,true)}else j=[H.createElement(z[1])];else{z=b.buildFragment([v[1]],[H]);j=(z.cacheable?z.fragment.cloneNode(true):z.fragment).childNodes}return b.merge(this,
j)}else{if((z=t.getElementById(v[2]))&&z.parentNode){if(z.id!==v[2])return f.find(j);this.length=1;this[0]=z}this.context=t;this.selector=j;return this}else if(!s&&!x.test(j)){this.selector=j;this.context=t;j=t.getElementsByTagName(j);return b.merge(this,j)}else return!s||s.jquery?(s||f).find(j):b(s).find(j);else if(b.isFunction(j))return f.ready(j);if(j.selector!==B){this.selector=j.selector;this.context=j.context}return b.makeArray(j,this)},selector:"",jquery:"1.4.4",length:0,size:function(){return this.length},
toArray:function(){return N.call(this,0)},get:function(j){return j==null?this.toArray():j<0?this.slice(j)[0]:this[j]},pushStack:function(j,s,v){var z=b();b.isArray(j)?M.apply(z,j):b.merge(z,j);z.prevObject=this;z.context=this.context;if(s==="find")z.selector=this.selector+(this.selector?" ":"")+v;else if(s)z.selector=this.selector+"."+s+"("+v+")";return z},each:function(j,s){return b.each(this,j,s)},ready:function(j){b.bindReady();if(b.isReady)j.call(t,b);else q&&q.push(j);return this},eq:function(j){return j===
-1?this.slice(j):this.slice(j,+j+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(N.apply(this,arguments),"slice",N.call(arguments).join(","))},map:function(j){return this.pushStack(b.map(this,function(s,v){return j.call(s,v,s)}))},end:function(){return this.prevObject||b(null)},push:M,sort:[].sort,splice:[].splice};b.fn.init.prototype=b.fn;b.extend=b.fn.extend=function(){var j,s,v,z,H,G=arguments[0]||{},K=1,Q=arguments.length,ga=false;
if(typeof G==="boolean"){ga=G;G=arguments[1]||{};K=2}if(typeof G!=="object"&&!b.isFunction(G))G={};if(Q===K){G=this;--K}for(;K<Q;K++)if((j=arguments[K])!=null)for(s in j){v=G[s];z=j[s];if(G!==z)if(ga&&z&&(b.isPlainObject(z)||(H=b.isArray(z)))){if(H){H=false;v=v&&b.isArray(v)?v:[]}else v=v&&b.isPlainObject(v)?v:{};G[s]=b.extend(ga,v,z)}else if(z!==B)G[s]=z}return G};b.extend({noConflict:function(j){E.$=e;if(j)E.jQuery=d;return b},isReady:false,readyWait:1,ready:function(j){j===true&&b.readyWait--;
if(!b.readyWait||j!==true&&!b.isReady){if(!t.body)return setTimeout(b.ready,1);b.isReady=true;if(!(j!==true&&--b.readyWait>0))if(q){var s=0,v=q;for(q=null;j=v[s++];)j.call(t,b);b.fn.trigger&&b(t).trigger("ready").unbind("ready")}}},bindReady:function(){if(!p){p=true;if(t.readyState==="complete")return setTimeout(b.ready,1);if(t.addEventListener){t.addEventListener("DOMContentLoaded",u,false);E.addEventListener("load",b.ready,false)}else if(t.attachEvent){t.attachEvent("onreadystatechange",u);E.attachEvent("onload",
b.ready);var j=false;try{j=E.frameElement==null}catch(s){}t.documentElement.doScroll&&j&&a()}}},isFunction:function(j){return b.type(j)==="function"},isArray:Array.isArray||function(j){return b.type(j)==="array"},isWindow:function(j){return j&&typeof j==="object"&&"setInterval"in j},isNaN:function(j){return j==null||!r.test(j)||isNaN(j)},type:function(j){return j==null?String(j):R[y.call(j)]||"object"},isPlainObject:function(j){if(!j||b.type(j)!=="object"||j.nodeType||b.isWindow(j))return false;if(j.constructor&&
!F.call(j,"constructor")&&!F.call(j.constructor.prototype,"isPrototypeOf"))return false;for(var s in j);return s===B||F.call(j,s)},isEmptyObject:function(j){for(var s in j)return false;return true},error:function(j){throw j;},parseJSON:function(j){if(typeof j!=="string"||!j)return null;j=b.trim(j);if(C.test(j.replace(J,"@").replace(w,"]").replace(I,"")))return E.JSON&&E.JSON.parse?E.JSON.parse(j):(new Function("return "+j))();else b.error("Invalid JSON: "+j)},noop:function(){},globalEval:function(j){if(j&&
l.test(j)){var s=t.getElementsByTagName("head")[0]||t.documentElement,v=t.createElement("script");v.type="text/javascript";if(b.support.scriptEval)v.appendChild(t.createTextNode(j));else v.text=j;s.insertBefore(v,s.firstChild);s.removeChild(v)}},nodeName:function(j,s){return j.nodeName&&j.nodeName.toUpperCase()===s.toUpperCase()},each:function(j,s,v){var z,H=0,G=j.length,K=G===B||b.isFunction(j);if(v)if(K)for(z in j){if(s.apply(j[z],v)===false)break}else for(;H<G;){if(s.apply(j[H++],v)===false)break}else if(K)for(z in j){if(s.call(j[z],
z,j[z])===false)break}else for(v=j[0];H<G&&s.call(v,H,v)!==false;v=j[++H]);return j},trim:O?function(j){return j==null?"":O.call(j)}:function(j){return j==null?"":j.toString().replace(k,"").replace(o,"")},makeArray:function(j,s){var v=s||[];if(j!=null){var z=b.type(j);j.length==null||z==="string"||z==="function"||z==="regexp"||b.isWindow(j)?M.call(v,j):b.merge(v,j)}return v},inArray:function(j,s){if(s.indexOf)return s.indexOf(j);for(var v=0,z=s.length;v<z;v++)if(s[v]===j)return v;return-1},merge:function(j,
s){var v=j.length,z=0;if(typeof s.length==="number")for(var H=s.length;z<H;z++)j[v++]=s[z];else for(;s[z]!==B;)j[v++]=s[z++];j.length=v;return j},grep:function(j,s,v){var z=[],H;v=!!v;for(var G=0,K=j.length;G<K;G++){H=!!s(j[G],G);v!==H&&z.push(j[G])}return z},map:function(j,s,v){for(var z=[],H,G=0,K=j.length;G<K;G++){H=s(j[G],G,v);if(H!=null)z[z.length]=H}return z.concat.apply([],z)},guid:1,proxy:function(j,s,v){if(arguments.length===2)if(typeof s==="string"){v=j;j=v[s];s=B}else if(s&&!b.isFunction(s)){v=
s;s=B}if(!s&&j)s=function(){return j.apply(v||this,arguments)};if(j)s.guid=j.guid=j.guid||s.guid||b.guid++;return s},access:function(j,s,v,z,H,G){var K=j.length;if(typeof s==="object"){for(var Q in s)b.access(j,Q,s[Q],z,H,v);return j}if(v!==B){z=!G&&z&&b.isFunction(v);for(Q=0;Q<K;Q++)H(j[Q],s,z?v.call(j[Q],Q,H(j[Q],s)):v,G);return j}return K?H(j[0],s):B},now:function(){return(new Date).getTime()},uaMatch:function(j){j=j.toLowerCase();j=L.exec(j)||g.exec(j)||i.exec(j)||j.indexOf("compatible")<0&&n.exec(j)||
[];return{browser:j[1]||"",version:j[2]||"0"}},browser:{}});b.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(j,s){R["[object "+s+"]"]=s.toLowerCase()});m=b.uaMatch(m);if(m.browser){b.browser[m.browser]=true;b.browser.version=m.version}if(b.browser.webkit)b.browser.safari=true;if(D)b.inArray=function(j,s){return D.call(s,j)};if(!/\s/.test("\u00a0")){k=/^[\s\xA0]+/;o=/[\s\xA0]+$/}f=b(t);if(t.addEventListener)u=function(){t.removeEventListener("DOMContentLoaded",u,
false);b.ready()};else if(t.attachEvent)u=function(){if(t.readyState==="complete"){t.detachEvent("onreadystatechange",u);b.ready()}};return E.jQuery=E.$=b}();(function(){c.support={};var a=t.documentElement,b=t.createElement("script"),d=t.createElement("div"),e="script"+c.now();d.style.display="none";d.innerHTML=" <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";var f=d.getElementsByTagName("*"),h=d.getElementsByTagName("a")[0],l=t.createElement("select"),
k=l.appendChild(t.createElement("option"));if(!(!f||!f.length||!h)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(h.getAttribute("style")),hrefNormalized:h.getAttribute("href")==="/a",opacity:/^0.55$/.test(h.style.opacity),cssFloat:!!h.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:k.selected,deleteExpando:true,optDisabled:false,checkClone:false,
scriptEval:false,noCloneEvent:true,boxModel:null,inlineBlockNeedsLayout:false,shrinkWrapBlocks:false,reliableHiddenOffsets:true};l.disabled=true;c.support.optDisabled=!k.disabled;b.type="text/javascript";try{b.appendChild(t.createTextNode("window."+e+"=1;"))}catch(o){}a.insertBefore(b,a.firstChild);if(E[e]){c.support.scriptEval=true;delete E[e]}try{delete b.test}catch(x){c.support.deleteExpando=false}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function r(){c.support.noCloneEvent=
false;d.detachEvent("onclick",r)});d.cloneNode(true).fireEvent("onclick")}d=t.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=t.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var r=t.createElement("div");r.style.width=r.style.paddingLeft="1px";t.body.appendChild(r);c.boxModel=c.support.boxModel=r.offsetWidth===2;if("zoom"in r.style){r.style.display="inline";r.style.zoom=
1;c.support.inlineBlockNeedsLayout=r.offsetWidth===2;r.style.display="";r.innerHTML="<div style='width:4px;'></div>";c.support.shrinkWrapBlocks=r.offsetWidth!==2}r.innerHTML="<table><tr><td style='padding:0;display:none'></td><td>t</td></tr></table>";var A=r.getElementsByTagName("td");c.support.reliableHiddenOffsets=A[0].offsetHeight===0;A[0].style.display="";A[1].style.display="none";c.support.reliableHiddenOffsets=c.support.reliableHiddenOffsets&&A[0].offsetHeight===0;r.innerHTML="";t.body.removeChild(r).style.display=
"none"});a=function(r){var A=t.createElement("div");r="on"+r;var C=r in A;if(!C){A.setAttribute(r,"return;");C=typeof A[r]==="function"}return C};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=f=h=null}})();var ra={},Ja=/^(?:\{.*\}|\[.*\])$/;c.extend({cache:{},uuid:0,expando:"jQuery"+c.now(),noData:{embed:true,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:true},data:function(a,b,d){if(c.acceptData(a)){a=a==E?ra:a;var e=a.nodeType,f=e?a[c.expando]:null,h=
c.cache;if(!(e&&!f&&typeof b==="string"&&d===B)){if(e)f||(a[c.expando]=f=++c.uuid);else h=a;if(typeof b==="object")if(e)h[f]=c.extend(h[f],b);else c.extend(h,b);else if(e&&!h[f])h[f]={};a=e?h[f]:h;if(d!==B)a[b]=d;return typeof b==="string"?a[b]:a}}},removeData:function(a,b){if(c.acceptData(a)){a=a==E?ra:a;var d=a.nodeType,e=d?a[c.expando]:a,f=c.cache,h=d?f[e]:e;if(b){if(h){delete h[b];d&&c.isEmptyObject(h)&&c.removeData(a)}}else if(d&&c.support.deleteExpando)delete a[c.expando];else if(a.removeAttribute)a.removeAttribute(c.expando);
else if(d)delete f[e];else for(var l in a)delete a[l]}},acceptData:function(a){if(a.nodeName){var b=c.noData[a.nodeName.toLowerCase()];if(b)return!(b===true||a.getAttribute("classid")!==b)}return true}});c.fn.extend({data:function(a,b){var d=null;if(typeof a==="undefined"){if(this.length){var e=this[0].attributes,f;d=c.data(this[0]);for(var h=0,l=e.length;h<l;h++){f=e[h].name;if(f.indexOf("data-")===0){f=f.substr(5);ka(this[0],f,d[f])}}}return d}else if(typeof a==="object")return this.each(function(){c.data(this,
a)});var k=a.split(".");k[1]=k[1]?"."+k[1]:"";if(b===B){d=this.triggerHandler("getData"+k[1]+"!",[k[0]]);if(d===B&&this.length){d=c.data(this[0],a);d=ka(this[0],a,d)}return d===B&&k[1]?this.data(k[0]):d}else return this.each(function(){var o=c(this),x=[k[0],b];o.triggerHandler("setData"+k[1]+"!",x);c.data(this,a,b);o.triggerHandler("changeData"+k[1]+"!",x)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var e=
c.data(a,b);if(!d)return e||[];if(!e||c.isArray(d))e=c.data(a,b,c.makeArray(d));else e.push(d);return e}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),e=d.shift();if(e==="inprogress")e=d.shift();if(e){b==="fx"&&d.unshift("inprogress");e.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===B)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,
a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var sa=/[\n\t]/g,ha=/\s+/,Sa=/\r/g,Ta=/^(?:href|src|style)$/,Ua=/^(?:button|input)$/i,Va=/^(?:button|input|object|select|textarea)$/i,Wa=/^a(?:rea)?$/i,ta=/^(?:radio|checkbox)$/i;c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",
colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};c.fn.extend({attr:function(a,b){return c.access(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(x){var r=c(this);r.addClass(a.call(this,x,r.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ha),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===
1)if(f.className){for(var h=" "+f.className+" ",l=f.className,k=0,o=b.length;k<o;k++)if(h.indexOf(" "+b[k]+" ")<0)l+=" "+b[k];f.className=c.trim(l)}else f.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(o){var x=c(this);x.removeClass(a.call(this,o,x.attr("class")))});if(a&&typeof a==="string"||a===B)for(var b=(a||"").split(ha),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1&&f.className)if(a){for(var h=(" "+f.className+" ").replace(sa," "),
l=0,k=b.length;l<k;l++)h=h.replace(" "+b[l]+" "," ");f.className=c.trim(h)}else f.className=""}return this},toggleClass:function(a,b){var d=typeof a,e=typeof b==="boolean";if(c.isFunction(a))return this.each(function(f){var h=c(this);h.toggleClass(a.call(this,f,h.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var f,h=0,l=c(this),k=b,o=a.split(ha);f=o[h++];){k=e?k:!l.hasClass(f);l[k?"addClass":"removeClass"](f)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,
"__className__",this.className);this.className=this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(sa," ").indexOf(a)>-1)return true;return false},val:function(a){if(!arguments.length){var b=this[0];if(b){if(c.nodeName(b,"option")){var d=b.attributes.value;return!d||d.specified?b.value:b.text}if(c.nodeName(b,"select")){var e=b.selectedIndex;d=[];var f=b.options;b=b.type==="select-one";
if(e<0)return null;var h=b?e:0;for(e=b?e+1:f.length;h<e;h++){var l=f[h];if(l.selected&&(c.support.optDisabled?!l.disabled:l.getAttribute("disabled")===null)&&(!l.parentNode.disabled||!c.nodeName(l.parentNode,"optgroup"))){a=c(l).val();if(b)return a;d.push(a)}}return d}if(ta.test(b.type)&&!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Sa,"")}return B}var k=c.isFunction(a);return this.each(function(o){var x=c(this),r=a;if(this.nodeType===1){if(k)r=
a.call(this,o,x.val());if(r==null)r="";else if(typeof r==="number")r+="";else if(c.isArray(r))r=c.map(r,function(C){return C==null?"":C+""});if(c.isArray(r)&&ta.test(this.type))this.checked=c.inArray(x.val(),r)>=0;else if(c.nodeName(this,"select")){var A=c.makeArray(r);c("option",this).each(function(){this.selected=c.inArray(c(this).val(),A)>=0});if(!A.length)this.selectedIndex=-1}else this.value=r}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},
attr:function(a,b,d,e){if(!a||a.nodeType===3||a.nodeType===8)return B;if(e&&b in c.attrFn)return c(a)[b](d);e=a.nodeType!==1||!c.isXMLDoc(a);var f=d!==B;b=e&&c.props[b]||b;var h=Ta.test(b);if((b in a||a[b]!==B)&&e&&!h){if(f){b==="type"&&Ua.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");if(d===null)a.nodeType===1&&a.removeAttribute(b);else a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&
b.specified?b.value:Va.test(a.nodeName)||Wa.test(a.nodeName)&&a.href?0:B;return a[b]}if(!c.support.style&&e&&b==="style"){if(f)a.style.cssText=""+d;return a.style.cssText}f&&a.setAttribute(b,""+d);if(!a.attributes[b]&&a.hasAttribute&&!a.hasAttribute(b))return B;a=!c.support.hrefNormalized&&e&&h?a.getAttribute(b,2):a.getAttribute(b);return a===null?B:a}});var X=/\.(.*)$/,ia=/^(?:textarea|input|select)$/i,La=/\./g,Ma=/ /g,Xa=/[^\w\s.|`]/g,Ya=function(a){return a.replace(Xa,"\\$&")},ua={focusin:0,focusout:0};
c.event={add:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(c.isWindow(a)&&a!==E&&!a.frameElement)a=E;if(d===false)d=U;else if(!d)return;var f,h;if(d.handler){f=d;d=f.handler}if(!d.guid)d.guid=c.guid++;if(h=c.data(a)){var l=a.nodeType?"events":"__events__",k=h[l],o=h.handle;if(typeof k==="function"){o=k.handle;k=k.events}else if(!k){a.nodeType||(h[l]=h=function(){});h.events=k={}}if(!o)h.handle=o=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(o.elem,
arguments):B};o.elem=a;b=b.split(" ");for(var x=0,r;l=b[x++];){h=f?c.extend({},f):{handler:d,data:e};if(l.indexOf(".")>-1){r=l.split(".");l=r.shift();h.namespace=r.slice(0).sort().join(".")}else{r=[];h.namespace=""}h.type=l;if(!h.guid)h.guid=d.guid;var A=k[l],C=c.event.special[l]||{};if(!A){A=k[l]=[];if(!C.setup||C.setup.call(a,e,r,o)===false)if(a.addEventListener)a.addEventListener(l,o,false);else a.attachEvent&&a.attachEvent("on"+l,o)}if(C.add){C.add.call(a,h);if(!h.handler.guid)h.handler.guid=
d.guid}A.push(h);c.event.global[l]=true}a=null}}},global:{},remove:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(d===false)d=U;var f,h,l=0,k,o,x,r,A,C,J=a.nodeType?"events":"__events__",w=c.data(a),I=w&&w[J];if(w&&I){if(typeof I==="function"){w=I;I=I.events}if(b&&b.type){d=b.handler;b=b.type}if(!b||typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(f in I)c.event.remove(a,f+b)}else{for(b=b.split(" ");f=b[l++];){r=f;k=f.indexOf(".")<0;o=[];if(!k){o=f.split(".");f=o.shift();x=RegExp("(^|\\.)"+
c.map(o.slice(0).sort(),Ya).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(A=I[f])if(d){r=c.event.special[f]||{};for(h=e||0;h<A.length;h++){C=A[h];if(d.guid===C.guid){if(k||x.test(C.namespace)){e==null&&A.splice(h--,1);r.remove&&r.remove.call(a,C)}if(e!=null)break}}if(A.length===0||e!=null&&A.length===1){if(!r.teardown||r.teardown.call(a,o)===false)c.removeEvent(a,f,w.handle);delete I[f]}}else for(h=0;h<A.length;h++){C=A[h];if(k||x.test(C.namespace)){c.event.remove(a,r,C.handler,h);A.splice(h--,1)}}}if(c.isEmptyObject(I)){if(b=
w.handle)b.elem=null;delete w.events;delete w.handle;if(typeof w==="function")c.removeData(a,J);else c.isEmptyObject(w)&&c.removeData(a)}}}}},trigger:function(a,b,d,e){var f=a.type||a;if(!e){a=typeof a==="object"?a[c.expando]?a:c.extend(c.Event(f),a):c.Event(f);if(f.indexOf("!")>=0){a.type=f=f.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[f]&&c.each(c.cache,function(){this.events&&this.events[f]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===
8)return B;a.result=B;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(e=d.nodeType?c.data(d,"handle"):(c.data(d,"__events__")||{}).handle)&&e.apply(d,b);e=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+f]&&d["on"+f].apply(d,b)===false){a.result=false;a.preventDefault()}}catch(h){}if(!a.isPropagationStopped()&&e)c.event.trigger(a,b,e,true);else if(!a.isDefaultPrevented()){var l;e=a.target;var k=f.replace(X,""),o=c.nodeName(e,"a")&&k===
"click",x=c.event.special[k]||{};if((!x._default||x._default.call(d,a)===false)&&!o&&!(e&&e.nodeName&&c.noData[e.nodeName.toLowerCase()])){try{if(e[k]){if(l=e["on"+k])e["on"+k]=null;c.event.triggered=true;e[k]()}}catch(r){}if(l)e["on"+k]=l;c.event.triggered=false}}},handle:function(a){var b,d,e,f;d=[];var h=c.makeArray(arguments);a=h[0]=c.event.fix(a||E.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;if(!b){e=a.type.split(".");a.type=e.shift();d=e.slice(0).sort();e=RegExp("(^|\\.)"+
d.join("\\.(?:.*\\.)?")+"(\\.|$)")}a.namespace=a.namespace||d.join(".");f=c.data(this,this.nodeType?"events":"__events__");if(typeof f==="function")f=f.events;d=(f||{})[a.type];if(f&&d){d=d.slice(0);f=0;for(var l=d.length;f<l;f++){var k=d[f];if(b||e.test(k.namespace)){a.handler=k.handler;a.data=k.data;a.handleObj=k;k=k.handler.apply(this,h);if(k!==B){a.result=k;if(k===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
fix:function(a){if(a[c.expando])return a;var b=a;a=c.Event(b);for(var d=this.props.length,e;d;){e=this.props[--d];a[e]=b[e]}if(!a.target)a.target=a.srcElement||t;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=t.documentElement;d=t.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(a.which==null&&(a.charCode!=null||a.keyCode!=null))a.which=a.charCode!=null?a.charCode:a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==B)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,Y(a.origType,a.selector),c.extend({},a,{handler:Ka,guid:a.handler.guid}))},remove:function(a){c.event.remove(this,
Y(a.origType,a.selector),a)}},beforeunload:{setup:function(a,b,d){if(c.isWindow(this))this.onbeforeunload=d},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};c.removeEvent=t.removeEventListener?function(a,b,d){a.removeEventListener&&a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent&&a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=a;this.type=a.type}else this.type=a;this.timeStamp=
c.now();this[c.expando]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=ca;var a=this.originalEvent;if(a)if(a.preventDefault)a.preventDefault();else a.returnValue=false},stopPropagation:function(){this.isPropagationStopped=ca;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=ca;this.stopPropagation()},isDefaultPrevented:U,isPropagationStopped:U,isImmediatePropagationStopped:U};
var va=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},wa=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?wa:va,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?wa:va)}}});if(!c.support.submitBubbles)c.event.special.submit={setup:function(){if(this.nodeName.toLowerCase()!==
"form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length){a.liveFired=B;return la("submit",this,arguments)}});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13){a.liveFired=B;return la("submit",this,arguments)}})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};if(!c.support.changeBubbles){var V,
xa=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(e){return e.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},Z=function(a,b){var d=a.target,e,f;if(!(!ia.test(d.nodeName)||d.readOnly)){e=c.data(d,"_change_data");f=xa(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",f);if(!(e===B||f===e))if(e!=null||f){a.type="change";a.liveFired=
B;return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:Z,beforedeactivate:Z,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return Z.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return Z.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,"_change_data",xa(a))}},setup:function(){if(this.type===
"file")return false;for(var a in V)c.event.add(this,a+".specialChange",V[a]);return ia.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return ia.test(this.nodeName)}};V=c.event.special.change.filters;V.focus=V.beforeactivate}t.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(e){e=c.event.fix(e);e.type=b;return c.event.trigger(e,null,e.target)}c.event.special[b]={setup:function(){ua[b]++===0&&t.addEventListener(a,d,true)},teardown:function(){--ua[b]===
0&&t.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,e,f){if(typeof d==="object"){for(var h in d)this[b](h,e,d[h],f);return this}if(c.isFunction(e)||e===false){f=e;e=B}var l=b==="one"?c.proxy(f,function(o){c(this).unbind(o,l);return f.apply(this,arguments)}):f;if(d==="unload"&&b!=="one")this.one(d,e,f);else{h=0;for(var k=this.length;h<k;h++)c.event.add(this[h],d,l,e)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&!a.preventDefault)for(var d in a)this.unbind(d,
a[d]);else{d=0;for(var e=this.length;d<e;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,e){return this.live(b,d,e,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){var d=c.Event(a);d.preventDefault();d.stopPropagation();c.event.trigger(d,b,this[0]);return d.result}},toggle:function(a){for(var b=arguments,d=
1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(e){var f=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,f+1);e.preventDefault();return b[f].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var ya={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,e,f,h){var l,k=0,o,x,r=h||this.selector;h=h?this:c(this.context);if(typeof d===
"object"&&!d.preventDefault){for(l in d)h[b](l,e,d[l],r);return this}if(c.isFunction(e)){f=e;e=B}for(d=(d||"").split(" ");(l=d[k++])!=null;){o=X.exec(l);x="";if(o){x=o[0];l=l.replace(X,"")}if(l==="hover")d.push("mouseenter"+x,"mouseleave"+x);else{o=l;if(l==="focus"||l==="blur"){d.push(ya[l]+x);l+=x}else l=(ya[l]||l)+x;if(b==="live"){x=0;for(var A=h.length;x<A;x++)c.event.add(h[x],"live."+Y(l,r),{data:e,selector:r,handler:f,origType:l,origHandler:f,preType:o})}else h.unbind("live."+Y(l,r),f)}}return this}});
c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),function(a,b){c.fn[b]=function(d,e){if(e==null){e=d;d=null}return arguments.length>0?this.bind(b,d,e):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});E.attachEvent&&!E.addEventListener&&c(E).bind("unload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});
(function(){function a(g,i,n,m,p,q){p=0;for(var u=m.length;p<u;p++){var y=m[p];if(y){var F=false;for(y=y[g];y;){if(y.sizcache===n){F=m[y.sizset];break}if(y.nodeType===1&&!q){y.sizcache=n;y.sizset=p}if(y.nodeName.toLowerCase()===i){F=y;break}y=y[g]}m[p]=F}}}function b(g,i,n,m,p,q){p=0;for(var u=m.length;p<u;p++){var y=m[p];if(y){var F=false;for(y=y[g];y;){if(y.sizcache===n){F=m[y.sizset];break}if(y.nodeType===1){if(!q){y.sizcache=n;y.sizset=p}if(typeof i!=="string"){if(y===i){F=true;break}}else if(k.filter(i,
[y]).length>0){F=y;break}}y=y[g]}m[p]=F}}}var d=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,e=0,f=Object.prototype.toString,h=false,l=true;[0,0].sort(function(){l=false;return 0});var k=function(g,i,n,m){n=n||[];var p=i=i||t;if(i.nodeType!==1&&i.nodeType!==9)return[];if(!g||typeof g!=="string")return n;var q,u,y,F,M,N=true,O=k.isXML(i),D=[],R=g;do{d.exec("");if(q=d.exec(R)){R=q[3];D.push(q[1]);if(q[2]){F=q[3];
break}}}while(q);if(D.length>1&&x.exec(g))if(D.length===2&&o.relative[D[0]])u=L(D[0]+D[1],i);else for(u=o.relative[D[0]]?[i]:k(D.shift(),i);D.length;){g=D.shift();if(o.relative[g])g+=D.shift();u=L(g,u)}else{if(!m&&D.length>1&&i.nodeType===9&&!O&&o.match.ID.test(D[0])&&!o.match.ID.test(D[D.length-1])){q=k.find(D.shift(),i,O);i=q.expr?k.filter(q.expr,q.set)[0]:q.set[0]}if(i){q=m?{expr:D.pop(),set:C(m)}:k.find(D.pop(),D.length===1&&(D[0]==="~"||D[0]==="+")&&i.parentNode?i.parentNode:i,O);u=q.expr?k.filter(q.expr,
q.set):q.set;if(D.length>0)y=C(u);else N=false;for(;D.length;){q=M=D.pop();if(o.relative[M])q=D.pop();else M="";if(q==null)q=i;o.relative[M](y,q,O)}}else y=[]}y||(y=u);y||k.error(M||g);if(f.call(y)==="[object Array]")if(N)if(i&&i.nodeType===1)for(g=0;y[g]!=null;g++){if(y[g]&&(y[g]===true||y[g].nodeType===1&&k.contains(i,y[g])))n.push(u[g])}else for(g=0;y[g]!=null;g++)y[g]&&y[g].nodeType===1&&n.push(u[g]);else n.push.apply(n,y);else C(y,n);if(F){k(F,p,n,m);k.uniqueSort(n)}return n};k.uniqueSort=function(g){if(w){h=
l;g.sort(w);if(h)for(var i=1;i<g.length;i++)g[i]===g[i-1]&&g.splice(i--,1)}return g};k.matches=function(g,i){return k(g,null,null,i)};k.matchesSelector=function(g,i){return k(i,null,null,[g]).length>0};k.find=function(g,i,n){var m;if(!g)return[];for(var p=0,q=o.order.length;p<q;p++){var u,y=o.order[p];if(u=o.leftMatch[y].exec(g)){var F=u[1];u.splice(1,1);if(F.substr(F.length-1)!=="\\"){u[1]=(u[1]||"").replace(/\\/g,"");m=o.find[y](u,i,n);if(m!=null){g=g.replace(o.match[y],"");break}}}}m||(m=i.getElementsByTagName("*"));
return{set:m,expr:g}};k.filter=function(g,i,n,m){for(var p,q,u=g,y=[],F=i,M=i&&i[0]&&k.isXML(i[0]);g&&i.length;){for(var N in o.filter)if((p=o.leftMatch[N].exec(g))!=null&&p[2]){var O,D,R=o.filter[N];D=p[1];q=false;p.splice(1,1);if(D.substr(D.length-1)!=="\\"){if(F===y)y=[];if(o.preFilter[N])if(p=o.preFilter[N](p,F,n,y,m,M)){if(p===true)continue}else q=O=true;if(p)for(var j=0;(D=F[j])!=null;j++)if(D){O=R(D,p,j,F);var s=m^!!O;if(n&&O!=null)if(s)q=true;else F[j]=false;else if(s){y.push(D);q=true}}if(O!==
B){n||(F=y);g=g.replace(o.match[N],"");if(!q)return[];break}}}if(g===u)if(q==null)k.error(g);else break;u=g}return F};k.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var o=k.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+\-]*)\))?/,
POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},relative:{"+":function(g,i){var n=typeof i==="string",m=n&&!/\W/.test(i);n=n&&!m;if(m)i=i.toLowerCase();m=0;for(var p=g.length,q;m<p;m++)if(q=g[m]){for(;(q=q.previousSibling)&&q.nodeType!==1;);g[m]=n||q&&q.nodeName.toLowerCase()===
i?q||false:q===i}n&&k.filter(i,g,true)},">":function(g,i){var n,m=typeof i==="string",p=0,q=g.length;if(m&&!/\W/.test(i))for(i=i.toLowerCase();p<q;p++){if(n=g[p]){n=n.parentNode;g[p]=n.nodeName.toLowerCase()===i?n:false}}else{for(;p<q;p++)if(n=g[p])g[p]=m?n.parentNode:n.parentNode===i;m&&k.filter(i,g,true)}},"":function(g,i,n){var m,p=e++,q=b;if(typeof i==="string"&&!/\W/.test(i)){m=i=i.toLowerCase();q=a}q("parentNode",i,p,g,m,n)},"~":function(g,i,n){var m,p=e++,q=b;if(typeof i==="string"&&!/\W/.test(i)){m=
i=i.toLowerCase();q=a}q("previousSibling",i,p,g,m,n)}},find:{ID:function(g,i,n){if(typeof i.getElementById!=="undefined"&&!n)return(g=i.getElementById(g[1]))&&g.parentNode?[g]:[]},NAME:function(g,i){if(typeof i.getElementsByName!=="undefined"){for(var n=[],m=i.getElementsByName(g[1]),p=0,q=m.length;p<q;p++)m[p].getAttribute("name")===g[1]&&n.push(m[p]);return n.length===0?null:n}},TAG:function(g,i){return i.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,i,n,m,p,q){g=" "+g[1].replace(/\\/g,
"")+" ";if(q)return g;q=0;for(var u;(u=i[q])!=null;q++)if(u)if(p^(u.className&&(" "+u.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))n||m.push(u);else if(n)i[q]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},CHILD:function(g){if(g[1]==="nth"){var i=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=i[1]+(i[2]||1)-0;g[3]=i[3]-0}g[0]=e++;return g},ATTR:function(g,i,n,
m,p,q){i=g[1].replace(/\\/g,"");if(!q&&o.attrMap[i])g[1]=o.attrMap[i];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,i,n,m,p){if(g[1]==="not")if((d.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=k(g[3],null,null,i);else{g=k.filter(g[3],i,n,true^p);n||m.push.apply(m,g);return false}else if(o.match.POS.test(g[0])||o.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===
true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,i,n){return!!k(n[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===
g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},setFilters:{first:function(g,i){return i===0},last:function(g,i,n,m){return i===m.length-1},even:function(g,i){return i%2===0},odd:function(g,i){return i%2===1},lt:function(g,i,n){return i<n[3]-0},gt:function(g,i,n){return i>n[3]-0},nth:function(g,i,n){return n[3]-
0===i},eq:function(g,i,n){return n[3]-0===i}},filter:{PSEUDO:function(g,i,n,m){var p=i[1],q=o.filters[p];if(q)return q(g,n,i,m);else if(p==="contains")return(g.textContent||g.innerText||k.getText([g])||"").indexOf(i[3])>=0;else if(p==="not"){i=i[3];n=0;for(m=i.length;n<m;n++)if(i[n]===g)return false;return true}else k.error("Syntax error, unrecognized expression: "+p)},CHILD:function(g,i){var n=i[1],m=g;switch(n){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(n===
"first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":n=i[2];var p=i[3];if(n===1&&p===0)return true;var q=i[0],u=g.parentNode;if(u&&(u.sizcache!==q||!g.nodeIndex)){var y=0;for(m=u.firstChild;m;m=m.nextSibling)if(m.nodeType===1)m.nodeIndex=++y;u.sizcache=q}m=g.nodeIndex-p;return n===0?m===0:m%n===0&&m/n>=0}},ID:function(g,i){return g.nodeType===1&&g.getAttribute("id")===i},TAG:function(g,i){return i==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===
i},CLASS:function(g,i){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(i)>-1},ATTR:function(g,i){var n=i[1];n=o.attrHandle[n]?o.attrHandle[n](g):g[n]!=null?g[n]:g.getAttribute(n);var m=n+"",p=i[2],q=i[4];return n==null?p==="!=":p==="="?m===q:p==="*="?m.indexOf(q)>=0:p==="~="?(" "+m+" ").indexOf(q)>=0:!q?m&&n!==false:p==="!="?m!==q:p==="^="?m.indexOf(q)===0:p==="$="?m.substr(m.length-q.length)===q:p==="|="?m===q||m.substr(0,q.length+1)===q+"-":false},POS:function(g,i,n,m){var p=o.setFilters[i[2]];
if(p)return p(g,n,i,m)}}},x=o.match.POS,r=function(g,i){return"\\"+(i-0+1)},A;for(A in o.match){o.match[A]=RegExp(o.match[A].source+/(?![^\[]*\])(?![^\(]*\))/.source);o.leftMatch[A]=RegExp(/(^(?:.|\r|\n)*?)/.source+o.match[A].source.replace(/\\(\d+)/g,r))}var C=function(g,i){g=Array.prototype.slice.call(g,0);if(i){i.push.apply(i,g);return i}return g};try{Array.prototype.slice.call(t.documentElement.childNodes,0)}catch(J){C=function(g,i){var n=0,m=i||[];if(f.call(g)==="[object Array]")Array.prototype.push.apply(m,
g);else if(typeof g.length==="number")for(var p=g.length;n<p;n++)m.push(g[n]);else for(;g[n];n++)m.push(g[n]);return m}}var w,I;if(t.documentElement.compareDocumentPosition)w=function(g,i){if(g===i){h=true;return 0}if(!g.compareDocumentPosition||!i.compareDocumentPosition)return g.compareDocumentPosition?-1:1;return g.compareDocumentPosition(i)&4?-1:1};else{w=function(g,i){var n,m,p=[],q=[];n=g.parentNode;m=i.parentNode;var u=n;if(g===i){h=true;return 0}else if(n===m)return I(g,i);else if(n){if(!m)return 1}else return-1;
for(;u;){p.unshift(u);u=u.parentNode}for(u=m;u;){q.unshift(u);u=u.parentNode}n=p.length;m=q.length;for(u=0;u<n&&u<m;u++)if(p[u]!==q[u])return I(p[u],q[u]);return u===n?I(g,q[u],-1):I(p[u],i,1)};I=function(g,i,n){if(g===i)return n;for(g=g.nextSibling;g;){if(g===i)return-1;g=g.nextSibling}return 1}}k.getText=function(g){for(var i="",n,m=0;g[m];m++){n=g[m];if(n.nodeType===3||n.nodeType===4)i+=n.nodeValue;else if(n.nodeType!==8)i+=k.getText(n.childNodes)}return i};(function(){var g=t.createElement("div"),
i="script"+(new Date).getTime(),n=t.documentElement;g.innerHTML="<a name='"+i+"'/>";n.insertBefore(g,n.firstChild);if(t.getElementById(i)){o.find.ID=function(m,p,q){if(typeof p.getElementById!=="undefined"&&!q)return(p=p.getElementById(m[1]))?p.id===m[1]||typeof p.getAttributeNode!=="undefined"&&p.getAttributeNode("id").nodeValue===m[1]?[p]:B:[]};o.filter.ID=function(m,p){var q=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&q&&q.nodeValue===p}}n.removeChild(g);
n=g=null})();(function(){var g=t.createElement("div");g.appendChild(t.createComment(""));if(g.getElementsByTagName("*").length>0)o.find.TAG=function(i,n){var m=n.getElementsByTagName(i[1]);if(i[1]==="*"){for(var p=[],q=0;m[q];q++)m[q].nodeType===1&&p.push(m[q]);m=p}return m};g.innerHTML="<a href='#'></a>";if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")o.attrHandle.href=function(i){return i.getAttribute("href",2)};g=null})();t.querySelectorAll&&
function(){var g=k,i=t.createElement("div");i.innerHTML="<p class='TEST'></p>";if(!(i.querySelectorAll&&i.querySelectorAll(".TEST").length===0)){k=function(m,p,q,u){p=p||t;m=m.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!u&&!k.isXML(p))if(p.nodeType===9)try{return C(p.querySelectorAll(m),q)}catch(y){}else if(p.nodeType===1&&p.nodeName.toLowerCase()!=="object"){var F=p.getAttribute("id"),M=F||"__sizzle__";F||p.setAttribute("id",M);try{return C(p.querySelectorAll("#"+M+" "+m),q)}catch(N){}finally{F||
p.removeAttribute("id")}}return g(m,p,q,u)};for(var n in g)k[n]=g[n];i=null}}();(function(){var g=t.documentElement,i=g.matchesSelector||g.mozMatchesSelector||g.webkitMatchesSelector||g.msMatchesSelector,n=false;try{i.call(t.documentElement,"[test!='']:sizzle")}catch(m){n=true}if(i)k.matchesSelector=function(p,q){q=q.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!k.isXML(p))try{if(n||!o.match.PSEUDO.test(q)&&!/!=/.test(q))return i.call(p,q)}catch(u){}return k(q,null,null,[p]).length>0}})();(function(){var g=
t.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){o.order.splice(1,0,"CLASS");o.find.CLASS=function(i,n,m){if(typeof n.getElementsByClassName!=="undefined"&&!m)return n.getElementsByClassName(i[1])};g=null}}})();k.contains=t.documentElement.contains?function(g,i){return g!==i&&(g.contains?g.contains(i):true)}:t.documentElement.compareDocumentPosition?
function(g,i){return!!(g.compareDocumentPosition(i)&16)}:function(){return false};k.isXML=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false};var L=function(g,i){for(var n,m=[],p="",q=i.nodeType?[i]:i;n=o.match.PSEUDO.exec(g);){p+=n[0];g=g.replace(o.match.PSEUDO,"")}g=o.relative[g]?g+"*":g;n=0;for(var u=q.length;n<u;n++)k(g,q[n],m);return k.filter(p,m)};c.find=k;c.expr=k.selectors;c.expr[":"]=c.expr.filters;c.unique=k.uniqueSort;c.text=k.getText;c.isXMLDoc=k.isXML;
c.contains=k.contains})();var Za=/Until$/,$a=/^(?:parents|prevUntil|prevAll)/,ab=/,/,Na=/^.[^:#\[\.,]*$/,bb=Array.prototype.slice,cb=c.expr.match.POS;c.fn.extend({find:function(a){for(var b=this.pushStack("","find",a),d=0,e=0,f=this.length;e<f;e++){d=b.length;c.find(a,this[e],b);if(e>0)for(var h=d;h<b.length;h++)for(var l=0;l<d;l++)if(b[l]===b[h]){b.splice(h--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,e=b.length;d<e;d++)if(c.contains(this,b[d]))return true})},
not:function(a){return this.pushStack(ma(this,a,false),"not",a)},filter:function(a){return this.pushStack(ma(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a,b){var d=[],e,f,h=this[0];if(c.isArray(a)){var l,k={},o=1;if(h&&a.length){e=0;for(f=a.length;e<f;e++){l=a[e];k[l]||(k[l]=c.expr.match.POS.test(l)?c(l,b||this.context):l)}for(;h&&h.ownerDocument&&h!==b;){for(l in k){e=k[l];if(e.jquery?e.index(h)>-1:c(h).is(e))d.push({selector:l,elem:h,level:o})}h=
h.parentNode;o++}}return d}l=cb.test(a)?c(a,b||this.context):null;e=0;for(f=this.length;e<f;e++)for(h=this[e];h;)if(l?l.index(h)>-1:c.find.matchesSelector(h,a)){d.push(h);break}else{h=h.parentNode;if(!h||!h.ownerDocument||h===b)break}d=d.length>1?c.unique(d):d;return this.pushStack(d,"closest",a)},index:function(a){if(!a||typeof a==="string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var d=typeof a==="string"?c(a,b||this.context):
c.makeArray(a),e=c.merge(this.get(),d);return this.pushStack(!d[0]||!d[0].parentNode||d[0].parentNode.nodeType===11||!e[0]||!e[0].parentNode||e[0].parentNode.nodeType===11?e:c.unique(e))},andSelf:function(){return this.add(this.prevObject)}});c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,
2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,
b){c.fn[a]=function(d,e){var f=c.map(this,b,d);Za.test(a)||(e=d);if(e&&typeof e==="string")f=c.filter(e,f);f=this.length>1?c.unique(f):f;if((this.length>1||ab.test(e))&&$a.test(a))f=f.reverse();return this.pushStack(f,a,bb.call(arguments).join(","))}});c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return b.length===1?c.find.matchesSelector(b[0],a)?[b[0]]:[]:c.find.matches(a,b)},dir:function(a,b,d){var e=[];for(a=a[b];a&&a.nodeType!==9&&(d===B||a.nodeType!==1||!c(a).is(d));){a.nodeType===1&&
e.push(a);a=a[b]}return e},nth:function(a,b,d){b=b||1;for(var e=0;a;a=a[d])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var za=/ jQuery\d+="(?:\d+|null)"/g,$=/^\s+/,Aa=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Ba=/<([\w:]+)/,db=/<tbody/i,eb=/<|&#?\w+;/,Ca=/<(?:script|object|embed|option|style)/i,Da=/checked\s*(?:[^=]|=\s*.checked.)/i,fb=/\=([^="'>\s]+\/)>/g,P={option:[1,
"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]};P.optgroup=P.option;P.tbody=P.tfoot=P.colgroup=P.caption=P.thead;P.th=P.td;if(!c.support.htmlSerialize)P._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=
c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==B)return this.empty().append((this[0]&&this[0].ownerDocument||t).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},
wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},
prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,
this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,e;(e=this[d])!=null;d++)if(!a||c.filter(a,[e]).length){if(!b&&e.nodeType===1){c.cleanData(e.getElementsByTagName("*"));c.cleanData([e])}e.parentNode&&e.parentNode.removeChild(e)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);
return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,e=this.ownerDocument;if(!d){d=e.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(za,"").replace(fb,'="$1">').replace($,"")],e)[0]}else return this.cloneNode(true)});if(a===true){na(this,b);na(this.find("*"),b.find("*"))}return b},html:function(a){if(a===B)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(za,""):null;
else if(typeof a==="string"&&!Ca.test(a)&&(c.support.leadingWhitespace||!$.test(a))&&!P[(Ba.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Aa,"<$1></$2>");try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(e){this.empty().append(a)}}else c.isFunction(a)?this.each(function(f){var h=c(this);h.html(a.call(this,f,h.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=
c(this),e=d.html();d.replaceWith(a.call(this,b,e))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a,true)},domManip:function(a,b,d){var e,f,h,l=a[0],k=[];if(!c.support.checkClone&&arguments.length===3&&typeof l==="string"&&Da.test(l))return this.each(function(){c(this).domManip(a,
b,d,true)});if(c.isFunction(l))return this.each(function(x){var r=c(this);a[0]=l.call(this,x,b?r.html():B);r.domManip(a,b,d)});if(this[0]){e=l&&l.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:c.buildFragment(a,this,k);h=e.fragment;if(f=h.childNodes.length===1?h=h.firstChild:h.firstChild){b=b&&c.nodeName(f,"tr");f=0;for(var o=this.length;f<o;f++)d.call(b?c.nodeName(this[f],"table")?this[f].getElementsByTagName("tbody")[0]||this[f].appendChild(this[f].ownerDocument.createElement("tbody")):
this[f]:this[f],f>0||e.cacheable||this.length>1?h.cloneNode(true):h)}k.length&&c.each(k,Oa)}return this}});c.buildFragment=function(a,b,d){var e,f,h;b=b&&b[0]?b[0].ownerDocument||b[0]:t;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===t&&!Ca.test(a[0])&&(c.support.checkClone||!Da.test(a[0]))){f=true;if(h=c.fragments[a[0]])if(h!==1)e=h}if(!e){e=b.createDocumentFragment();c.clean(a,b,e,d)}if(f)c.fragments[a[0]]=h?e:1;return{fragment:e,cacheable:f}};c.fragments={};c.each({appendTo:"append",
prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var e=[];d=c(d);var f=this.length===1&&this[0].parentNode;if(f&&f.nodeType===11&&f.childNodes.length===1&&d.length===1){d[b](this[0]);return this}else{f=0;for(var h=d.length;f<h;f++){var l=(f>0?this.clone(true):this).get();c(d[f])[b](l);e=e.concat(l)}return this.pushStack(e,a,d.selector)}}});c.extend({clean:function(a,b,d,e){b=b||t;if(typeof b.createElement==="undefined")b=b.ownerDocument||
b[0]&&b[0].ownerDocument||t;for(var f=[],h=0,l;(l=a[h])!=null;h++){if(typeof l==="number")l+="";if(l){if(typeof l==="string"&&!eb.test(l))l=b.createTextNode(l);else if(typeof l==="string"){l=l.replace(Aa,"<$1></$2>");var k=(Ba.exec(l)||["",""])[1].toLowerCase(),o=P[k]||P._default,x=o[0],r=b.createElement("div");for(r.innerHTML=o[1]+l+o[2];x--;)r=r.lastChild;if(!c.support.tbody){x=db.test(l);k=k==="table"&&!x?r.firstChild&&r.firstChild.childNodes:o[1]==="<table>"&&!x?r.childNodes:[];for(o=k.length-
1;o>=0;--o)c.nodeName(k[o],"tbody")&&!k[o].childNodes.length&&k[o].parentNode.removeChild(k[o])}!c.support.leadingWhitespace&&$.test(l)&&r.insertBefore(b.createTextNode($.exec(l)[0]),r.firstChild);l=r.childNodes}if(l.nodeType)f.push(l);else f=c.merge(f,l)}}if(d)for(h=0;f[h];h++)if(e&&c.nodeName(f[h],"script")&&(!f[h].type||f[h].type.toLowerCase()==="text/javascript"))e.push(f[h].parentNode?f[h].parentNode.removeChild(f[h]):f[h]);else{f[h].nodeType===1&&f.splice.apply(f,[h+1,0].concat(c.makeArray(f[h].getElementsByTagName("script"))));
d.appendChild(f[h])}return f},cleanData:function(a){for(var b,d,e=c.cache,f=c.event.special,h=c.support.deleteExpando,l=0,k;(k=a[l])!=null;l++)if(!(k.nodeName&&c.noData[k.nodeName.toLowerCase()]))if(d=k[c.expando]){if((b=e[d])&&b.events)for(var o in b.events)f[o]?c.event.remove(k,o):c.removeEvent(k,o,b.handle);if(h)delete k[c.expando];else k.removeAttribute&&k.removeAttribute(c.expando);delete e[d]}}});var Ea=/alpha\([^)]*\)/i,gb=/opacity=([^)]*)/,hb=/-([a-z])/ig,ib=/([A-Z])/g,Fa=/^-?\d+(?:px)?$/i,
jb=/^-?\d/,kb={position:"absolute",visibility:"hidden",display:"block"},Pa=["Left","Right"],Qa=["Top","Bottom"],W,Ga,aa,lb=function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){if(arguments.length===2&&b===B)return this;return c.access(this,a,b,true,function(d,e,f){return f!==B?c.style(d,e,f):c.css(d,e)})};c.extend({cssHooks:{opacity:{get:function(a,b){if(b){var d=W(a,"opacity","opacity");return d===""?"1":d}else return a.style.opacity}}},cssNumber:{zIndex:true,fontWeight:true,opacity:true,
zoom:true,lineHeight:true},cssProps:{"float":c.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,d,e){if(!(!a||a.nodeType===3||a.nodeType===8||!a.style)){var f,h=c.camelCase(b),l=a.style,k=c.cssHooks[h];b=c.cssProps[h]||h;if(d!==B){if(!(typeof d==="number"&&isNaN(d)||d==null)){if(typeof d==="number"&&!c.cssNumber[h])d+="px";if(!k||!("set"in k)||(d=k.set(a,d))!==B)try{l[b]=d}catch(o){}}}else{if(k&&"get"in k&&(f=k.get(a,false,e))!==B)return f;return l[b]}}},css:function(a,b,d){var e,f=c.camelCase(b),
h=c.cssHooks[f];b=c.cssProps[f]||f;if(h&&"get"in h&&(e=h.get(a,true,d))!==B)return e;else if(W)return W(a,b,f)},swap:function(a,b,d){var e={},f;for(f in b){e[f]=a.style[f];a.style[f]=b[f]}d.call(a);for(f in b)a.style[f]=e[f]},camelCase:function(a){return a.replace(hb,lb)}});c.curCSS=c.css;c.each(["height","width"],function(a,b){c.cssHooks[b]={get:function(d,e,f){var h;if(e){if(d.offsetWidth!==0)h=oa(d,b,f);else c.swap(d,kb,function(){h=oa(d,b,f)});if(h<=0){h=W(d,b,b);if(h==="0px"&&aa)h=aa(d,b,b);
if(h!=null)return h===""||h==="auto"?"0px":h}if(h<0||h==null){h=d.style[b];return h===""||h==="auto"?"0px":h}return typeof h==="string"?h:h+"px"}},set:function(d,e){if(Fa.test(e)){e=parseFloat(e);if(e>=0)return e+"px"}else return e}}});if(!c.support.opacity)c.cssHooks.opacity={get:function(a,b){return gb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var d=a.style;d.zoom=1;var e=c.isNaN(b)?"":"alpha(opacity="+b*100+")",f=
d.filter||"";d.filter=Ea.test(f)?f.replace(Ea,e):d.filter+" "+e}};if(t.defaultView&&t.defaultView.getComputedStyle)Ga=function(a,b,d){var e;d=d.replace(ib,"-$1").toLowerCase();if(!(b=a.ownerDocument.defaultView))return B;if(b=b.getComputedStyle(a,null)){e=b.getPropertyValue(d);if(e===""&&!c.contains(a.ownerDocument.documentElement,a))e=c.style(a,d)}return e};if(t.documentElement.currentStyle)aa=function(a,b){var d,e,f=a.currentStyle&&a.currentStyle[b],h=a.style;if(!Fa.test(f)&&jb.test(f)){d=h.left;
e=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;h.left=b==="fontSize"?"1em":f||0;f=h.pixelLeft+"px";h.left=d;a.runtimeStyle.left=e}return f===""?"auto":f};W=Ga||aa;if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=a.offsetHeight;return a.offsetWidth===0&&b===0||!c.support.reliableHiddenOffsets&&(a.style.display||c.css(a,"display"))==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var mb=c.now(),nb=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
ob=/^(?:select|textarea)/i,pb=/^(?:color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,qb=/^(?:GET|HEAD)$/,Ra=/\[\]$/,T=/\=\?(&|$)/,ja=/\?/,rb=/([?&])_=[^&]*/,sb=/^(\w+:)?\/\/([^\/?#]+)/,tb=/%20/g,ub=/#.*$/,Ha=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!=="string"&&Ha)return Ha.apply(this,arguments);else if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var f=a.slice(e,a.length);a=a.slice(0,e)}e="GET";if(b)if(c.isFunction(b)){d=b;b=null}else if(typeof b===
"object"){b=c.param(b,c.ajaxSettings.traditional);e="POST"}var h=this;c.ajax({url:a,type:e,dataType:"html",data:b,complete:function(l,k){if(k==="success"||k==="notmodified")h.html(f?c("<div>").append(l.responseText.replace(nb,"")).find(f):l.responseText);d&&h.each(d,[l.responseText,k,l])}});return this},serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&&
!this.disabled&&(this.checked||ob.test(this.nodeName)||pb.test(this.type))}).map(function(a,b){var d=c(this).val();return d==null?null:c.isArray(d)?c.map(d,function(e){return{name:b.name,value:e}}):{name:b.name,value:d}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:e})},
getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:e})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return new E.XMLHttpRequest},accepts:{xml:"application/xml, text/xml",html:"text/html",
script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},ajax:function(a){var b=c.extend(true,{},c.ajaxSettings,a),d,e,f,h=b.type.toUpperCase(),l=qb.test(h);b.url=b.url.replace(ub,"");b.context=a&&a.context!=null?a.context:b;if(b.data&&b.processData&&typeof b.data!=="string")b.data=c.param(b.data,b.traditional);if(b.dataType==="jsonp"){if(h==="GET")T.test(b.url)||(b.url+=(ja.test(b.url)?"&":"?")+(b.jsonp||"callback")+"=?");else if(!b.data||
!T.test(b.data))b.data=(b.data?b.data+"&":"")+(b.jsonp||"callback")+"=?";b.dataType="json"}if(b.dataType==="json"&&(b.data&&T.test(b.data)||T.test(b.url))){d=b.jsonpCallback||"jsonp"+mb++;if(b.data)b.data=(b.data+"").replace(T,"="+d+"$1");b.url=b.url.replace(T,"="+d+"$1");b.dataType="script";var k=E[d];E[d]=function(m){if(c.isFunction(k))k(m);else{E[d]=B;try{delete E[d]}catch(p){}}f=m;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);r&&r.removeChild(A)}}if(b.dataType==="script"&&b.cache===null)b.cache=
false;if(b.cache===false&&l){var o=c.now(),x=b.url.replace(rb,"$1_="+o);b.url=x+(x===b.url?(ja.test(b.url)?"&":"?")+"_="+o:"")}if(b.data&&l)b.url+=(ja.test(b.url)?"&":"?")+b.data;b.global&&c.active++===0&&c.event.trigger("ajaxStart");o=(o=sb.exec(b.url))&&(o[1]&&o[1].toLowerCase()!==location.protocol||o[2].toLowerCase()!==location.host);if(b.dataType==="script"&&h==="GET"&&o){var r=t.getElementsByTagName("head")[0]||t.documentElement,A=t.createElement("script");if(b.scriptCharset)A.charset=b.scriptCharset;
A.src=b.url;if(!d){var C=false;A.onload=A.onreadystatechange=function(){if(!C&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){C=true;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);A.onload=A.onreadystatechange=null;r&&A.parentNode&&r.removeChild(A)}}}r.insertBefore(A,r.firstChild);return B}var J=false,w=b.xhr();if(w){b.username?w.open(h,b.url,b.async,b.username,b.password):w.open(h,b.url,b.async);try{if(b.data!=null&&!l||a&&a.contentType)w.setRequestHeader("Content-Type",
b.contentType);if(b.ifModified){c.lastModified[b.url]&&w.setRequestHeader("If-Modified-Since",c.lastModified[b.url]);c.etag[b.url]&&w.setRequestHeader("If-None-Match",c.etag[b.url])}o||w.setRequestHeader("X-Requested-With","XMLHttpRequest");w.setRequestHeader("Accept",b.dataType&&b.accepts[b.dataType]?b.accepts[b.dataType]+", */*; q=0.01":b.accepts._default)}catch(I){}if(b.beforeSend&&b.beforeSend.call(b.context,w,b)===false){b.global&&c.active--===1&&c.event.trigger("ajaxStop");w.abort();return false}b.global&&
c.triggerGlobal(b,"ajaxSend",[w,b]);var L=w.onreadystatechange=function(m){if(!w||w.readyState===0||m==="abort"){J||c.handleComplete(b,w,e,f);J=true;if(w)w.onreadystatechange=c.noop}else if(!J&&w&&(w.readyState===4||m==="timeout")){J=true;w.onreadystatechange=c.noop;e=m==="timeout"?"timeout":!c.httpSuccess(w)?"error":b.ifModified&&c.httpNotModified(w,b.url)?"notmodified":"success";var p;if(e==="success")try{f=c.httpData(w,b.dataType,b)}catch(q){e="parsererror";p=q}if(e==="success"||e==="notmodified")d||
c.handleSuccess(b,w,e,f);else c.handleError(b,w,e,p);d||c.handleComplete(b,w,e,f);m==="timeout"&&w.abort();if(b.async)w=null}};try{var g=w.abort;w.abort=function(){w&&Function.prototype.call.call(g,w);L("abort")}}catch(i){}b.async&&b.timeout>0&&setTimeout(function(){w&&!J&&L("timeout")},b.timeout);try{w.send(l||b.data==null?null:b.data)}catch(n){c.handleError(b,w,null,n);c.handleComplete(b,w,e,f)}b.async||L();return w}},param:function(a,b){var d=[],e=function(h,l){l=c.isFunction(l)?l():l;d[d.length]=
encodeURIComponent(h)+"="+encodeURIComponent(l)};if(b===B)b=c.ajaxSettings.traditional;if(c.isArray(a)||a.jquery)c.each(a,function(){e(this.name,this.value)});else for(var f in a)da(f,a[f],b,e);return d.join("&").replace(tb,"+")}});c.extend({active:0,lastModified:{},etag:{},handleError:function(a,b,d,e){a.error&&a.error.call(a.context,b,d,e);a.global&&c.triggerGlobal(a,"ajaxError",[b,a,e])},handleSuccess:function(a,b,d,e){a.success&&a.success.call(a.context,e,d,b);a.global&&c.triggerGlobal(a,"ajaxSuccess",
[b,a])},handleComplete:function(a,b,d){a.complete&&a.complete.call(a.context,b,d);a.global&&c.triggerGlobal(a,"ajaxComplete",[b,a]);a.global&&c.active--===1&&c.event.trigger("ajaxStop")},triggerGlobal:function(a,b,d){(a.context&&a.context.url==null?c(a.context):c.event).trigger(b,d)},httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===1223}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),
e=a.getResponseHeader("Etag");if(d)c.lastModified[b]=d;if(e)c.etag[b]=e;return a.status===304},httpData:function(a,b,d){var e=a.getResponseHeader("content-type")||"",f=b==="xml"||!b&&e.indexOf("xml")>=0;a=f?a.responseXML:a.responseText;f&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b==="json"||!b&&e.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&e.indexOf("javascript")>=0)c.globalEval(a);return a}});
if(E.ActiveXObject)c.ajaxSettings.xhr=function(){if(E.location.protocol!=="file:")try{return new E.XMLHttpRequest}catch(a){}try{return new E.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}};c.support.ajax=!!c.ajaxSettings.xhr();var ea={},vb=/^(?:toggle|show|hide)$/,wb=/^([+\-]=)?([\d+.\-]+)(.*)$/,ba,pa=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b,d){if(a||a===0)return this.animate(S("show",
3),a,b,d);else{d=0;for(var e=this.length;d<e;d++){a=this[d];b=a.style.display;if(!c.data(a,"olddisplay")&&b==="none")b=a.style.display="";b===""&&c.css(a,"display")==="none"&&c.data(a,"olddisplay",qa(a.nodeName))}for(d=0;d<e;d++){a=this[d];b=a.style.display;if(b===""||b==="none")a.style.display=c.data(a,"olddisplay")||""}return this}},hide:function(a,b,d){if(a||a===0)return this.animate(S("hide",3),a,b,d);else{a=0;for(b=this.length;a<b;a++){d=c.css(this[a],"display");d!=="none"&&c.data(this[a],"olddisplay",
d)}for(a=0;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b,d){var e=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||e?this.each(function(){var f=e?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(S("toggle",3),a,b,d);return this},fadeTo:function(a,b,d,e){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d,e)},animate:function(a,b,d,e){var f=c.speed(b,
d,e);if(c.isEmptyObject(a))return this.each(f.complete);return this[f.queue===false?"each":"queue"](function(){var h=c.extend({},f),l,k=this.nodeType===1,o=k&&c(this).is(":hidden"),x=this;for(l in a){var r=c.camelCase(l);if(l!==r){a[r]=a[l];delete a[l];l=r}if(a[l]==="hide"&&o||a[l]==="show"&&!o)return h.complete.call(this);if(k&&(l==="height"||l==="width")){h.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY];if(c.css(this,"display")==="inline"&&c.css(this,"float")==="none")if(c.support.inlineBlockNeedsLayout)if(qa(this.nodeName)===
"inline")this.style.display="inline-block";else{this.style.display="inline";this.style.zoom=1}else this.style.display="inline-block"}if(c.isArray(a[l])){(h.specialEasing=h.specialEasing||{})[l]=a[l][1];a[l]=a[l][0]}}if(h.overflow!=null)this.style.overflow="hidden";h.curAnim=c.extend({},a);c.each(a,function(A,C){var J=new c.fx(x,h,A);if(vb.test(C))J[C==="toggle"?o?"show":"hide":C](a);else{var w=wb.exec(C),I=J.cur()||0;if(w){var L=parseFloat(w[2]),g=w[3]||"px";if(g!=="px"){c.style(x,A,(L||1)+g);I=(L||
1)/J.cur()*I;c.style(x,A,I+g)}if(w[1])L=(w[1]==="-="?-1:1)*L+I;J.custom(I,L,g)}else J.custom(I,C,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);this.each(function(){for(var e=d.length-1;e>=0;e--)if(d[e].elem===this){b&&d[e](true);d.splice(e,1)}});b||this.dequeue();return this}});c.each({slideDown:S("show",1),slideUp:S("hide",1),slideToggle:S("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){c.fn[a]=function(d,e,f){return this.animate(b,
d,e,f)}});c.extend({speed:function(a,b,d){var e=a&&typeof a==="object"?c.extend({},a):{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};e.duration=c.fx.off?0:typeof e.duration==="number"?e.duration:e.duration in c.fx.speeds?c.fx.speeds[e.duration]:c.fx.speeds._default;e.old=e.complete;e.complete=function(){e.queue!==false&&c(this).dequeue();c.isFunction(e.old)&&e.old.call(this)};return e},easing:{linear:function(a,b,d,e){return d+e*a},swing:function(a,b,d,e){return(-Math.cos(a*
Math.PI)/2+0.5)*e+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||c.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a=parseFloat(c.css(this.elem,this.prop));return a&&a>-1E4?a:0},custom:function(a,b,d){function e(l){return f.step(l)}
var f=this,h=c.fx;this.startTime=c.now();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;this.pos=this.state=0;e.elem=this.elem;if(e()&&c.timers.push(e)&&!ba)ba=setInterval(h.tick,h.interval)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true;
this.custom(this.cur(),0)},step:function(a){var b=c.now(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var e in this.options.curAnim)if(this.options.curAnim[e]!==true)d=false;if(d){if(this.options.overflow!=null&&!c.support.shrinkWrapBlocks){var f=this.elem,h=this.options;c.each(["","X","Y"],function(k,o){f.style["overflow"+o]=h.overflow[k]})}this.options.hide&&c(this.elem).hide();if(this.options.hide||
this.options.show)for(var l in this.options.curAnim)c.style(this.elem,l,this.options.orig[l]);this.options.complete.call(this.elem)}return false}else{a=b-this.startTime;this.state=a/this.options.duration;b=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||b](this.state,a,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a=
c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||c.fx.stop()},interval:13,stop:function(){clearInterval(ba);ba=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a===
b.elem}).length};var xb=/^t(?:able|d|h)$/i,Ia=/^(?:body|html)$/i;c.fn.offset="getBoundingClientRect"in t.documentElement?function(a){var b=this[0],d;if(a)return this.each(function(l){c.offset.setOffset(this,a,l)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);try{d=b.getBoundingClientRect()}catch(e){}var f=b.ownerDocument,h=f.documentElement;if(!d||!c.contains(h,b))return d||{top:0,left:0};b=f.body;f=fa(f);return{top:d.top+(f.pageYOffset||c.support.boxModel&&
h.scrollTop||b.scrollTop)-(h.clientTop||b.clientTop||0),left:d.left+(f.pageXOffset||c.support.boxModel&&h.scrollLeft||b.scrollLeft)-(h.clientLeft||b.clientLeft||0)}}:function(a){var b=this[0];if(a)return this.each(function(x){c.offset.setOffset(this,a,x)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d,e=b.offsetParent,f=b.ownerDocument,h=f.documentElement,l=f.body;d=(f=f.defaultView)?f.getComputedStyle(b,null):b.currentStyle;
for(var k=b.offsetTop,o=b.offsetLeft;(b=b.parentNode)&&b!==l&&b!==h;){if(c.offset.supportsFixedPosition&&d.position==="fixed")break;d=f?f.getComputedStyle(b,null):b.currentStyle;k-=b.scrollTop;o-=b.scrollLeft;if(b===e){k+=b.offsetTop;o+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&xb.test(b.nodeName))){k+=parseFloat(d.borderTopWidth)||0;o+=parseFloat(d.borderLeftWidth)||0}e=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&d.overflow!=="visible"){k+=
parseFloat(d.borderTopWidth)||0;o+=parseFloat(d.borderLeftWidth)||0}d=d}if(d.position==="relative"||d.position==="static"){k+=l.offsetTop;o+=l.offsetLeft}if(c.offset.supportsFixedPosition&&d.position==="fixed"){k+=Math.max(h.scrollTop,l.scrollTop);o+=Math.max(h.scrollLeft,l.scrollLeft)}return{top:k,left:o}};c.offset={initialize:function(){var a=t.body,b=t.createElement("div"),d,e,f,h=parseFloat(c.css(a,"marginTop"))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",
height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";a.insertBefore(b,a.firstChild);d=b.firstChild;e=d.firstChild;f=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=e.offsetTop!==5;this.doesAddBorderForTableAndCells=
f.offsetTop===5;e.style.position="fixed";e.style.top="20px";this.supportsFixedPosition=e.offsetTop===20||e.offsetTop===15;e.style.position=e.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=e.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==h;a.removeChild(b);c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.css(a,
"marginTop"))||0;d+=parseFloat(c.css(a,"marginLeft"))||0}return{top:b,left:d}},setOffset:function(a,b,d){var e=c.css(a,"position");if(e==="static")a.style.position="relative";var f=c(a),h=f.offset(),l=c.css(a,"top"),k=c.css(a,"left"),o=e==="absolute"&&c.inArray("auto",[l,k])>-1;e={};var x={};if(o)x=f.position();l=o?x.top:parseInt(l,10)||0;k=o?x.left:parseInt(k,10)||0;if(c.isFunction(b))b=b.call(a,d,h);if(b.top!=null)e.top=b.top-h.top+l;if(b.left!=null)e.left=b.left-h.left+k;"using"in b?b.using.call(a,
e):f.css(e)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),e=Ia.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.css(a,"marginTop"))||0;d.left-=parseFloat(c.css(a,"marginLeft"))||0;e.top+=parseFloat(c.css(b[0],"borderTopWidth"))||0;e.left+=parseFloat(c.css(b[0],"borderLeftWidth"))||0;return{top:d.top-e.top,left:d.left-e.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||t.body;a&&!Ia.test(a.nodeName)&&
c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(e){var f=this[0],h;if(!f)return null;if(e!==B)return this.each(function(){if(h=fa(this))h.scrollTo(!a?e:c(h).scrollLeft(),a?e:c(h).scrollTop());else this[d]=e});else return(h=fa(f))?"pageXOffset"in h?h[a?"pageYOffset":"pageXOffset"]:c.support.boxModel&&h.document.documentElement[d]||h.document.body[d]:f[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase();
c.fn["inner"+b]=function(){return this[0]?parseFloat(c.css(this[0],d,"padding")):null};c.fn["outer"+b]=function(e){return this[0]?parseFloat(c.css(this[0],d,e?"margin":"border")):null};c.fn[d]=function(e){var f=this[0];if(!f)return e==null?null:this;if(c.isFunction(e))return this.each(function(l){var k=c(this);k[d](e.call(this,l,k[d]()))});if(c.isWindow(f))return f.document.compatMode==="CSS1Compat"&&f.document.documentElement["client"+b]||f.document.body["client"+b];else if(f.nodeType===9)return Math.max(f.documentElement["client"+
b],f.body["scroll"+b],f.documentElement["scroll"+b],f.body["offset"+b],f.documentElement["offset"+b]);else if(e===B){f=c.css(f,d);var h=parseFloat(f);return c.isNaN(h)?f:h}else return this.css(d,typeof e==="string"?e:e+"px")}})})(window);
BIN
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/shared/less.png
Executable file
BIN
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/shared/more.png
Executable file
BIN
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/shared/source.png
Executable file
113
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/shared/style.css
Normal file

@@ -0,0 +1,113 @@
@font-face {
  font-family: 'Ubuntu';
  font-style: normal;
  font-weight: normal;
  src: local('Ubuntu'), local('Ubuntu-Regular'),
       url('?__debugger__=yes&cmd=resource&f=ubuntu.ttf') format('truetype');
}

body, input { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
              'Verdana', sans-serif; color: #000; text-align: center;
              margin: 1em; padding: 0; font-size: 15px; }
h1, h2, h3 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
             'Geneva', 'Verdana', sans-serif; font-weight: normal; }

input { background-color: #fff; margin: 0; text-align: left;
        outline: none !important; }
a { color: #11557C; }
a:hover { color: #177199; }
pre, code, table.source,
textarea { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
           monospace; font-size: 14px; }

div.debugger { text-align: left; padding: 12px; margin: auto;
               background-color: white; }
h1 { font-size: 36px; margin: 0 0 0.3em 0; }
div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap; }
div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
div.footer { font-size: 13px; text-align: right; margin: 30px 0;
             color: #86989B; }

h2 { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
     background-color: #11557C; color: white; }
h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }

div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
div.plain p { margin: 0; }
div.plain textarea,
div.plain pre { margin: 10px 0 0 0; padding: 4px;
                background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.plain textarea { width: 99%; height: 300px; }
div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
                    background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.traceback pre,
div.box table.source { white-space: pre-wrap;       /* css-3 should we be so lucky... */
                       white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
                       white-space: -pre-wrap;      /* Opera 4-6 ?? */
                       white-space: -o-pre-wrap;    /* Opera 7 ?? */
                       word-wrap: break-word;       /* Internet Explorer 5.5+ */
                       _white-space: pre;           /* IE only hack to re-specify in
                                                       addition to word-wrap */ }
div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
div.traceback blockquote { margin: 1em 0 0 0; padding: 0; }
div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
div.traceback img:hover { background-color: #ddd; cursor: pointer;
                          border-color: #BFDDE0; }
div.traceback pre:hover img { display: block; }
div.traceback cite.filename { font-style: normal; color: #3B666B; }

pre.console { border: 1px solid #ccc; background: white!important;
              color: black; padding: 5px!important;
              margin: 3px 0 0 0!important; cursor: default!important;
              max-height: 400px; overflow: auto; }
pre.console form { color: #555; }
pre.console input { background-color: transparent; color: #555;
                    width: 90%; font-family: 'Consolas', 'Deja Vu Sans Mono',
                    'Bitstream Vera Sans Mono', monospace; font-size: 14px;
                    border: none!important; }

span.string { color: #30799B; }
span.number { color: #9C1A1C; }
span.help { color: #3A7734; }
span.object { color: #485F6E; }
span.extended { opacity: 0.5; }
span.extended:hover { opacity: 1; }
a.toggle { text-decoration: none; background-repeat: no-repeat;
           background-position: center center;
           background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
a.toggle:hover { background-color: #444; }
a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }

pre.console div.traceback,
pre.console div.box { margin: 5px 10px; white-space: normal;
                      border: 1px solid #11557C; padding: 10px;
                      font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
                      'Verdana', sans-serif; }
pre.console div.box h3,
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
                               background: #11557C; color: white; }

pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
                                            margin: 20px -10px -10px -10px;
                                            padding: 10px; border-top: 1px solid #BFDDE0;
                                            background: #E8EFF0; }
pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }

pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
pre.console div.box table { margin-top: 6px; }
pre.console div.box pre { border: none; }
pre.console div.box pre.help { background-color: white; }
pre.console div.box pre.help:hover { cursor: default; }
pre.console table tr { vertical-align: top; }
div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }

div.box table.source { border-collapse: collapse; width: 100%; background: #E8EFF0 }
div.box table.source td { border-top: 1px solid #E8EFF0; padding: 4px 0 4px 10px; }
div.box table.source td.lineno { color: #999; padding-right: 10px; width: 1px; }
div.box table.source tr.in-frame { background-color: white; }
div.box table.source tr.current { background-color: #EEF7F8; color: #23707E; }
div.sourceview { max-height: 400px; overflow: auto; border: 1px solid #ccc; }
508
Linux_x86_64/lib/python2.7/site-packages/werkzeug/debug/tbtools.py
Normal file

@@ -0,0 +1,508 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.debug.tbtools
    ~~~~~~~~~~~~~~~~~~~~~~

    This module provides various traceback related utility functions.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError

from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types


_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'

system_exceptions = (SystemExit, KeyboardInterrupt)
try:
    system_exceptions += (GeneratorExit,)
except NameError:
    pass


HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
  "http://www.w3.org/TR/html4/loose.dtd">
<html>
  <head>
    <title>%(title)s // Werkzeug Debugger</title>
    <link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
    <!-- We need to make sure this has a favicon so that the debugger does not by
         accident trigger a request to /favicon.ico which might change the application
         state. -->
    <link rel="shortcut icon" href="?__debugger__=yes&cmd=resource&f=console.png">
    <script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
    <script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
    <script type="text/javascript">
      var TRACEBACK = %(traceback_id)d,
          CONSOLE_MODE = %(console)s,
          EVALEX = %(evalex)s,
          SECRET = "%(secret)s";
    </script>
  </head>
  <body>
    <div class="debugger">
'''
FOOTER = u'''\
      <div class="footer">
        Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
        friendly Werkzeug powered traceback interpreter.
      </div>
    </div>
  </body>
</html>
'''

PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
  <p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
  <form action="/?__debugger__=yes&cmd=paste" method="post">
    <p>
      <input type="hidden" name="language" value="pytb">
      This is the Copy/Paste friendly version of the traceback.  <span
      class="pastemessage">You can also paste this traceback into
      a <a href="https://gist.github.com/">gist</a>:
      <input type="submit" value="create paste"></span>
    </p>
    <textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
  </form>
</div>
<div class="explanation">
  The debugger caught an exception in your WSGI application.  You can now
  look at the traceback which led to the error.  <span class="nojavascript">
  If you enable JavaScript you can also use additional features such as code
  execution (if the evalex feature is enabled), automatic pasting of the
  exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--

%(plaintext_cs)s

-->
'''

CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application.  The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER

SUMMARY_HTML = u'''\
<div class="%(classes)s">
  %(title)s
  <ul>%(frames)s</ul>
  %(description)s
</div>
'''

FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
  <h4>File <cite class="filename">"%(filename)s"</cite>,
      line <em class="line">%(lineno)s</em>,
      in <code class="function">%(function_name)s</code></h4>
  <pre>%(current_line)s</pre>
</div>
'''

SOURCE_TABLE_HTML = u'<table class=source>%s</table>'

SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
  <td class=lineno>%(lineno)s</td>
  <td>%(code)s</td>
</tr>
'''


def render_console_html(secret):
    return CONSOLE_HTML % {
        'evalex': 'true',
        'console': 'true',
        'title': 'Console',
        'secret': secret,
        'traceback_id': -1
    }


def get_current_traceback(ignore_system_exceptions=False,
                          show_hidden_frames=False, skip=0):
    """Get the current exception info as `Traceback` object.  If
    `ignore_system_exceptions` is set to `True`, system exceptions such as
    generator exit, system exit or keyboard interrupt are reraised instead
    of being wrapped in a `Traceback`.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if ignore_system_exceptions and exc_type in system_exceptions:
        raise
    for x in range_type(skip):
        if tb.tb_next is None:
            break
        tb = tb.tb_next
    tb = Traceback(exc_type, exc_value, tb)
    if not show_hidden_frames:
        tb.filter_hidden_frames()
    return tb


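# Usage sketch (hypothetical calling code, not defined in this module): a
# WSGI wrapper would typically capture and render the current exception
# roughly like this::
#
#     try:
#         app_iter = app(environ, start_response)
#     except Exception:
#         tb = get_current_traceback(skip=1, ignore_system_exceptions=True)
#         tb.log(sys.stderr)
#         html = tb.render_full(evalex=False)
#
# where ``skip=1`` drops the wrapper's own frame from the traceback.

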
class Line(object):
    """Helper for the source renderer."""
    __slots__ = ('lineno', 'code', 'in_frame', 'current')

    def __init__(self, lineno, code):
        self.lineno = lineno
        self.code = code
        self.in_frame = False
        self.current = False

    def classes(self):
        rv = ['line']
        if self.in_frame:
            rv.append('in-frame')
        if self.current:
            rv.append('current')
        return rv
    classes = property(classes)

    def render(self):
        return SOURCE_LINE_HTML % {
            'classes': u' '.join(self.classes),
            'lineno': self.lineno,
            'code': escape(self.code)
        }


class Traceback(object):
    """Wraps a traceback."""

    def __init__(self, exc_type, exc_value, tb):
        self.exc_type = exc_type
        self.exc_value = exc_value
        if not isinstance(exc_type, str):
            exception_type = exc_type.__name__
            if exc_type.__module__ not in ('__builtin__', 'exceptions'):
                exception_type = exc_type.__module__ + '.' + exception_type
        else:
            exception_type = exc_type
        self.exception_type = exception_type

        # we only add frames to the list that are not hidden.  This follows
        # the magic variables as defined by paste.exceptions.collector
        self.frames = []
        while tb:
            self.frames.append(Frame(exc_type, exc_value, tb))
            tb = tb.tb_next

    def filter_hidden_frames(self):
        """Remove the frames according to the paste spec."""
        if not self.frames:
            return

        new_frames = []
        hidden = False
        for frame in self.frames:
            hide = frame.hide
            if hide in ('before', 'before_and_this'):
                new_frames = []
                hidden = False
                if hide == 'before_and_this':
                    continue
            elif hide in ('reset', 'reset_and_this'):
                hidden = False
                if hide == 'reset_and_this':
                    continue
            elif hide in ('after', 'after_and_this'):
                hidden = True
                if hide == 'after_and_this':
                    continue
            elif hide or hidden:
                continue
            new_frames.append(frame)

        # if we only have one frame and that frame is from the codeop
        # module, remove it.
        if len(new_frames) == 1 and self.frames[0].module == 'codeop':
            del self.frames[:]

        # if the last frame is missing something went terribly wrong :(
        elif self.frames[-1] in new_frames:
            self.frames[:] = new_frames

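    # Illustrative note (the example function is hypothetical, the marker
    # itself is the paste convention handled above): application code opts
    # a frame out of the rendered traceback by defining the magic local::
    #
    #     def internal_dispatch():
    #         __traceback_hide__ = True   # this frame is filtered out
    #         ...
    #
    # String values such as 'before' or 'after' additionally hide the
    # neighbouring frames, as implemented in ``filter_hidden_frames``.
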
    def is_syntax_error(self):
        """Is it a syntax error?"""
        return isinstance(self.exc_value, SyntaxError)
    is_syntax_error = property(is_syntax_error)

    def exception(self):
        """String representation of the exception."""
        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
        rv = ''.join(buf).strip()
        return rv.decode('utf-8', 'replace') if PY2 else rv
    exception = property(exception)

    def log(self, logfile=None):
        """Log the ASCII traceback into a file object."""
        if logfile is None:
            logfile = sys.stderr
        tb = self.plaintext.rstrip() + u'\n'
        if PY2:
            # rebind the encoded value; the result of ``encode`` would
            # otherwise be silently discarded
            tb = tb.encode('utf-8', 'replace')
        logfile.write(tb)

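    # Usage sketch (hypothetical): ``tb.log()`` writes the plaintext
    # traceback to ``sys.stderr``; any file-like object redirects it,
    # e.g. ``tb.log(open('error.log', 'a'))``.
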
    def paste(self):
        """Create a paste and return the paste id."""
        data = json.dumps({
            'description': 'Werkzeug Internal Server Error',
            'public': False,
            'files': {
                'traceback.txt': {
                    'content': self.plaintext
                }
            }
        }).encode('utf-8')
        try:
            from urllib2 import urlopen
        except ImportError:
            from urllib.request import urlopen
        rv = urlopen('https://api.github.com/gists', data=data)
        resp = json.loads(rv.read().decode('utf-8'))
        rv.close()
        return {
            'url': resp['html_url'],
            'id': resp['id']
        }

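    # Usage note (illustrative): ``tb.paste()`` uploads the plaintext
    # traceback as a private gist and returns a dict of the form
    # ``{'url': ..., 'id': ...}`` which a frontend can use to link to the
    # created paste.
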
    def render_summary(self, include_title=True):
        """Render the traceback for the interactive console."""
        title = ''
        frames = []
        classes = ['traceback']
        if not self.frames:
            classes.append('noframe-traceback')

        if include_title:
            if self.is_syntax_error:
                title = u'Syntax Error'
            else:
                title = u'Traceback <em>(most recent call last)</em>:'

        for frame in self.frames:
            frames.append(u'<li%s>%s' % (
                frame.info and u' title="%s"' % escape(frame.info) or u'',
                frame.render()
            ))

        if self.is_syntax_error:
            description_wrapper = u'<pre class=syntaxerror>%s</pre>'
        else:
            description_wrapper = u'<blockquote>%s</blockquote>'

        return SUMMARY_HTML % {
            'classes': u' '.join(classes),
            'title': title and u'<h3>%s</h3>' % title or u'',
            'frames': u'\n'.join(frames),
            'description': description_wrapper % escape(self.exception)
        }

    def render_full(self, evalex=False, secret=None):
        """Render the Full HTML page with the traceback info."""
        exc = escape(self.exception)
        return PAGE_HTML % {
            'evalex': evalex and 'true' or 'false',
            'console': 'false',
            'title': exc,
            'exception': exc,
            'exception_type': escape(self.exception_type),
            'summary': self.render_summary(include_title=False),
            'plaintext': self.plaintext,
            'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
            'traceback_id': self.id,
            'secret': secret
        }

    def generate_plaintext_traceback(self):
        """Like the plaintext attribute but returns a generator"""
        yield u'Traceback (most recent call last):'
        for frame in self.frames:
            yield u'  File "%s", line %s, in %s' % (
                frame.filename,
                frame.lineno,
                frame.function_name
            )
            yield u'    ' + frame.current_line.strip()
        yield self.exception

    def plaintext(self):
        return u'\n'.join(self.generate_plaintext_traceback())
    plaintext = cached_property(plaintext)

    id = property(lambda x: id(x))


class Frame(object):
    """A single frame in a traceback."""

    def __init__(self, exc_type, exc_value, tb):
        self.lineno = tb.tb_lineno
        self.function_name = tb.tb_frame.f_code.co_name
        self.locals = tb.tb_frame.f_locals
        self.globals = tb.tb_frame.f_globals

        fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
        if fn[-4:] in ('.pyo', '.pyc'):
            fn = fn[:-1]
        # if it's a file on the file system resolve the real filename.
        if os.path.isfile(fn):
            fn = os.path.realpath(fn)
        self.filename = fn
        self.module = self.globals.get('__name__')
        self.loader = self.globals.get('__loader__')
        self.code = tb.tb_frame.f_code

        # support for paste's traceback extensions
        self.hide = self.locals.get('__traceback_hide__', False)
        info = self.locals.get('__traceback_info__')
        if info is not None:
            try:
                info = text_type(info)
            except UnicodeError:
                info = str(info).decode('utf-8', 'replace')
        self.info = info

def render(self):
|
||||
"""Render a single frame in a traceback."""
|
||||
return FRAME_HTML % {
|
||||
'id': self.id,
|
||||
'filename': escape(self.filename),
|
||||
'lineno': self.lineno,
|
||||
'function_name': escape(self.function_name),
|
||||
'current_line': escape(self.current_line.strip())
|
||||
}
|
||||
|
||||
def get_annotated_lines(self):
|
||||
"""Helper function that returns lines with extra information."""
|
||||
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
|
||||
|
||||
# find function definition and mark lines
|
||||
if hasattr(self.code, 'co_firstlineno'):
|
||||
lineno = self.code.co_firstlineno - 1
|
||||
while lineno > 0:
|
||||
if _funcdef_re.match(lines[lineno].code):
|
||||
break
|
||||
lineno -= 1
|
||||
try:
|
||||
offset = len(inspect.getblock([x.code + '\n' for x
|
||||
in lines[lineno:]]))
|
||||
except TokenError:
|
||||
offset = 0
|
||||
for line in lines[lineno:lineno + offset]:
|
||||
line.in_frame = True
|
||||
|
||||
# mark current line
|
||||
try:
|
||||
lines[self.lineno - 1].current = True
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
return lines
|
||||
|
||||
def render_source(self):
|
||||
"""Render the sourcecode."""
|
||||
return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in
|
||||
self.get_annotated_lines())
|
||||
|
||||
def eval(self, code, mode='single'):
|
||||
"""Evaluate code in the context of the frame."""
|
||||
if isinstance(code, string_types):
|
||||
if PY2 and isinstance(code, unicode):
|
||||
code = UTF8_COOKIE + code.encode('utf-8')
|
||||
code = compile(code, '<interactive>', mode)
|
||||
return eval(code, self.globals, self.locals)
|
||||
|
||||
@cached_property
|
||||
def sourcelines(self):
|
||||
"""The sourcecode of the file as list of unicode strings."""
|
||||
# get sourcecode from loader or file
|
||||
source = None
|
||||
if self.loader is not None:
|
||||
try:
|
||||
if hasattr(self.loader, 'get_source'):
|
||||
source = self.loader.get_source(self.module)
|
||||
elif hasattr(self.loader, 'get_source_by_code'):
|
||||
source = self.loader.get_source_by_code(self.code)
|
||||
except Exception:
|
||||
# we munch the exception so that we don't cause troubles
|
||||
# if the loader is broken.
|
||||
pass
|
||||
|
||||
if source is None:
|
||||
try:
|
||||
f = open(self.filename)
|
||||
except IOError:
|
||||
return []
|
||||
try:
|
||||
source = f.read()
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
# already unicode? return right away
|
||||
if isinstance(source, text_type):
|
||||
return source.splitlines()
|
||||
|
||||
# yes. it should be ascii, but we don't want to reject too many
|
||||
# characters in the debugger if something breaks
|
||||
charset = 'utf-8'
|
||||
if source.startswith(UTF8_COOKIE):
|
||||
source = source[3:]
|
||||
else:
|
||||
for idx, match in enumerate(_line_re.finditer(source)):
|
||||
match = _line_re.search(match.group())
|
||||
if match is not None:
|
||||
charset = match.group(1)
|
||||
break
|
||||
if idx > 1:
|
||||
break
|
||||
|
||||
# on broken cookies we fall back to utf-8 too
|
||||
try:
|
||||
codecs.lookup(charset)
|
||||
except LookupError:
|
||||
charset = 'utf-8'
|
||||
|
||||
return source.decode(charset, 'replace').splitlines()
|
||||
|
||||
@property
|
||||
def current_line(self):
|
||||
try:
|
||||
return self.sourcelines[self.lineno - 1]
|
||||
except IndexError:
|
||||
return u''
|
||||
|
||||
@cached_property
|
||||
def console(self):
|
||||
return Console(self.globals, self.locals)
|
||||
|
||||
id = property(lambda x: id(x))
|
||||
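
# Editor's sketch, not part of the vendored file: `Frame` can be driven by
# hand from a live traceback; the import path below is an assumption
# (werkzeug.debug.tbtools).
#
#     import sys
#     from werkzeug.debug.tbtools import Frame
#
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         exc_type, exc_value, tb = sys.exc_info()
#         frame = Frame(exc_type, exc_value, tb)
#         print('%s:%s in %s' % (frame.filename, frame.lineno,
#                                frame.function_name))
#         print(frame.current_line.strip())   # the failing source line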

588
Linux_x86_64/lib/python2.7/site-packages/werkzeug/exceptions.py
Normal file
@@ -0,0 +1,588 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.exceptions
    ~~~~~~~~~~~~~~~~~~~

    This module implements a number of Python exceptions you can raise from
    within your views to trigger a standard non-200 response.


    Usage Example
    -------------

    ::

        from werkzeug.wrappers import BaseRequest
        from werkzeug.wsgi import responder
        from werkzeug.exceptions import HTTPException, NotFound

        def view(request):
            raise NotFound()

        @responder
        def application(environ, start_response):
            request = BaseRequest(environ)
            try:
                return view(request)
            except HTTPException as e:
                return e


    As you can see from this example those exceptions are callable WSGI
    applications. Because of Python 2.4 compatibility those do not extend
    from the response objects but only from the Python exception class.

    As a matter of fact they are not Werkzeug response objects. However you
    can get a response object by calling ``get_response()`` on an HTTP
    exception.

    Keep in mind that you have to pass an environment to ``get_response()``
    because some errors fetch additional information from the WSGI
    environment.

    If you want to hook in a different exception page for, say, a 404 status
    code, you can add a second except clause for that specific subclass of
    the error::

        @responder
        def application(environ, start_response):
            request = BaseRequest(environ)
            try:
                return view(request)
            except NotFound as e:
                return not_found(request)
            except HTTPException as e:
                return e


    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import sys

# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]

from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
     implements_to_string

from werkzeug.wrappers import Response


@implements_to_string
class HTTPException(Exception):
    """
    Baseclass for all HTTP exceptions. This exception can be called as WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """

    code = None
    description = None

    def __init__(self, description=None, response=None):
        Exception.__init__(self)
        if description is not None:
            self.description = description
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """This method returns a new subclass of the exception provided that
        also is a subclass of `BadRequest`.
        """
        class newcls(cls, exception):
            def __init__(self, arg=None, *args, **kwargs):
                cls.__init__(self, *args, **kwargs)
                exception.__init__(self, arg)
        newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
        newcls.__name__ = name or cls.__name__ + exception.__name__
        return newcls

    @property
    def name(self):
        """The status name."""
        return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')

    def get_description(self, environ=None):
        """Get the description."""
        return u'<p>%s</p>' % escape(self.description)

    def get_body(self, environ=None):
        """Get the HTML body."""
        return text_type((
            u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
            u'<title>%(code)s %(name)s</title>\n'
            u'<h1>%(name)s</h1>\n'
            u'%(description)s\n'
        ) % {
            'code': self.code,
            'name': escape(self.name),
            'description': self.get_description(environ)
        })

    def get_headers(self, environ=None):
        """Get a list of headers."""
        return [('Content-Type', 'text/html')]

    def get_response(self, environ=None):
        """Get a response object. If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request. This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        if self.response is not None:
            return self.response
        if environ is not None:
            environ = _get_environ(environ)
        headers = self.get_headers(environ)
        return Response(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    def __str__(self):
        return '%d: %s' % (self.code, self.name)

    def __repr__(self):
        return '<%s \'%s\'>' % (self.__class__.__name__, self)
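
# Editor's sketch, not part of the vendored file: a custom error is just a
# subclass carrying `code` and `description`, mirroring the classes below:
#
#     class PaymentRequired(HTTPException):
#         """*402* `Payment Required`"""
#         code = 402
#         description = 'Payment is required for this resource.'
#
# `_find_exceptions()` near the end of this module registers any such class
# automatically because it has a non-None `code` attribute.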


class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    code = 400
    description = (
        'The browser (or proxy) sent a request that this server could '
        'not understand.'
    )


class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client. Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server. Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """


class SecurityError(BadRequest):
    """Raised if something triggers a security error. This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """


class Unauthorized(HTTPException):
    """*401* `Unauthorized`

    Raise if the user is not authorized. Also used if you want to use HTTP
    basic auth.
    """
    code = 401
    description = (
        'The server could not verify that you are authorized to access '
        'the URL requested. You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.'
    )


class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """
    code = 403
    description = (
        'You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.'
    )


class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """
    code = 404
    description = (
        'The requested URL was not found on the server. '
        'If you entered the URL manually please check your spelling and '
        'try again.'
    )


class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the server used a method the resource does not handle. For
    example `POST` if the resource is view only. Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """
    code = 405
    description = 'The method is not allowed for the requested URL.'

    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid HTTP methods;
        starting with Werkzeug 0.3 the list will be mandatory."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ):
        headers = HTTPException.get_headers(self, environ)
        if self.valid_methods:
            headers.append(('Allow', ', '.join(self.valid_methods)))
        return headers


class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """
    code = 406

    description = (
        'The resource identified by the request is only capable of '
        'generating response entities which have content characteristics '
        'not acceptable according to the accept headers sent in the '
        'request.'
    )


class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signal a timeout.
    """
    code = 408
    description = (
        'The server closed the network connection because the browser '
        'didn\'t finish the request within the specified time.'
    )


class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.

    .. versionadded:: 0.7
    """
    code = 409
    description = (
        'A conflict happened while processing the request. The resource '
        'might have been modified while the request was being processed.'
    )


class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without new location.
    """
    code = 410
    description = (
        'The requested URL is no longer available on this server and '
        'there is no forwarding address.</p><p>If you followed a link '
        'from a foreign page, please contact the author of this page.'
    )


class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """
    code = 411
    description = (
        'A request with this method requires a valid <code>Content-'
        'Length</code> header.'
    )


class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """
    code = 412
    description = (
        'The precondition on the request for the URL failed positive '
        'evaluation.'
    )


class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code one should return if the data submitted exceeded a given
    limit.
    """
    code = 413
    description = (
        'The data value transmitted exceeds the capacity limit.'
    )


class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """
    code = 414
    description = (
        'The length of the requested URL exceeds the capacity limit '
        'for this server. The request cannot be processed.'
    )


class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """
    code = 415
    description = (
        'The server does not support the media type transmitted in '
        'the request.'
    )


class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for a part of the file that lies beyond the end
    of the file.

    .. versionadded:: 0.7
    """
    code = 416
    description = (
        'The server cannot provide the requested range.'
    )


class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    The server cannot meet the requirements of the Expect request-header.

    .. versionadded:: 0.7
    """
    code = 417
    description = (
        'The server could not meet the requirements of the Expect header.'
    )


class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.

    .. versionadded:: 0.7
    """
    code = 418
    description = (
        'This server is a teapot, not a coffee machine.'
    )


class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """
    code = 422
    description = (
        'The request was well-formed but was unable to be followed '
        'due to semantic errors.'
    )


class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By
    requiring each client to include a conditional header ("If-Match" or
    "If-Unmodified-Since") with the proper value retained from a recent GET
    request, the server ensures that each client has at least seen the
    previous revision of the resource.
    """
    code = 428
    description = (
        'This request is required to be conditional; try using "If-Match" '
        'or "If-Unmodified-Since".'
    )


class TooManyRequests(HTTPException):
    """*429* `Too Many Requests`

    The server is limiting the rate at which this user receives responses, and
    this request exceeds that rate. (The server may use any convenient method
    to identify users and their request rates.) The server may include a
    "Retry-After" header to indicate how long the user should wait before
    retrying.
    """
    code = 429
    description = (
        'This user has exceeded an allotted request count. Try again later.'
    )


class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    The server refuses to process the request because the header fields are
    too large. One or more individual fields may be too large, or the set of
    all headers is too large.
    """
    code = 431
    description = (
        'One or more header fields exceeds the maximum size.'
    )


class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raise if an internal server error occurred. This is a good fallback if an
    unknown error occurred in the dispatcher.
    """
    code = 500
    description = (
        'The server encountered an internal error and was unable to '
        'complete your request. Either the server is overloaded or there '
        'is an error in the application.'
    )


class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raise if the application does not support the action requested by the
    browser.
    """
    code = 501
    description = (
        'The server does not support the action requested by the '
        'browser.'
    )


class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """
    code = 502
    description = (
        'The proxy server received an invalid response from an upstream '
        'server.'
    )


class ServiceUnavailable(HTTPException):
    """*503* `Service Unavailable`

    Status code you should return if a service is temporarily unavailable.
    """
    code = 503
    description = (
        'The server is temporarily unable to service your request due to '
        'maintenance downtime or capacity problems. Please try again '
        'later.'
    )


default_exceptions = {}
__all__ = ['HTTPException']

def _find_exceptions():
    for name, obj in iteritems(globals()):
        try:
            if getattr(obj, 'code', None) is not None:
                default_exceptions[obj.code] = obj
                __all__.append(obj.__name__)
        except TypeError: # pragma: no cover
            continue
_find_exceptions()
del _find_exceptions


class Aborter(object):
    """
    When passed a dict of code -> exception items it can be used as
    callable that raises exceptions. If the first argument to the
    callable is an integer it will be looked up in the mapping, if it's
    a WSGI application it will be raised in a proxy exception.

    The rest of the arguments are forwarded to the exception constructor.
    """

    def __init__(self, mapping=None, extra=None):
        if mapping is None:
            mapping = default_exceptions
        self.mapping = dict(mapping)
        if extra is not None:
            self.mapping.update(extra)

    def __call__(self, code, *args, **kwargs):
        if not args and not kwargs and not isinstance(code, integer_types):
            raise HTTPException(response=code)
        if code not in self.mapping:
            raise LookupError('no exception for %r' % code)
        raise self.mapping[code](*args, **kwargs)

abort = Aborter()
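
# Editor's sketch, not part of the vendored file: the module-level `abort`
# instance raises by status code, forwarding extra arguments to the
# exception constructor:
#
#     from werkzeug.exceptions import abort
#
#     abort(404)                      # raises NotFound
#     abort(400, 'missing "name"')    # raises BadRequest with a description
#
# A non-integer argument is wrapped in a bare HTTPException whose
# get_response() returns it unchanged.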


#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
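
# Editor's sketch, not part of the vendored file: the wrapped class is both
# exceptions at once, so a failed dict lookup can double as a 400 response:
#
#     err = BadRequestKeyError('user_id')
#     assert isinstance(err, KeyError)
#     assert isinstance(err, BadRequest)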


# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES

521
Linux_x86_64/lib/python2.7/site-packages/werkzeug/formparser.py
Normal file
@@ -0,0 +1,521 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.formparser
    ~~~~~~~~~~~~~~~~~~~

    This module implements the form parsing. It supports url-encoded forms
    as well as non-nested multipart uploads.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import re
import codecs
from io import BytesIO
from tempfile import TemporaryFile
from itertools import chain, repeat, tee
from functools import update_wrapper

from werkzeug._compat import to_native, text_type
from werkzeug.urls import url_decode_stream
from werkzeug.wsgi import make_line_iter, \
     get_input_stream, get_content_length
from werkzeug.datastructures import Headers, FileStorage, MultiDict
from werkzeug.http import parse_options_header


#: an iterator that yields empty strings
_empty_string_iter = repeat('')

#: a regular expression for multipart boundaries
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')

#: http transfer encodings we support for multipart messages that are
#: also available in python.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])


def default_stream_factory(total_content_length, content_type, filename,
                           content_length=None):
    """The stream factory that is used per default."""
    if total_content_length > 1024 * 500:
        return TemporaryFile('wb+')
    return BytesIO()
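
# Editor's sketch, not part of the vendored file: a replacement factory only
# needs to accept the arguments passed by MultiPartParser.start_file_streaming
# below (the signature above was reordered to match that call; the vendored
# copy declared `filename` before `content_type`). This one always spools
# uploads to disk:
#
#     def disk_stream_factory(total_content_length, content_type,
#                             filename, content_length=None):
#         return TemporaryFile('wb+')
#
#     # later: parse_form_data(environ, stream_factory=disk_stream_factory)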


def parse_form_data(environ, stream_factory=None, charset='utf-8',
                    errors='replace', max_form_memory_size=None,
                    max_content_length=None, cls=None,
                    silent=True):
    """Parse the form data in the environ and return it as tuple in the form
    ``(stream, form, files)``. You should only call this method if the
    transport method is `POST`, `PUT`, or `PATCH`.

    If the mimetype of the data transmitted is `multipart/form-data` the
    files multidict will be filled with `FileStorage` objects. If the
    mimetype is unknown the input stream is wrapped and returned as first
    argument, else the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.

    Have a look at :ref:`dealing-with-request-data` for more details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    return FormDataParser(stream_factory, charset, errors,
                          max_form_memory_size, max_content_length,
                          cls, silent).parse_from_environ(environ)
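
# Editor's sketch, not part of the vendored file: building a POST environ
# with werkzeug.test.EnvironBuilder and running it through parse_form_data.
#
#     from werkzeug.test import EnvironBuilder
#     from werkzeug.formparser import parse_form_data
#
#     builder = EnvironBuilder(method='POST', data={'name': 'value'})
#     stream, form, files = parse_form_data(builder.get_environ())
#     assert form['name'] == 'value'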


def exhaust_stream(f):
    """Helper decorator for methods that exhausts the stream on return."""
    def wrapper(self, stream, *args, **kwargs):
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            exhaust = getattr(stream, 'exhaust', None)
            if exhaust is not None:
                exhaust()
            else:
                while 1:
                    chunk = stream.read(1024 * 64)
                    if not chunk:
                        break
    return update_wrapper(wrapper, f)


class FormDataParser(object):
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    .. versionadded:: 0.8

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """

    def __init__(self, stream_factory=None, charset='utf-8',
                 errors='replace', max_form_memory_size=None,
                 max_content_length=None, cls=None,
                 silent=True):
        if stream_factory is None:
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        if cls is None:
            cls = MultiDict
        self.cls = cls
        self.silent = silent

    def get_parse_func(self, mimetype, options):
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ):
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get('CONTENT_TYPE', '')
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype,
                          content_length, options)

    def parse(self, stream, mimetype, content_length, options=None):
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        if self.max_content_length is not None and \
           content_length is not None and \
           content_length > self.max_content_length:
            raise exceptions.RequestEntityTooLarge()
        if options is None:
            options = {}

        parse_func = self.get_parse_func(mimetype, options)
        if parse_func is not None:
            try:
                return parse_func(self, stream, mimetype,
                                  content_length, options)
            except ValueError:
                if not self.silent:
                    raise

        return stream, self.cls(), self.cls()

    @exhaust_stream
    def _parse_multipart(self, stream, mimetype, content_length, options):
        parser = MultiPartParser(self.stream_factory, self.charset, self.errors,
                                 max_form_memory_size=self.max_form_memory_size,
                                 cls=self.cls)
        boundary = options.get('boundary')
        if isinstance(boundary, text_type):
            boundary = boundary.encode('ascii')
        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    @exhaust_stream
    def _parse_urlencoded(self, stream, mimetype, content_length, options):
        if self.max_form_memory_size is not None and \
           content_length is not None and \
           content_length > self.max_form_memory_size:
            raise exceptions.RequestEntityTooLarge()
        form = url_decode_stream(stream, self.charset,
                                 errors=self.errors, cls=self.cls)
        return stream, form, self.cls()

    #: mapping of mimetypes to parsing functions
    parse_functions = {
        'multipart/form-data': _parse_multipart,
        'application/x-www-form-urlencoded': _parse_urlencoded,
        'application/x-url-encoded': _parse_urlencoded
    }


def is_valid_multipart_boundary(boundary):
    """Checks if the string given is a valid multipart boundary."""
    return _multipart_boundary_re.match(boundary) is not None


def _line_parse(line):
    """Removes line ending characters and returns a tuple (`stripped_line`,
    `is_terminated`).
    """
    if line[-2:] in ['\r\n', b'\r\n']:
        return line[:-2], True
    elif line[-1:] in ['\r', '\n', b'\r', b'\n']:
        return line[:-1], True
    return line, False


def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol). The iterable has to be newline terminated.

    The iterable will stop at the line where the headers ended so it can be
    further consumed.

    :param iterable: iterable of strings that are newline terminated
    """
    result = []
    for line in iterable:
        line = to_native(line)
        line, line_terminated = _line_parse(line)
        if not line_terminated:
            raise ValueError('unexpected end of line in multipart header')
        if not line:
            break
        elif line[0] in ' \t' and result:
            key, value = result[-1]
            result[-1] = (key, value + '\n ' + line[1:])
        else:
            parts = line.split(':', 1)
            if len(parts) == 2:
                result.append((parts[0].strip(), parts[1].strip()))

    # we link the list to the headers, no need to create a copy, the
    # list was not shared anyways.
    return Headers(result)


_begin_form = 'begin_form'
_begin_file = 'begin_file'
_cont = 'cont'
_end = 'end'


class MultiPartParser(object):

    def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
                 max_form_memory_size=None, cls=None, buffer_size=64 * 1024):
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        # resolve the default before storing the factory; assigning the
        # attribute first would leave it as None for the default case.
        if stream_factory is None:
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        if cls is None:
            cls = MultiDict
        self.cls = cls

        # make sure the buffer size is divisible by four so that we can base64
        # decode chunk by chunk
        assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
        # also the buffer size has to be at least 1024 bytes long or long headers
        # will freak out the system
        assert buffer_size >= 1024, 'buffer size has to be at least 1KB'

        self.buffer_size = buffer_size

    def _fix_ie_filename(self, filename):
        """Internet Explorer 6 transmits the full file name if a file is
        uploaded. This function strips the full path if it thinks the
        filename is Windows-like absolute.
        """
        if filename[1:3] == ':\\' or filename[:2] == '\\\\':
            return filename.split('\\')[-1]
        return filename

    def _find_terminator(self, iterator):
        """The terminator might have some additional newlines before it.
        There is at least one application that sends additional newlines
        before headers (the python setuptools package).
        """
        for line in iterator:
            if not line:
                break
            line = line.strip()
            if line:
                return line
        return b''

    def fail(self, message):
        raise ValueError(message)

    def get_part_encoding(self, headers):
        transfer_encoding = headers.get('content-transfer-encoding')
        if transfer_encoding is not None and \
           transfer_encoding in _supported_multipart_encodings:
            return transfer_encoding

    def get_part_charset(self, headers):
        # Figure out input charset for current part
        content_type = headers.get('content-type')
        if content_type:
            mimetype, ct_params = parse_options_header(content_type)
            return ct_params.get('charset', self.charset)
        return self.charset

    def start_file_streaming(self, filename, headers, total_content_length):
        if isinstance(filename, bytes):
            filename = filename.decode(self.charset, self.errors)
        filename = self._fix_ie_filename(filename)
        content_type = headers.get('content-type')
        try:
            content_length = int(headers['content-length'])
        except (KeyError, ValueError):
            content_length = 0
        container = self.stream_factory(total_content_length, content_type,
                                        filename, content_length)
        return filename, container

    def in_memory_threshold_reached(self, bytes):
        raise exceptions.RequestEntityTooLarge()

    def validate_boundary(self, boundary):
        if not boundary:
            self.fail('Missing boundary')
        if not is_valid_multipart_boundary(boundary):
            self.fail('Invalid boundary: %s' % boundary)
        if len(boundary) > self.buffer_size: # pragma: no cover
            # this should never happen because we check for a minimum size
            # of 1024 and boundaries may not be longer than 200. The only
            # situation when this happens is for non debug builds where
            # the assert is skipped.
            self.fail('Boundary longer than buffer size')

    def parse_lines(self, file, boundary, content_length):
        """Generate parts of
        ``('begin_form', (headers, name))``
        ``('begin_file', (headers, name, filename))``
        ``('cont', bytestring)``
        ``('end', None)``

        Always obeys the grammar
        parts = ( begin_form cont* end |
                  begin_file cont* end )*
        """
        next_part = b'--' + boundary
        last_part = next_part + b'--'

        iterator = chain(make_line_iter(file, limit=content_length,
                                        buffer_size=self.buffer_size),
                         _empty_string_iter)

        terminator = self._find_terminator(iterator)

        if terminator == last_part:
            return
        elif terminator != next_part:
            self.fail('Expected boundary at start of multipart data')

        while terminator != last_part:
            headers = parse_multipart_headers(iterator)

            disposition = headers.get('content-disposition')
            if disposition is None:
                self.fail('Missing Content-Disposition header')
            disposition, extra = parse_options_header(disposition)
            transfer_encoding = self.get_part_encoding(headers)
            name = extra.get('name')
            filename = extra.get('filename')

            # if no content type is given we stream into memory. A list is
            # used as a temporary container.
            if filename is None:
                yield _begin_form, (headers, name)

            # otherwise we parse the rest of the headers and ask the stream
            # factory for something we can write in.
            else:
                yield _begin_file, (headers, name, filename)

            buf = b''
            for line in iterator:
                if not line:
                    self.fail('unexpected end of stream')

                if line[:2] == b'--':
                    terminator = line.rstrip()
                    if terminator in (next_part, last_part):
                        break

                if transfer_encoding is not None:
                    if transfer_encoding == 'base64':
                        transfer_encoding = 'base64_codec'
                    try:
                        line = codecs.decode(line, transfer_encoding)
                    except Exception:
                        self.fail('could not decode transfer encoded chunk')

                # we have something in the buffer from the last iteration.
                # this is usually a newline delimiter.
                if buf:
                    yield _cont, buf
                    buf = b''

                # If the line ends with windows CRLF we write everything except
                # the last two bytes. In all other cases however we write
                # everything except the last byte. If it was a newline, that's
                # fine, otherwise it does not matter because we will write it
                # the next iteration. This ensures we do not write the
                # final newline into the stream. That way we do not have to
                # truncate the stream. However we do have to make sure that
                # if something other than a newline is in there we write it
                # out.
                if line[-2:] == b'\r\n':
                    buf = b'\r\n'
                    cutoff = -2
                else:
                    buf = line[-1:]
                    cutoff = -1
                yield _cont, line[:cutoff]

            else: # pragma: no cover
                raise ValueError('unexpected end of part')

            # if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop off
            # certain values.
            if buf not in (b'', b'\r', b'\n', b'\r\n'):
                yield _cont, buf

        yield _end, None

    def parse_parts(self, file, boundary, content_length):
        """Generate ``('file', (name, val))`` and
        ``('form', (name, val))`` parts.
        """
        in_memory = 0

        for ellt, ell in self.parse_lines(file, boundary, content_length):
            if ellt == _begin_file:
                headers, name, filename = ell
                is_file = True
                guard_memory = False
                filename, container = self.start_file_streaming(
                    filename, headers, content_length)
                _write = container.write

            elif ellt == _begin_form:
                headers, name = ell
                is_file = False
                container = []
                _write = container.append
                guard_memory = self.max_form_memory_size is not None

            elif ellt == _cont:
                _write(ell)
                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(ell)
                    if in_memory > self.max_form_memory_size:
                        self.in_memory_threshold_reached(in_memory)

            elif ellt == _end:
                if is_file:
                    container.seek(0)
                    yield ('file',
                           (name, FileStorage(container, filename, name,
                                              headers=headers)))
                else:
                    part_charset = self.get_part_charset(headers)
                    yield ('form',
                           (name, b''.join(container).decode(
                               part_charset, self.errors)))

    def parse(self, file, boundary, content_length):
        formstream, filestream = tee(
            self.parse_parts(file, boundary, content_length), 2)
        form = (p[1] for p in formstream if p[0] == 'form')
        files = (p[1] for p in filestream if p[0] == 'file')
        return self.cls(form), self.cls(files)
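
# Editor's sketch, not part of the vendored file: parsing a hand-built
# multipart body; the boundary is passed as bytes, as _parse_multipart does.
#
#     from io import BytesIO
#
#     body = (b'--bound\r\n'
#             b'Content-Disposition: form-data; name="field"\r\n\r\n'
#             b'value\r\n'
#             b'--bound--\r\n')
#     form, files = MultiPartParser().parse(BytesIO(body), b'bound', len(body))
#     assert form['field'] == 'value'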


from werkzeug import exceptions

980
Linux_x86_64/lib/python2.7/site-packages/werkzeug/http.py
Normal file
@@ -0,0 +1,980 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.http
    ~~~~~~~~~~~~~

    Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
    HTTP data. Most of the classes and functions provided by this module are
    used by the wrappers, but they are useful on their own, too, especially if
    the response and request objects are not used.

    This covers some of the more HTTP centric features of WSGI, some other
    utilities such as cookie handling are documented in the `werkzeug.utils`
    module.


    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
    from email.utils import parsedate_tz
except ImportError: # pragma: no cover
    from email.Utils import parsedate_tz
try:
    from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
    from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64

from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
     _cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
     string_types, try_coerce_native, to_bytes, PY2, \
     integer_types


# incorrect
_cookie_charset = 'latin1'
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                         '^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' %
                                     (_quoted_string_re, _quoted_string_re))

_entity_headers = frozenset([
    'allow', 'content-encoding', 'content-language', 'content-length',
    'content-location', 'content-md5', 'content-range', 'content-type',
    'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
    'upgrade'
])


HTTP_STATUS_CODES = {
    100: 'Continue',
    101: 'Switching Protocols',
    102: 'Processing',
    200: 'OK',
    201: 'Created',
    202: 'Accepted',
    203: 'Non Authoritative Information',
    204: 'No Content',
    205: 'Reset Content',
    206: 'Partial Content',
    207: 'Multi Status',
    226: 'IM Used',  # see RFC 3229
    300: 'Multiple Choices',
    301: 'Moved Permanently',
    302: 'Found',
    303: 'See Other',
    304: 'Not Modified',
    305: 'Use Proxy',
    307: 'Temporary Redirect',
    400: 'Bad Request',
    401: 'Unauthorized',
    402: 'Payment Required',  # unused
    403: 'Forbidden',
    404: 'Not Found',
    405: 'Method Not Allowed',
    406: 'Not Acceptable',
    407: 'Proxy Authentication Required',
    408: 'Request Timeout',
    409: 'Conflict',
    410: 'Gone',
    411: 'Length Required',
    412: 'Precondition Failed',
    413: 'Request Entity Too Large',
    414: 'Request URI Too Long',
    415: 'Unsupported Media Type',
    416: 'Requested Range Not Satisfiable',
    417: 'Expectation Failed',
    418: 'I\'m a teapot',  # see RFC 2324
    422: 'Unprocessable Entity',
    423: 'Locked',
    424: 'Failed Dependency',
    426: 'Upgrade Required',
    428: 'Precondition Required',  # see RFC 6585
    429: 'Too Many Requests',
    431: 'Request Header Fields Too Large',
    449: 'Retry With',  # proprietary MS extension
    500: 'Internal Server Error',
    501: 'Not Implemented',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
    505: 'HTTP Version Not Supported',
    507: 'Insufficient Storage',
    510: 'Not Extended'
}


def wsgi_to_bytes(data):
    """coerce wsgi unicode represented bytes to real ones"""
    if isinstance(data, bytes):
        return data
    return data.encode('latin1') #XXX: utf8 fallback?


def bytes_to_wsgi(data):
    assert isinstance(data, bytes), 'data must be bytes'
    if isinstance(data, str):
        return data
    else:
        return data.decode('latin1')


def quote_header_value(value, extra_chars='', allow_token=True):
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
                        unchanged.
    """
    if isinstance(value, bytes):
        value = bytes_to_wsgi(value)
    value = str(value)
    if allow_token:
        token_chars = _token_chars | set(extra_chars)
        if set(value).issubset(token_chars):
            return value
    return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')


def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    """
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well. IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]

        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes. Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly. See #458.
        if not is_filename or value[:2] != '\\\\':
            return value.replace('\\\\', '\\').replace('\\"', '"')
    return value


def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    segments = []
    if header is not None:
        segments.append(header)
    for key, value in iteritems(options):
        if value is None:
            segments.append(key)
        else:
            segments.append('%s=%s' % (key, quote_header_value(value)))
    return '; '.join(segments)


def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again. This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`. This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.

    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    """
    if isinstance(iterable, dict):
        items = []
        for key, value in iteritems(iterable):
            if value is None:
                items.append(key)
            else:
                items.append('%s=%s' % (
                    key,
                    quote_header_value(value, allow_token=allow_token)
                ))
    else:
        items = [quote_header_value(x, allow_token=allow_token)
                 for x in iterable]
    return ', '.join(items)


def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings. A quoted-string could
    contain a comma. A non-quoted string could have quotes in the
    middle. Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    result = []
    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result


def parse_dict_header(value, cls=dict):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict (or any other mapping object created from
    the type with a dict like interface provided by the `cls` argument):

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    .. versionchanged:: 0.9
       Added support for `cls` argument.

    :param value: a string with a dict header.
    :param cls: callable to use for storage of parsed results.
    :return: an instance of `cls`
    """
    result = cls()
    if not isinstance(value, text_type):
        #XXX: validate
        value = bytes_to_wsgi(value)
    for item in _parse_list_header(value):
        if '=' not in item:
            result[item] = None
            continue
        name, value = item.split('=', 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result


def parse_options_header(value):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('text/html; charset=utf8')
    ('text/html', {'charset': 'utf8'})

    This should not be used to parse ``Cache-Control`` like headers that use
    a slightly different format. For these headers use the
    :func:`parse_dict_header` function.

    .. versionadded:: 0.5

    :param value: the header to parse.
    :return: (str, options)
    """
    def _tokenize(string):
        for match in _option_header_piece_re.finditer(string):
            key, value = match.groups()
            key = unquote_header_value(key)
            if value is not None:
                value = unquote_header_value(value, key == 'filename')
            yield key, value

    if not value:
        return '', {}

    parts = _tokenize(';' + value)
    name = next(parts)[0]
    extra = dict(parts)
    return name, extra


def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header. This does not implement a complete
    valid algorithm but one that supports at least value and quality
    extraction.

    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
    tuples sorted by the quality with some additional accessor methods).

    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.

    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = Accept

    if not value:
        return cls(None)

    result = []
    for match in _accept_re.finditer(value):
        quality = match.group(2)
        if not quality:
            quality = 1
        else:
            quality = max(min(float(quality), 1), 0)
        result.append((match.group(1), quality))
    return cls(result)
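
# Editor's sketch, not part of the vendored file: quality sorting in action,
# using MIMEAccept from werkzeug.datastructures as the wrapper class.
#
#     from werkzeug.datastructures import MIMEAccept
#
#     accept = parse_accept_header('text/html,application/json;q=0.8',
#                                  MIMEAccept)
#     assert accept.best == 'text/html'
#     assert accept['application/json'] == 0.8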
|
||||
|
||||
def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header.  The RFC distinguishes between response
    and request cache control; this method does not.  It's your
    responsibility not to use the wrong control statements.

    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = RequestCacheControl
    if not value:
        return cls(None, on_update)
    return cls(parse_dict_header(value), on_update)


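# Editorial usage sketch, not part of the vendored file: the returned
# RequestCacheControl object exposes typed accessors for the parsed
# directives.  The header value is made up.
def _example_parse_cache_control_header():
    cc = parse_cache_control_header('no-cache, max-age=3600')
    assert cc.no_cache
    assert cc.max_age == 3600

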
def parse_set_header(value, on_update=None):
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:

    >>> hs = parse_set_header('token, "quoted value"')

    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:

    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])

    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    if not value:
        return HeaderSet(None, on_update)
    return HeaderSet(parse_list_header(value), on_update)


def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.  The return value is either `None` if the header was invalid or
    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
    object.

    :param value: the authorization header to parse.
    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
    """
    if not value:
        return
    value = wsgi_to_bytes(value)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        return
    if auth_type == b'basic':
        try:
            username, password = base64.b64decode(auth_info).split(b':', 1)
        except Exception:
            return
        return Authorization('basic', {'username': bytes_to_wsgi(username),
                                       'password': bytes_to_wsgi(password)})
    elif auth_type == b'digest':
        auth_map = parse_dict_header(auth_info)
        for key in 'username', 'realm', 'nonce', 'uri', 'response':
            if key not in auth_map:
                return
        if 'qop' in auth_map:
            if not auth_map.get('nc') or not auth_map.get('cnonce'):
                return
        return Authorization('digest', auth_map)


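# Editorial usage sketch, not part of the vendored file: 'dXNlcjpwYXNz' is
# base64('user:pass'), so a basic auth header decodes into its two parts.
def _example_parse_authorization_header():
    auth = parse_authorization_header('Basic dXNlcjpwYXNz')
    assert auth.username == 'user'
    assert auth.password == 'pass'

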
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a
    :class:`~werkzeug.datastructures.WWWAuthenticate` object.

    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    """
    if not value:
        return WWWAuthenticate(on_update=on_update)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except (ValueError, AttributeError):
        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
    return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
                           on_update)


def parse_if_range_header(value):
    """Parses an if-range header which can be an etag or a date.  Returns
    a :class:`~werkzeug.datastructures.IfRange` object.

    .. versionadded:: 0.7
    """
    if not value:
        return IfRange()
    date = parse_date(value)
    if date is not None:
        return IfRange(date=date)
    # drop weakness information
    return IfRange(unquote_etag(value)[0])


def parse_range_header(value, make_inclusive=True):
    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
    object.  If the header is missing or malformed `None` is returned.
    `ranges` is a list of ``(start, stop)`` tuples where the stop is
    exclusive.

    .. versionadded:: 0.7
    """
    if not value or '=' not in value:
        return None

    ranges = []
    last_end = 0
    units, rng = value.split('=', 1)
    units = units.strip().lower()

    for item in rng.split(','):
        item = item.strip()
        if '-' not in item:
            return None
        if item.startswith('-'):
            if last_end < 0:
                return None
            begin = int(item)
            end = None
            last_end = -1
        elif '-' in item:
            begin, end = item.split('-', 1)
            begin = int(begin)
            if begin < last_end or last_end < 0:
                return None
            if end:
                end = int(end) + 1
                if begin >= end:
                    return None
            else:
                end = None
            last_end = end
        ranges.append((begin, end))

    return Range(units, ranges)


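# Editorial usage sketch, not part of the vendored file: parsed ranges keep
# an exclusive stop, and suffix ranges keep a None stop.
def _example_parse_range_header():
    rv = parse_range_header('bytes=0-499')
    assert rv.units == 'bytes' and rv.ranges == [(0, 500)]
    assert parse_range_header('bytes=-500').ranges == [(-500, None)]

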
def parse_content_range_header(value, on_update=None):
    """Parses a content range header into a
    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
    parsing is not possible.

    .. versionadded:: 0.7

    :param value: a content range header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.ContentRange`
                      object is changed.
    """
    if value is None:
        return None
    try:
        units, rangedef = (value or '').strip().split(None, 1)
    except ValueError:
        return None

    if '/' not in rangedef:
        return None
    rng, length = rangedef.split('/', 1)
    if length == '*':
        length = None
    elif length.isdigit():
        length = int(length)
    else:
        return None

    if rng == '*':
        return ContentRange(units, None, None, length, on_update=on_update)
    elif '-' not in rng:
        return None

    start, stop = rng.split('-', 1)
    try:
        start = int(start)
        stop = int(stop) + 1
    except ValueError:
        return None

    if is_byte_range_valid(start, stop, length):
        return ContentRange(units, start, stop, length, on_update=on_update)


def quote_etag(etag, weak=False):
    """Quote an etag.

    :param etag: the etag to quote.
    :param weak: set to `True` to tag it "weak".
    """
    if '"' in etag:
        raise ValueError('invalid etag')
    etag = '"%s"' % etag
    if weak:
        etag = 'w/' + etag
    return etag


def unquote_etag(etag):
    """Unquote a single etag:

    >>> unquote_etag('w/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None
    etag = etag.strip()
    weak = False
    if etag[:2] in ('w/', 'W/'):
        weak = True
        etag = etag[2:]
    if etag[:1] == etag[-1:] == '"':
        etag = etag[1:-1]
    return etag, weak


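# Editorial usage sketch, not part of the vendored file: quote_etag and
# unquote_etag round-trip, including the weakness marker.
def _example_etag_quoting():
    assert quote_etag('bar', weak=True) == 'w/"bar"'
    assert unquote_etag('w/"bar"') == ('bar', True)

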
def parse_etags(value):
    """Parse an etag header.

    :param value: the tag header to parse
    :return: an :class:`~werkzeug.datastructures.ETags` object.
    """
    if not value:
        return ETags()
    strong = []
    weak = []
    end = len(value)
    pos = 0
    while pos < end:
        match = _etag_re.match(value, pos)
        if match is None:
            break
        is_weak, quoted, raw = match.groups()
        if raw == '*':
            return ETags(star_tag=True)
        elif quoted:
            raw = quoted
        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)
        pos = match.end()
    return ETags(strong, weak)


def generate_etag(data):
    """Generate an etag for some data."""
    return md5(data).hexdigest()


def parse_date(value):
    """Parse one of the following date formats into a datetime object:

    .. sourcecode:: text

        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

    If parsing fails the return value is `None`.

    :param value: a string with a supported date format.
    :return: a :class:`datetime.datetime` object.
    """
    if value:
        t = parsedate_tz(value.strip())
        if t is not None:
            try:
                year = t[0]
                # unfortunately that function does not tell us if two digit
                # years were part of the string, or if they were prefixed
                # with two zeroes.  So what we do is to assume that 69-99
                # refer to 1969-1999, and everything below to 2000-2068.
                if year >= 0 and year <= 68:
                    year += 2000
                elif year >= 69 and year <= 99:
                    year += 1900
                return datetime(*((year,) + t[1:7])) - \
                    timedelta(seconds=t[-1] or 0)
            except (ValueError, OverflowError):
                return None


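# Editorial usage sketch, not part of the vendored file: both RFC 822 and
# RFC 850 spellings of the same instant parse to one normalized datetime.
def _example_parse_date():
    from datetime import datetime
    expected = datetime(1994, 11, 6, 8, 49, 37)
    assert parse_date('Sun, 06 Nov 1994 08:49:37 GMT') == expected
    assert parse_date('Sunday, 06-Nov-94 08:49:37 GMT') == expected

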
def _dump_date(d, delim):
    """Used for `http_date` and `cookie_date`."""
    if d is None:
        d = gmtime()
    elif isinstance(d, datetime):
        d = d.utctimetuple()
    elif isinstance(d, (integer_types, float)):
        d = gmtime(d)
    return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
        ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
        d.tm_mday, delim,
        ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
         'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
        delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
    )


def cookie_date(expires=None):
    """Formats the time to ensure compatibility with Netscape's cookie
    standard.

    Accepts a floating point number expressed in seconds since the epoch, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.

    :param expires: If provided that date is used, otherwise the current.
    """
    return _dump_date(expires, '-')


def http_date(timestamp=None):
    """Formats the time to match the RFC1123 date format.

    Accepts a floating point number expressed in seconds since the epoch, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.

    :param timestamp: If provided that date is used, otherwise the current.
    """
    return _dump_date(timestamp, ' ')


def is_resource_modified(environ, etag=None, data=None, last_modified=None):
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :return: `True` if the resource was modified, otherwise `False`.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError('both data and etag given')
    if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
        return False

    unmodified = False
    if isinstance(last_modified, string_types):
        last_modified = parse_date(last_modified)

    # ensure that microsecond is zero because the HTTP spec does not transmit
    # that either and we might have some false positives.  See issue #39
    if last_modified is not None:
        last_modified = last_modified.replace(microsecond=0)

    modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True
    if etag:
        if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
        if if_none_match:
            unmodified = if_none_match.contains_raw(etag)

    return not unmodified


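# Editorial usage sketch, not part of the vendored file: a GET request whose
# If-None-Match matches the current etag counts as unmodified.  The environ
# dicts are minimal hand-built stand-ins for real WSGI environments.
def _example_is_resource_modified():
    environ = {'REQUEST_METHOD': 'GET', 'HTTP_IF_NONE_MATCH': '"abc"'}
    assert is_resource_modified(environ, etag='abc') is False
    assert is_resource_modified({'REQUEST_METHOD': 'GET'}, etag='abc') is True

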
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
    """Remove all entity headers from a list or :class:`Headers` object.  This
    operation works in-place.  `Expires` and `Content-Location` headers are
    by default not removed.  The reason for this is :rfc:`2616` section
    10.3.5 which specifies some entity headers that should be sent.

    .. versionchanged:: 0.5
       added `allowed` parameter.

    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    allowed = set(x.lower() for x in allowed)
    headers[:] = [(key, value) for key, value in headers if
                  not is_entity_header(key) or key.lower() in allowed]


def remove_hop_by_hop_headers(headers):
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object.  This operation works in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    headers[:] = [(key, value) for key, value in headers if
                  not is_hop_by_hop_header(key)]


def is_entity_header(header):
    """Check if a header is an entity header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    return header.lower() in _entity_headers


def is_hop_by_hop_header(header):
    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's a hop-by-hop header, `False` otherwise.
    """
    return header.lower() in _hop_by_hop_headers


def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
    """Parse a cookie.  Either from a string or WSGI environ.

    Per default encoding errors are replaced.  If you want a different
    behavior you can set `errors` to ``'ignore'`` or ``'strict'``.  In
    strict mode a :exc:`HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       This function now returns a :class:`TypeConversionDict` instead of a
       regular dict.  The `cls` parameter was added.

    :param header: the header to be used to parse the cookie.  Alternatively
                   this can be a WSGI environment.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`TypeConversionDict` is
                used.
    """
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    elif header is None:
        header = ''

    # If the value is a unicode string it's mangled through latin1.  This
    # is done because on PEP 3333 on Python 3 all headers are assumed latin1
    # which however is incorrect for cookies, which are sent in page
    # encoding.  As a result we recode the header back to latin1 bytes here
    # and decode the individual values with the intended charset below.
    if isinstance(header, text_type):
        header = header.encode('latin1', 'replace')

    if cls is None:
        cls = TypeConversionDict

    def _parse_pairs():
        for key, val in _cookie_parse_impl(header):
            key = to_unicode(key, charset, errors, allow_none_charset=True)
            val = to_unicode(val, charset, errors, allow_none_charset=True)
            yield try_coerce_native(key), val

    return cls(_parse_pairs())


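# Editorial usage sketch, not part of the vendored file: the returned
# TypeConversionDict can coerce values on lookup.  Cookie string made up.
def _example_parse_cookie():
    cookies = parse_cookie('name=value; count=3')
    assert cookies['name'] == 'value'
    assert cookies.get('count', type=int) == 3

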
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix.
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie.  For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc.  Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        expires = to_bytes(cookie_date(time() + max_age))

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted.  Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False)):
        if q is None:
            if v:
                buf.append(k)
            continue

        if v is None:
            continue

        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')
    return rv


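# Editorial usage sketch, not part of the vendored file: with sync_expires
# enabled a Max-Age also produces a matching Expires attribute.  The key and
# value below are made up.
def _example_dump_cookie():
    rv = dump_cookie('session', 'abc123', max_age=3600, httponly=True)
    assert rv.startswith('session=abc123')
    assert 'Max-Age=3600' in rv and 'HttpOnly' in rv and 'Expires=' in rv

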
def is_byte_range_valid(start, stop, length):
    """Checks if a given byte content range is valid for the given length.

    .. versionadded:: 0.7
    """
    if (start is None) != (stop is None):
        return False
    elif start is None:
        return length is None or length >= 0
    elif length is None:
        return 0 <= start < stop
    elif start >= stop:
        return False
    return 0 <= start < length


# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
    WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
    RequestCacheControl


# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
    LanguageAccept, Headers
from werkzeug.urls import iri_to_uri
409
Linux_x86_64/lib/python2.7/site-packages/werkzeug/local.py
Normal file
@@ -0,0 +1,409 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.local
    ~~~~~~~~~~~~~~

    This module implements context-local objects.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool

# since each thread has its own greenlet we can just use those as identifiers
# for the context.  If greenlets are not available we fall back to the
# current thread ident.
try:
    from greenlet import getcurrent as get_ident
except ImportError:
    try:
        from thread import get_ident
    except ImportError:
        from _thread import get_ident


def release_local(local):
    """Releases the contents of the local for the current context.
    This makes it possible to use locals without a manager.

    Example::

        >>> loc = Local()
        >>> loc.foo = 42
        >>> release_local(loc)
        >>> hasattr(loc, 'foo')
        False

    With this function one can release :class:`Local` objects as well
    as :class:`LocalStack` objects.  However it is not possible to
    release data held by proxies that way, one always has to retain
    a reference to the underlying local object in order to be able
    to release it.

    .. versionadded:: 0.6.1
    """
    local.__release_local__()


class Local(object):
    __slots__ = ('__storage__', '__ident_func__')

    def __init__(self):
        object.__setattr__(self, '__storage__', {})
        object.__setattr__(self, '__ident_func__', get_ident)

    def __iter__(self):
        return iter(self.__storage__.items())

    def __call__(self, proxy):
        """Create a proxy for a name."""
        return LocalProxy(self, proxy)

    def __release_local__(self):
        self.__storage__.pop(self.__ident_func__(), None)

    def __getattr__(self, name):
        try:
            return self.__storage__[self.__ident_func__()][name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        ident = self.__ident_func__()
        storage = self.__storage__
        try:
            storage[ident][name] = value
        except KeyError:
            storage[ident] = {name: value}

    def __delattr__(self, name):
        try:
            del self.__storage__[self.__ident_func__()][name]
        except KeyError:
            raise AttributeError(name)


class LocalStack(object):
    """This class works similar to a :class:`Local` but keeps a stack
    of objects instead.  This is best explained with an example::

        >>> ls = LocalStack()
        >>> ls.push(42)
        >>> ls.top
        42
        >>> ls.push(23)
        >>> ls.top
        23
        >>> ls.pop()
        23
        >>> ls.top
        42

    They can be force-released by using a :class:`LocalManager` or with
    the :func:`release_local` function but the correct way is to pop the
    item from the stack after use.  When the stack is empty it will
    no longer be bound to the current context (and as such released).

    By calling the stack without arguments it returns a proxy that resolves to
    the topmost item on the stack.

    .. versionadded:: 0.6.1
    """

    def __init__(self):
        self._local = Local()

    def __release_local__(self):
        self._local.__release_local__()

    def _get__ident_func__(self):
        return self._local.__ident_func__
    def _set__ident_func__(self, value):
        object.__setattr__(self._local, '__ident_func__', value)
    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
    del _get__ident_func__, _set__ident_func__

    def __call__(self):
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError('object unbound')
            return rv
        return LocalProxy(_lookup)

    def push(self, obj):
        """Pushes a new item to the stack"""
        rv = getattr(self._local, 'stack', None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv

    def pop(self):
        """Removes the topmost item from the stack, will return the
        old value or `None` if the stack was already empty.
        """
        stack = getattr(self._local, 'stack', None)
        if stack is None:
            return None
        elif len(stack) == 1:
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()

    @property
    def top(self):
        """The topmost item on the stack.  If the stack is empty,
        `None` is returned.
        """
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None


class LocalManager(object):
    """Local objects cannot manage themselves.  For that you need a local
    manager.  You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`.  Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.

    The `ident_func` parameter can be added to override the default ident
    function for the wrapped locals.

    .. versionchanged:: 0.6.1
       Instead of a manager the :func:`release_local` function can be used
       as well.

    .. versionchanged:: 0.7
       `ident_func` was added.
    """

    def __init__(self, locals=None, ident_func=None):
        if locals is None:
            self.locals = []
        elif isinstance(locals, Local):
            self.locals = [locals]
        else:
            self.locals = list(locals)
        if ident_func is not None:
            self.ident_func = ident_func
            for local in self.locals:
                object.__setattr__(local, '__ident_func__', ident_func)
        else:
            self.ident_func = get_ident

    def get_ident(self):
        """Return the context identifier the local objects use internally for
        this context.  You cannot override this method to change the behavior
        but use it to link other context local objects (such as SQLAlchemy's
        scoped sessions) to the Werkzeug locals.

        .. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
           will then be propagated to all the locals passed to the
           constructor.
        """
        return self.ident_func()

    def cleanup(self):
        """Manually clean up the data in the locals for this context.  Call
        this at the end of the request or use `make_middleware()`.
        """
        for local in self.locals:
            release_local(local)

    def make_middleware(self, app):
        """Wrap a WSGI application so that cleaning up happens after
        request end.
        """
        def application(environ, start_response):
            return ClosingIterator(app(environ, start_response), self.cleanup)
        return application

    def middleware(self, func):
        """Like `make_middleware` but for decorating functions.

        Example usage::

            @manager.middleware
            def application(environ, start_response):
                ...

        The difference to `make_middleware` is that the function passed
        will have all the arguments copied from the inner application
        (name, docstring, module).
        """
        return update_wrapper(self.make_middleware(func), func)

    def __repr__(self):
        return '<%s storages: %d>' % (
            self.__class__.__name__,
            len(self.locals)
        )


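# Editorial usage sketch, not part of the vendored file: a manager releases
# every registered local once the wrapped WSGI response is closed.  The
# application below is a made-up minimal example.
def _example_local_manager():
    loc = Local()
    manager = LocalManager([loc])

    @manager.middleware
    def application(environ, start_response):
        loc.user = 'someone'  # bound only for this context
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['...']
    return application

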
@implements_bool
class LocalProxy(object):
    """Acts as a proxy for a werkzeug local.  Forwards all operations to
    a proxied object.  The only operations not supported for forwarding
    are right handed operands and any kind of assignment.

    Example usage::

        from werkzeug.local import Local
        l = Local()

        # these are proxies
        request = l('request')
        user = l('user')


        from werkzeug.local import LocalStack
        _response_local = LocalStack()

        # this is a proxy
        response = _response_local()

    Whenever something is bound to l.user / l.request the proxy objects
    will forward all operations.  If no object is bound a :exc:`RuntimeError`
    will be raised.

    To create proxies to :class:`Local` or :class:`LocalStack` objects,
    call the object as shown above.  If you want to have a proxy to an
    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
    a function to the :class:`LocalProxy` constructor::

        session = LocalProxy(lambda: get_current_request().session)

    .. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
    """
    __slots__ = ('__local', '__dict__', '__name__')

    def __init__(self, local, name=None):
        object.__setattr__(self, '_LocalProxy__local', local)
        object.__setattr__(self, '__name__', name)

    def _get_current_object(self):
        """Return the current object.  This is useful if you want the real
        object behind the proxy for performance reasons, or because you
        want to pass the object into a different context.
        """
        if not hasattr(self.__local, '__release_local__'):
            return self.__local()
        try:
            return getattr(self.__local, self.__name__)
        except AttributeError:
            raise RuntimeError('no object bound to %s' % self.__name__)

    @property
    def __dict__(self):
        try:
            return self._get_current_object().__dict__
        except RuntimeError:
            raise AttributeError('__dict__')

    def __repr__(self):
        try:
            obj = self._get_current_object()
        except RuntimeError:
            return '<%s unbound>' % self.__class__.__name__
        return repr(obj)

    def __bool__(self):
        try:
            return bool(self._get_current_object())
        except RuntimeError:
            return False

    def __unicode__(self):
        try:
            return unicode(self._get_current_object())
        except RuntimeError:
            return repr(self)

    def __dir__(self):
        try:
            return dir(self._get_current_object())
        except RuntimeError:
            return []

    def __getattr__(self, name):
        if name == '__members__':
            return dir(self._get_current_object())
        return getattr(self._get_current_object(), name)

    def __setitem__(self, key, value):
        self._get_current_object()[key] = value

    def __delitem__(self, key):
        del self._get_current_object()[key]

    if PY2:
        __getslice__ = lambda x, i, j: x._get_current_object()[i:j]

        def __setslice__(self, i, j, seq):
            self._get_current_object()[i:j] = seq

        def __delslice__(self, i, j):
            del self._get_current_object()[i:j]

    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
    __str__ = lambda x: str(x._get_current_object())
    __lt__ = lambda x, o: x._get_current_object() < o
    __le__ = lambda x, o: x._get_current_object() <= o
    __eq__ = lambda x, o: x._get_current_object() == o
    __ne__ = lambda x, o: x._get_current_object() != o
    __gt__ = lambda x, o: x._get_current_object() > o
    __ge__ = lambda x, o: x._get_current_object() >= o
    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)
    __hash__ = lambda x: hash(x._get_current_object())
    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
    __len__ = lambda x: len(x._get_current_object())
    __getitem__ = lambda x, i: x._get_current_object()[i]
    __iter__ = lambda x: iter(x._get_current_object())
    __contains__ = lambda x, i: i in x._get_current_object()
    __add__ = lambda x, o: x._get_current_object() + o
    __sub__ = lambda x, o: x._get_current_object() - o
    __mul__ = lambda x, o: x._get_current_object() * o
    __floordiv__ = lambda x, o: x._get_current_object() // o
    __mod__ = lambda x, o: x._get_current_object() % o
    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
    __pow__ = lambda x, o: x._get_current_object() ** o
    __lshift__ = lambda x, o: x._get_current_object() << o
    __rshift__ = lambda x, o: x._get_current_object() >> o
    __and__ = lambda x, o: x._get_current_object() & o
    __xor__ = lambda x, o: x._get_current_object() ^ o
    __or__ = lambda x, o: x._get_current_object() | o
    __div__ = lambda x, o: x._get_current_object().__div__(o)
    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
    __neg__ = lambda x: -(x._get_current_object())
    __pos__ = lambda x: +(x._get_current_object())
    __abs__ = lambda x: abs(x._get_current_object())
    __invert__ = lambda x: ~(x._get_current_object())
    __complex__ = lambda x: complex(x._get_current_object())
    __int__ = lambda x: int(x._get_current_object())
    __long__ = lambda x: long(x._get_current_object())
    __float__ = lambda x: float(x._get_current_object())
    __oct__ = lambda x: oct(x._get_current_object())
    __hex__ = lambda x: hex(x._get_current_object())
    __index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
    __enter__ = lambda x: x._get_current_object().__enter__()
    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
    __radd__ = lambda x, o: o + x._get_current_object()
    __rsub__ = lambda x, o: o - x._get_current_object()
    __rmul__ = lambda x, o: o * x._get_current_object()
    __rdiv__ = lambda x, o: o / x._get_current_object()
    if PY2:
        __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
    else:
        __rtruediv__ = __rdiv__
    __rfloordiv__ = lambda x, o: o // x._get_current_object()
    __rmod__ = lambda x, o: o % x._get_current_object()
    __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)

@@ -0,0 +1,105 @@
# -*- coding: utf-8 -*-
r"""
    werkzeug.posixemulation
    ~~~~~~~~~~~~~~~~~~~~~~~

    Provides a POSIX emulation for some features that are relevant to
    web applications.  The main purpose is to simplify support for
    systems such as Windows NT that are not 100% POSIX compatible.

    Currently this only implements a :func:`rename` function that
    follows POSIX semantics, e.g. if the target file already exists it
    will be replaced without asking.

    This module was introduced in 0.6.1 and is not a public interface.
    It might become one in later versions of Werkzeug.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random


can_rename_open_file = False
if os.name == 'nt':  # pragma: no cover
    _rename = lambda src, dst: False
    _rename_atomic = lambda src, dst: False

    try:
        import ctypes

        _MOVEFILE_REPLACE_EXISTING = 0x1
        _MOVEFILE_WRITE_THROUGH = 0x8
        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW

        def _rename(src, dst):
            if not isinstance(src, unicode):
                src = unicode(src, sys.getfilesystemencoding())
            if not isinstance(dst, unicode):
                dst = unicode(dst, sys.getfilesystemencoding())
            if _rename_atomic(src, dst):
                return True
            retry = 0
            rv = False
            while not rv and retry < 100:
                rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
                                 _MOVEFILE_WRITE_THROUGH)
                if not rv:
                    time.sleep(0.001)
                    retry += 1
            return rv

        # new in Vista and Windows Server 2008
        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
        _CloseHandle = ctypes.windll.kernel32.CloseHandle
        can_rename_open_file = True

        def _rename_atomic(src, dst):
            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
            if ta == -1:
                return False
            try:
                retry = 0
                rv = False
                while not rv and retry < 100:
                    rv = _MoveFileTransacted(src, dst, None, None,
                                             _MOVEFILE_REPLACE_EXISTING |
                                             _MOVEFILE_WRITE_THROUGH, ta)
                    if rv:
                        rv = _CommitTransaction(ta)
                        break
                    else:
                        time.sleep(0.001)
                        retry += 1
                return rv
            finally:
                _CloseHandle(ta)
    except Exception:
        pass

    def rename(src, dst):
        # Try atomic or pseudo-atomic rename
        if _rename(src, dst):
            return
        # Fall back to "move away and replace"
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
            os.rename(dst, old)
            os.rename(src, dst)
            try:
                os.unlink(old)
            except Exception:
                pass
else:
    rename = os.rename
    can_rename_open_file = True

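# Editorial usage sketch, not part of the vendored file: rename() follows
# POSIX overwrite semantics on every platform, which makes it suitable for
# atomically publishing a freshly written temporary file.
def _example_rename():
    import tempfile
    fd, tmp = tempfile.mkstemp()
    os.write(fd, b'fresh content')
    os.close(fd)
    rename(tmp, tmp + '.published')  # replaces the target if it exists
    os.unlink(tmp + '.published')
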
1635
Linux_x86_64/lib/python2.7/site-packages/werkzeug/routing.py
Normal file
316
Linux_x86_64/lib/python2.7/site-packages/werkzeug/script.py
Normal file
@@ -0,0 +1,316 @@
# -*- coding: utf-8 -*-
r'''
    werkzeug.script
    ~~~~~~~~~~~~~~~

    .. admonition:: Deprecated Functionality

       ``werkzeug.script`` is deprecated without replacement functionality.
       Python's command line support improved greatly with :mod:`argparse`
       and a bunch of alternative modules.

    Most of the time you have recurring tasks while writing an application
    such as starting up an interactive python interpreter with some prefilled
    imports, starting the development server, initializing the database or
    something similar.

    For that purpose werkzeug provides the `werkzeug.script` module which
    helps you write such scripts.


    Basic Usage
    -----------

    The following snippet is roughly the same in every werkzeug script::

        #!/usr/bin/env python
        # -*- coding: utf-8 -*-
        from werkzeug import script

        # actions go here

        if __name__ == '__main__':
            script.run()

    Starting this script now does nothing because no actions are defined.
    An action is a function in the same module starting with ``"action_"``
    which takes a number of arguments where every argument has a default.  The
    type of the default value specifies the type of the argument.

    Arguments can then be passed by position or using ``--name=value`` from
    the shell.

    Because a runserver and shell command is pretty common there are two
    factory functions that create such commands::

        def make_app():
            from yourapplication import YourApplication
            return YourApplication(...)

        action_runserver = script.make_runserver(make_app, use_reloader=True)
        action_shell = script.make_shell(lambda: {'app': make_app()})


    Using The Scripts
    -----------------

    The script from above can now be used like this from the shell:

    .. sourcecode:: text

        $ ./manage.py --help
        $ ./manage.py runserver localhost 8080 --debugger --no-reloader
        $ ./manage.py runserver -p 4000
        $ ./manage.py shell

    As you can see it's possible to pass parameters as positional arguments
    or as named parameters, pretty much like Python function calls.


    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
'''
from __future__ import print_function

import sys
import inspect
import getopt
from os.path import basename
from werkzeug._compat import iteritems


argument_types = {
    bool: 'boolean',
    str: 'string',
    int: 'integer',
    float: 'float'
}


converters = {
    'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
    'string': str,
    'integer': int,
    'float': float
}


def run(namespace=None, action_prefix='action_', args=None):
    """Run the script.  Participating actions are looked up in the caller's
    namespace if no namespace is given, otherwise in the dict provided.
    Only items that start with action_prefix are processed as actions.  If
    you want to use all items in the namespace provided as actions set
    action_prefix to an empty string.

    :param namespace: An optional dict where the functions are looked up in.
                      By default the local namespace of the caller is used.
    :param action_prefix: The prefix for the functions.  Everything else
                          is ignored.
    :param args: the arguments for the function.  If not specified
                 :data:`sys.argv` without the first argument is used.
    """
    if namespace is None:
        namespace = sys._getframe(1).f_locals
    actions = find_actions(namespace, action_prefix)

    if args is None:
        args = sys.argv[1:]
    if not args or args[0] in ('-h', '--help'):
        return print_usage(actions)
    elif args[0] not in actions:
        fail('Unknown action \'%s\'' % args[0])

    arguments = {}
    types = {}
    key_to_arg = {}
    long_options = []
    formatstring = ''
    func, doc, arg_def = actions[args.pop(0)]
    for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
        real_arg = arg.replace('-', '_')
        if shortcut:
            formatstring += shortcut
            if not isinstance(default, bool):
                formatstring += ':'
            key_to_arg['-' + shortcut] = real_arg
        long_options.append(isinstance(default, bool) and arg or arg + '=')
        key_to_arg['--' + arg] = real_arg
        key_to_arg[idx] = real_arg
        types[real_arg] = option_type
        arguments[real_arg] = default

    try:
        optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
    except getopt.GetoptError as e:
        fail(str(e))

    specified_arguments = set()
    for key, value in enumerate(posargs):
        try:
            arg = key_to_arg[key]
        except KeyError:
            # key_to_arg is a dict, so an out-of-range positional index
            # raises KeyError (the original caught IndexError here and
            # would never have reported this error).
            fail('Too many parameters')
        specified_arguments.add(arg)
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for argument %s (%s): %s' % (key, arg, value))

    for key, value in optlist:
        arg = key_to_arg[key]
        if arg in specified_arguments:
            fail('Argument \'%s\' is specified twice' % arg)
        if types[arg] == 'boolean':
            if arg.startswith('no_'):
                value = 'no'
            else:
                value = 'yes'
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for \'%s\': %s' % (key, value))

    newargs = {}
    for k, v in iteritems(arguments):
        newargs[k.startswith('no_') and k[3:] or k] = v
    arguments = newargs
    return func(**arguments)


def fail(message, code=-1):
    """Fail with an error."""
    print('Error: %s' % message, file=sys.stderr)
    sys.exit(code)


def find_actions(namespace, action_prefix):
    """Find all the actions in the namespace."""
    actions = {}
    for key, value in iteritems(namespace):
        if key.startswith(action_prefix):
            actions[key[len(action_prefix):]] = analyse_action(value)
    return actions


def print_usage(actions):
    """Print the usage information.  (Help screen)"""
    actions = actions.items()
    actions.sort()
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print('       %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                print('    %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print('    %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print()


def analyse_action(func):
    """Analyse a function."""
    description = inspect.getdoc(func) or 'undocumented action'
    arguments = []
    args, varargs, kwargs, defaults = inspect.getargspec(func)
    if varargs or kwargs:
        raise TypeError('variable length arguments for action not allowed.')
    if len(args) != len(defaults or ()):
        raise TypeError('not all arguments have proper definitions')

    for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
        if arg.startswith('_'):
            raise TypeError('arguments may not start with an underscore')
        if not isinstance(definition, tuple):
            shortcut = None
            default = definition
        else:
            shortcut, default = definition
        argument_type = argument_types[type(default)]
        if isinstance(default, bool) and default is True:
            arg = 'no-' + arg
        arguments.append((arg.replace('_', '-'), shortcut,
                          default, argument_type))
    return func, description, arguments


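# Editorial usage sketch, not part of the vendored file: defaults determine
# both the argument type and the generated option tuple.  action_serve is a
# made-up action.
def _example_analyse_action():
    def action_serve(hostname=('h', 'localhost'), port=('p', 5000)):
        """Serve something."""
    func, doc, arguments = analyse_action(action_serve)
    assert arguments == [('hostname', 'h', 'localhost', 'string'),
                         ('port', 'p', 5000, 'integer')]

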
def make_shell(init_func=None, banner=None, use_ipython=True):
    """Returns an action callback that spawns a new interactive
    python shell.

    :param init_func: an optional initialization function that is
                      called before the shell is started.  The return
                      value of this function is the initial namespace.
    :param banner: the banner that is displayed before the shell.  If
                   not specified a generic banner is used instead.
    :param use_ipython: if set to `True` ipython is used if available.
    """
    if banner is None:
        banner = 'Interactive Werkzeug Shell'
    if init_func is None:
        init_func = dict

    def action(ipython=use_ipython):
        """Start a new interactive python session."""
        namespace = init_func()
        if ipython:
            try:
                try:
                    from IPython.frontend.terminal.embed import InteractiveShellEmbed
                    sh = InteractiveShellEmbed(banner1=banner)
                except ImportError:
                    from IPython.Shell import IPShellEmbed
                    sh = IPShellEmbed(banner=banner)
            except ImportError:
                pass
            else:
                sh(global_ns={}, local_ns=namespace)
                return
        from code import interact
        interact(banner, local=namespace)
    return action


def make_runserver(app_factory, hostname='localhost', port=5000,
                   use_reloader=False, use_debugger=False, use_evalex=True,
                   threaded=False, processes=1, static_files=None,
                   extra_files=None, ssl_context=None):
    """Returns an action callback that spawns a new development server.

    .. versionadded:: 0.5
       `static_files` and `extra_files` were added.

    .. versionadded:: 0.6.1
       `ssl_context` was added.

    :param app_factory: a function that returns a new WSGI application.
    :param hostname: the default hostname the server should listen on.
    :param port: the default port of the server.
    :param use_reloader: the default setting for the reloader.
    :param use_debugger: the default setting for the debugger.
    :param use_evalex: the default setting for the evalex flag of the debugger.
    :param threaded: the default threading setting.
    :param processes: the default number of processes to start.
    :param static_files: optional dict of static files.
    :param extra_files: optional list of extra files to track for reloading.
    :param ssl_context: optional SSL context for running server in HTTPS mode.
    """
    def action(hostname=('h', hostname), port=('p', port),
               reloader=use_reloader, debugger=use_debugger,
               evalex=use_evalex, threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        app = app_factory()
        run_simple(hostname, port, app, reloader, debugger, evalex,
                   extra_files, 1, threaded, processes,
                   static_files=static_files, ssl_context=ssl_context)
    return action
240
Linux_x86_64/lib/python2.7/site-packages/werkzeug/security.py
Normal file
@@ -0,0 +1,240 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.security
    ~~~~~~~~~~~~~~~~~

    Security related helpers such as secure password hashing tools.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import os
import hmac
import hashlib
import posixpath
import codecs
from struct import Struct
from random import SystemRandom
from operator import xor
from itertools import starmap

from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
    string_types, to_native


SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
DEFAULT_PBKDF2_ITERATIONS = 1000


_pack_int = Struct('>I').pack
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
_sys_rng = SystemRandom()
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
                    if sep not in (None, '/'))


def _find_hashlib_algorithms():
    algos = getattr(hashlib, 'algorithms', None)
    if algos is None:
        algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
    rv = {}
    for algo in algos:
        func = getattr(hashlib, algo, None)
        if func is not None:
            rv[algo] = func
    return rv
_hash_funcs = _find_hashlib_algorithms()


def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Like :func:`pbkdf2_bin` but returns a hex encoded string.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha1.
    """
    rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
    return to_native(codecs.encode(rv, 'hex_codec'))


def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`.  It iterates `iterations` times and produces a
    key of `keylen` bytes.  By default SHA-1 is used as hash function,
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha1.
    """
    if isinstance(hashfunc, string_types):
        hashfunc = _hash_funcs[hashfunc]
    elif not hashfunc:
        hashfunc = hashlib.sha1
    salt = to_bytes(salt)
    mac = hmac.HMAC(to_bytes(data), None, hashfunc)
    if not keylen:
        keylen = mac.digest_size

    def _pseudorandom(x, mac=mac):
        h = mac.copy()
        h.update(x)
        return bytearray(h.digest())
    buf = bytearray()
    for block in range_type(1, -(-keylen // mac.digest_size) + 1):
        rv = u = _pseudorandom(salt + _pack_int(block))
        for i in range_type(iterations - 1):
            u = _pseudorandom(bytes(u))
            rv = bytearray(starmap(xor, izip(rv, u)))
        buf.extend(rv)
    return bytes(buf[:keylen])


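# Editorial sanity check, not part of the vendored file: the first published
# test vector from RFC 6070 for PBKDF2-HMAC-SHA1.
def _example_pbkdf2_hex():
    rv = pbkdf2_hex('password', 'salt', iterations=1, keylen=20)
    assert rv == '0c60c80f961f0e71f3a9b524af6012062fe037a6'

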
def safe_str_cmp(a, b):
    """This function compares strings in somewhat constant time.  This
    requires that the length of at least one string is known in advance.

    Returns `True` if the two strings are equal or `False` if they are not.

    .. versionadded:: 0.7
    """
    if _builtin_safe_str_cmp is not None:
        return _builtin_safe_str_cmp(a, b)
    if len(a) != len(b):
        return False
    rv = 0
    if isinstance(a, bytes) and isinstance(b, bytes) and not PY2:
        for x, y in izip(a, b):
            rv |= x ^ y
    else:
        for x, y in izip(a, b):
            rv |= ord(x) ^ ord(y)
    return rv == 0


def gen_salt(length):
    """Generate a random string of SALT_CHARS with specified ``length``."""
    if length <= 0:
        raise ValueError('requested salt of length <= 0')
    return ''.join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length))


def _hash_internal(method, salt, password):
    """Internal password hash helper.  Supports plaintext without salt,
    unsalted and salted passwords.  In case salted passwords are used
    hmac is used.
    """
    if method == 'plain':
        return password, method

    if isinstance(password, text_type):
        password = password.encode('utf-8')

    if method.startswith('pbkdf2:'):
        args = method[7:].split(':')
        if len(args) not in (1, 2):
            raise ValueError('Invalid number of arguments for PBKDF2')
        method = args.pop(0)
        iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
        is_pbkdf2 = True
        actual_method = 'pbkdf2:%s:%d' % (method, iterations)
    else:
        is_pbkdf2 = False
        actual_method = method

    hash_func = _hash_funcs.get(method)
    if hash_func is None:
        raise TypeError('invalid method %r' % method)

    if is_pbkdf2:
        if not salt:
            raise ValueError('Salt is required for PBKDF2')
        rv = pbkdf2_hex(password, salt, iterations,
                        hashfunc=hash_func)
    elif salt:
        if isinstance(salt, text_type):
            salt = salt.encode('utf-8')
        rv = hmac.HMAC(salt, password, hash_func).hexdigest()
    else:
        h = hash_func()
        h.update(password)
        rv = h.hexdigest()
    return rv, actual_method


def generate_password_hash(password, method='pbkdf2:sha1', salt_length=8):
|
||||
"""Hash a password with the given method and salt with with a string of
|
||||
the given length. The format of the string returned includes the method
|
||||
that was used so that :func:`check_password_hash` can check the hash.
|
||||
|
||||
The format for the hashed string looks like this::
|
||||
|
||||
method$salt$hash
|
||||
|
||||
This method can **not** generate unsalted passwords but it is possible
|
||||
to set the method to plain to enforce plaintext passwords. If a salt
|
||||
is used, hmac is used internally to salt the password.
|
||||
|
||||
If PBKDF2 is wanted it can be enabled by setting the method to
|
||||
``pbkdf2:method:iterations`` where iterations is optional::
|
||||
|
||||
pbkdf2:sha1:2000$salt$hash
|
||||
pbkdf2:sha1$salt$hash
|
||||
|
||||
:param password: the password to hash
|
||||
:param method: the hash method to use (one that hashlib supports), can
|
||||
optionally be in the format ``pbkdf2:<method>[:iterations]``
|
||||
to enable PBKDF2.
|
||||
:param salt_length: the length of the salt in letters
|
||||
"""
|
||||
salt = method != 'plain' and gen_salt(salt_length) or ''
|
||||
h, actual_method = _hash_internal(method, salt, password)
|
||||
return '%s$%s$%s' % (actual_method, salt, h)
|
||||
|
||||
|
||||
def check_password_hash(pwhash, password):
|
||||
"""check a password against a given salted and hashed password value.
|
||||
In order to support unsalted legacy passwords this method supports
|
||||
plain text passwords, md5 and sha1 hashes (both salted and unsalted).
|
||||
|
||||
Returns `True` if the password matched, `False` otherwise.
|
||||
|
||||
:param pwhash: a hashed string like returned by
|
||||
:func:`generate_password_hash`
|
||||
:param password: the plaintext password to compare against the hash
|
||||
"""
|
||||
if pwhash.count('$') < 2:
|
||||
return False
|
||||
method, salt, hashval = pwhash.split('$', 2)
|
||||
return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval)
|
||||
|
||||
|
||||
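A round trip through the two helpers above, as a quick sketch::

    from werkzeug.security import check_password_hash, generate_password_hash

    pwhash = generate_password_hash('default-password')
    assert check_password_hash(pwhash, 'default-password')
    assert not check_password_hash(pwhash, 'wrong-password')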
def safe_join(directory, filename):
|
||||
"""Safely join `directory` and `filename`. If this cannot be done,
|
||||
this function returns ``None``.
|
||||
|
||||
:param directory: the base directory.
|
||||
:param filename: the untrusted filename relative to that directory.
|
||||
"""
|
||||
filename = posixpath.normpath(filename)
|
||||
for sep in _os_alt_seps:
|
||||
if sep in filename:
|
||||
return None
|
||||
if os.path.isabs(filename) or filename.startswith('../'):
|
||||
return None
|
||||
return os.path.join(directory, filename)
|
||||
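A sketch of how this rejects traversal attempts (paths are
illustrative)::

    from werkzeug.security import safe_join

    safe_join('/var/www/uploads', 'report.txt')     # '/var/www/uploads/report.txt'
    safe_join('/var/www/uploads', '../etc/passwd')  # None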
749
Linux_x86_64/lib/python2.7/site-packages/werkzeug/serving.py
Normal file
|
|
@ -0,0 +1,749 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
werkzeug.serving
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
There are many ways to serve a WSGI application. While you're developing
|
||||
it you usually don't want a full-blown webserver like Apache but a simple
|
||||
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
|
||||
the standard library. If you're using older versions of Python you can
|
||||
download the package from the cheeseshop.
|
||||
|
||||
However there are some caveats. Source code won't reload itself when
|
||||
changed, and each time you kill the server using ``^C`` you get a
|
||||
`KeyboardInterrupt` error. While the latter is easy to solve, the
|
||||
former can be a real pain in some situations.
|
||||
|
||||
The easiest way is creating a small ``start-myproject.py`` that runs the
|
||||
application::
|
||||
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
from myproject import make_app
|
||||
from werkzeug.serving import run_simple
|
||||
|
||||
app = make_app(...)
|
||||
run_simple('localhost', 8080, app, use_reloader=True)
|
||||
|
||||
You can also pass it an `extra_files` keyword argument with a list of
|
||||
additional files (like configuration files) you want to observe.
|
||||
|
||||
For bigger applications you should consider using `werkzeug.script`
|
||||
instead of a simple start file.
|
||||
|
||||
|
||||
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
from __future__ import with_statement
|
||||
|
||||
import os
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import signal
|
||||
import subprocess
|
||||
|
||||
try:
|
||||
import thread
|
||||
except ImportError:
|
||||
import _thread as thread
|
||||
|
||||
try:
|
||||
from SocketServer import ThreadingMixIn, ForkingMixIn
|
||||
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
|
||||
except ImportError:
|
||||
from socketserver import ThreadingMixIn, ForkingMixIn
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
|
||||
import werkzeug
|
||||
from werkzeug._internal import _log
|
||||
from werkzeug._compat import iteritems, PY2, reraise, text_type, \
|
||||
wsgi_encoding_dance
|
||||
from werkzeug.urls import url_parse, url_unquote
|
||||
from werkzeug.exceptions import InternalServerError, BadRequest
|
||||
|
||||
|
||||
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
|
||||
"""A request handler that implements WSGI dispatching."""
|
||||
|
||||
@property
|
||||
def server_version(self):
|
||||
return 'Werkzeug/' + werkzeug.__version__
|
||||
|
||||
def make_environ(self):
|
||||
request_url = url_parse(self.path)
|
||||
|
||||
def shutdown_server():
|
||||
self.server.shutdown_signal = True
|
||||
|
||||
url_scheme = self.server.ssl_context is None and 'http' or 'https'
|
||||
path_info = url_unquote(request_url.path)
|
||||
|
||||
environ = {
|
||||
'wsgi.version': (1, 0),
|
||||
'wsgi.url_scheme': url_scheme,
|
||||
'wsgi.input': self.rfile,
|
||||
'wsgi.errors': sys.stderr,
|
||||
'wsgi.multithread': self.server.multithread,
|
||||
'wsgi.multiprocess': self.server.multiprocess,
|
||||
'wsgi.run_once': False,
|
||||
'werkzeug.server.shutdown':
|
||||
shutdown_server,
|
||||
'SERVER_SOFTWARE': self.server_version,
|
||||
'REQUEST_METHOD': self.command,
|
||||
'SCRIPT_NAME': '',
|
||||
'PATH_INFO': wsgi_encoding_dance(path_info),
|
||||
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
|
||||
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
|
||||
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
|
||||
'REMOTE_ADDR': self.client_address[0],
|
||||
'REMOTE_PORT': self.client_address[1],
|
||||
'SERVER_NAME': self.server.server_address[0],
|
||||
'SERVER_PORT': str(self.server.server_address[1]),
|
||||
'SERVER_PROTOCOL': self.request_version
|
||||
}
|
||||
|
||||
for key, value in self.headers.items():
|
||||
key = 'HTTP_' + key.upper().replace('-', '_')
|
||||
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
|
||||
environ[key] = value
|
||||
|
||||
if request_url.netloc:
|
||||
environ['HTTP_HOST'] = request_url.netloc
|
||||
|
||||
return environ
|
||||
|
||||
def run_wsgi(self):
|
||||
if self.headers.get('Expect', '').lower().strip() == '100-continue':
|
||||
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
|
||||
|
||||
environ = self.make_environ()
|
||||
headers_set = []
|
||||
headers_sent = []
|
||||
|
||||
def write(data):
|
||||
assert headers_set, 'write() before start_response'
|
||||
if not headers_sent:
|
||||
status, response_headers = headers_sent[:] = headers_set
|
||||
try:
|
||||
code, msg = status.split(None, 1)
|
||||
except ValueError:
|
||||
code, msg = status, ""
|
||||
self.send_response(int(code), msg)
|
||||
header_keys = set()
|
||||
for key, value in response_headers:
|
||||
self.send_header(key, value)
|
||||
key = key.lower()
|
||||
header_keys.add(key)
|
||||
if 'content-length' not in header_keys:
|
||||
self.close_connection = True
|
||||
self.send_header('Connection', 'close')
|
||||
if 'server' not in header_keys:
|
||||
self.send_header('Server', self.version_string())
|
||||
if 'date' not in header_keys:
|
||||
self.send_header('Date', self.date_time_string())
|
||||
self.end_headers()
|
||||
|
||||
assert type(data) is bytes, 'applications must write bytes'
|
||||
self.wfile.write(data)
|
||||
self.wfile.flush()
|
||||
|
||||
def start_response(status, response_headers, exc_info=None):
|
||||
if exc_info:
|
||||
try:
|
||||
if headers_sent:
|
||||
reraise(*exc_info)
|
||||
finally:
|
||||
exc_info = None
|
||||
elif headers_set:
|
||||
raise AssertionError('Headers already set')
|
||||
headers_set[:] = [status, response_headers]
|
||||
return write
|
||||
|
||||
def execute(app):
|
||||
application_iter = app(environ, start_response)
|
||||
try:
|
||||
for data in application_iter:
|
||||
write(data)
|
||||
if not headers_sent:
|
||||
write(b'')
|
||||
finally:
|
||||
if hasattr(application_iter, 'close'):
|
||||
application_iter.close()
|
||||
application_iter = None
|
||||
|
||||
try:
|
||||
execute(self.server.app)
|
||||
except (socket.error, socket.timeout) as e:
|
||||
self.connection_dropped(e, environ)
|
||||
except Exception:
|
||||
if self.server.passthrough_errors:
|
||||
raise
|
||||
from werkzeug.debug.tbtools import get_current_traceback
|
||||
traceback = get_current_traceback(ignore_system_exceptions=True)
|
||||
try:
|
||||
# if we haven't yet sent the headers but they are set
|
||||
# we roll back to be able to set them again.
|
||||
if not headers_sent:
|
||||
del headers_set[:]
|
||||
execute(InternalServerError())
|
||||
except Exception:
|
||||
pass
|
||||
self.server.log('error', 'Error on request:\n%s',
|
||||
traceback.plaintext)
|
||||
|
||||
def handle(self):
|
||||
"""Handles a request ignoring dropped connections."""
|
||||
rv = None
|
||||
try:
|
||||
rv = BaseHTTPRequestHandler.handle(self)
|
||||
except (socket.error, socket.timeout) as e:
|
||||
self.connection_dropped(e)
|
||||
except Exception:
|
||||
if self.server.ssl_context is None or not is_ssl_error():
|
||||
raise
|
||||
if self.server.shutdown_signal:
|
||||
self.initiate_shutdown()
|
||||
return rv
|
||||
|
||||
def initiate_shutdown(self):
|
||||
"""A horrible, horrible way to kill the server for Python 2.6 and
|
||||
later. It's the best we can do.
|
||||
"""
|
||||
# Windows does not provide SIGKILL, go with SIGTERM then.
|
||||
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
|
||||
# reloader active
|
||||
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
|
||||
os.kill(os.getpid(), sig)
|
||||
# python 2.7
|
||||
self.server._BaseServer__shutdown_request = True
|
||||
# python 2.6
|
||||
self.server._BaseServer__serving = False
|
||||
|
||||
def connection_dropped(self, error, environ=None):
|
||||
"""Called if the connection was closed by the client. By default
|
||||
nothing happens.
|
||||
"""
|
||||
|
||||
def handle_one_request(self):
|
||||
"""Handle a single HTTP request."""
|
||||
self.raw_requestline = self.rfile.readline()
|
||||
if not self.raw_requestline:
|
||||
self.close_connection = 1
|
||||
elif self.parse_request():
|
||||
return self.run_wsgi()
|
||||
|
||||
def send_response(self, code, message=None):
|
||||
"""Send the response header and log the response code."""
|
||||
self.log_request(code)
|
||||
if message is None:
|
||||
message = code in self.responses and self.responses[code][0] or ''
|
||||
if self.request_version != 'HTTP/0.9':
|
||||
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
|
||||
self.wfile.write(hdr.encode('ascii'))
|
||||
|
||||
def version_string(self):
|
||||
return BaseHTTPRequestHandler.version_string(self).strip()
|
||||
|
||||
def address_string(self):
|
||||
return self.client_address[0]
|
||||
|
||||
def log_request(self, code='-', size='-'):
|
||||
self.log('info', '"%s" %s %s', self.requestline, code, size)
|
||||
|
||||
def log_error(self, *args):
|
||||
self.log('error', *args)
|
||||
|
||||
def log_message(self, format, *args):
|
||||
self.log('info', format, *args)
|
||||
|
||||
def log(self, type, message, *args):
|
||||
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
|
||||
self.log_date_time_string(),
|
||||
message % args))
|
||||
|
||||
|
||||
#: backwards compatible name if someone is subclassing it
|
||||
BaseRequestHandler = WSGIRequestHandler
|
||||
|
||||
|
||||
def generate_adhoc_ssl_pair(cn=None):
|
||||
from random import random
|
||||
from OpenSSL import crypto
|
||||
|
||||
# pretty damn sure that this is not actually accepted by anyone
|
||||
if cn is None:
|
||||
cn = '*'
|
||||
|
||||
cert = crypto.X509()
|
||||
cert.set_serial_number(int(random() * sys.maxint))
|
||||
cert.gmtime_adj_notBefore(0)
|
||||
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
|
||||
|
||||
subject = cert.get_subject()
|
||||
subject.CN = cn
|
||||
subject.O = 'Dummy Certificate'
|
||||
|
||||
issuer = cert.get_issuer()
|
||||
issuer.CN = 'Untrusted Authority'
|
||||
issuer.O = 'Self-Signed'
|
||||
|
||||
pkey = crypto.PKey()
|
||||
pkey.generate_key(crypto.TYPE_RSA, 768)
|
||||
cert.set_pubkey(pkey)
|
||||
cert.sign(pkey, 'md5')
|
||||
|
||||
return cert, pkey
|
||||
|
||||
|
||||
def make_ssl_devcert(base_path, host=None, cn=None):
|
||||
"""Creates an SSL key for development. This should be used instead of
|
||||
the ``'adhoc'`` key which generates a new cert on each server start.
|
||||
It accepts a path for where it should store the key and cert and
|
||||
either a host or CN. If a host is given it will use the CN
|
||||
``*.host/CN=host``.
|
||||
|
||||
For more information see :func:`run_simple`.
|
||||
|
||||
.. versionadded:: 0.9
|
||||
|
||||
:param base_path: the path to the certificate and key. The extension
|
||||
``.crt`` is added for the certificate, ``.key`` is
|
||||
added for the key.
|
||||
:param host: the name of the host. This can be used as an alternative
|
||||
for the `cn`.
|
||||
:param cn: the `CN` to use.
|
||||
"""
|
||||
from OpenSSL import crypto
|
||||
if host is not None:
|
||||
cn = '*.%s/CN=%s' % (host, host)
|
||||
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
|
||||
|
||||
cert_file = base_path + '.crt'
|
||||
pkey_file = base_path + '.key'
|
||||
|
||||
with open(cert_file, 'w') as f:
|
||||
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
|
||||
with open(pkey_file, 'w') as f:
|
||||
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
|
||||
|
||||
return cert_file, pkey_file
|
||||
|
||||
|
||||
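A sketch of the intended flow: generate the pair once, then reuse the
two files as the ``ssl_context`` for :func:`run_simple` (the path and
the ``app`` object are illustrative)::

    from werkzeug.serving import make_ssl_devcert, run_simple

    cert_file, pkey_file = make_ssl_devcert('/tmp/devcert', host='localhost')
    # app: any WSGI application (hypothetical)
    run_simple('localhost', 4430, app, ssl_context=(cert_file, pkey_file))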
def generate_adhoc_ssl_context():
|
||||
"""Generates an adhoc SSL context for the development server."""
|
||||
from OpenSSL import SSL
|
||||
cert, pkey = generate_adhoc_ssl_pair()
|
||||
ctx = SSL.Context(SSL.SSLv23_METHOD)
|
||||
ctx.use_privatekey(pkey)
|
||||
ctx.use_certificate(cert)
|
||||
return ctx
|
||||
|
||||
|
||||
def load_ssl_context(cert_file, pkey_file):
|
||||
"""Loads an SSL context from a certificate and private key file."""
|
||||
from OpenSSL import SSL
|
||||
ctx = SSL.Context(SSL.SSLv23_METHOD)
|
||||
ctx.use_certificate_file(cert_file)
|
||||
ctx.use_privatekey_file(pkey_file)
|
||||
return ctx
|
||||
|
||||
|
||||
def is_ssl_error(error=None):
|
||||
"""Checks if the given error (or the current one) is an SSL error."""
|
||||
if error is None:
|
||||
error = sys.exc_info()[1]
|
||||
from OpenSSL import SSL
|
||||
return isinstance(error, SSL.Error)
|
||||
|
||||
|
||||
class _SSLConnectionFix(object):
|
||||
"""Wrapper around SSL connection to provide a working makefile()."""
|
||||
|
||||
def __init__(self, con):
|
||||
self._con = con
|
||||
|
||||
def makefile(self, mode, bufsize):
|
||||
return socket._fileobject(self._con, mode, bufsize)
|
||||
|
||||
def __getattr__(self, attrib):
|
||||
return getattr(self._con, attrib)
|
||||
|
||||
def shutdown(self, arg=None):
|
||||
try:
|
||||
self._con.shutdown()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def select_ip_version(host, port):
|
||||
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
|
||||
# disabled due to problems with current ipv6 implementations
|
||||
# and various operating systems. Probably this code also is
|
||||
# not supposed to work, but I can't come up with any other
|
||||
# ways to implement this.
|
||||
##try:
|
||||
## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
|
||||
## socket.SOCK_STREAM, 0,
|
||||
## socket.AI_PASSIVE)
|
||||
## if info:
|
||||
## return info[0][0]
|
||||
##except socket.gaierror:
|
||||
## pass
|
||||
if ':' in host and hasattr(socket, 'AF_INET6'):
|
||||
return socket.AF_INET6
|
||||
return socket.AF_INET
|
||||
|
||||
|
||||
class BaseWSGIServer(HTTPServer, object):
|
||||
"""Simple single-threaded, single-process WSGI server."""
|
||||
multithread = False
|
||||
multiprocess = False
|
||||
request_queue_size = 128
|
||||
|
||||
def __init__(self, host, port, app, handler=None,
|
||||
passthrough_errors=False, ssl_context=None):
|
||||
if handler is None:
|
||||
handler = WSGIRequestHandler
|
||||
self.address_family = select_ip_version(host, port)
|
||||
HTTPServer.__init__(self, (host, int(port)), handler)
|
||||
self.app = app
|
||||
self.passthrough_errors = passthrough_errors
|
||||
self.shutdown_signal = False
|
||||
|
||||
if ssl_context is not None:
|
||||
try:
|
||||
from OpenSSL import tsafe
|
||||
except ImportError:
|
||||
raise TypeError('SSL is not available if the OpenSSL '
|
||||
'library is not installed.')
|
||||
if isinstance(ssl_context, tuple):
|
||||
ssl_context = load_ssl_context(*ssl_context)
|
||||
if ssl_context == 'adhoc':
|
||||
ssl_context = generate_adhoc_ssl_context()
|
||||
self.socket = tsafe.Connection(ssl_context, self.socket)
|
||||
self.ssl_context = ssl_context
|
||||
else:
|
||||
self.ssl_context = None
|
||||
|
||||
def log(self, type, message, *args):
|
||||
_log(type, message, *args)
|
||||
|
||||
def serve_forever(self):
|
||||
self.shutdown_signal = False
|
||||
try:
|
||||
HTTPServer.serve_forever(self)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def handle_error(self, request, client_address):
|
||||
if self.passthrough_errors:
|
||||
raise
|
||||
else:
|
||||
return HTTPServer.handle_error(self, request, client_address)
|
||||
|
||||
def get_request(self):
|
||||
con, info = self.socket.accept()
|
||||
if self.ssl_context is not None:
|
||||
con = _SSLConnectionFix(con)
|
||||
return con, info
|
||||
|
||||
|
||||
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
|
||||
"""A WSGI server that does threading."""
|
||||
multithread = True
|
||||
|
||||
|
||||
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
|
||||
"""A WSGI server that does forking."""
|
||||
multiprocess = True
|
||||
|
||||
def __init__(self, host, port, app, processes=40, handler=None,
|
||||
passthrough_errors=False, ssl_context=None):
|
||||
BaseWSGIServer.__init__(self, host, port, app, handler,
|
||||
passthrough_errors, ssl_context)
|
||||
self.max_children = processes
|
||||
|
||||
|
||||
def make_server(host, port, app=None, threaded=False, processes=1,
|
||||
request_handler=None, passthrough_errors=False,
|
||||
ssl_context=None):
|
||||
"""Create a new server instance that is either threaded, or forks
|
||||
or just processes one request after another.
|
||||
"""
|
||||
if threaded and processes > 1:
|
||||
raise ValueError("cannot have a multithreaded and "
|
||||
"multi process server.")
|
||||
elif threaded:
|
||||
return ThreadedWSGIServer(host, port, app, request_handler,
|
||||
passthrough_errors, ssl_context)
|
||||
elif processes > 1:
|
||||
return ForkingWSGIServer(host, port, app, processes, request_handler,
|
||||
passthrough_errors, ssl_context)
|
||||
else:
|
||||
return BaseWSGIServer(host, port, app, request_handler,
|
||||
passthrough_errors, ssl_context)
|
||||
|
||||
|
||||
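For example, running a threaded server under manual control rather than
through :func:`run_simple` (``app`` stands in for any WSGI callable)::

    from werkzeug.serving import make_server

    # app: hypothetical WSGI callable
    server = make_server('localhost', 8080, app, threaded=True)
    server.serve_forever()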
def _iter_module_files():
|
||||
# The list call is necessary on Python 3 in case the module
|
||||
# dictionary modifies during iteration.
|
||||
for module in list(sys.modules.values()):
|
||||
filename = getattr(module, '__file__', None)
|
||||
if filename:
|
||||
old = None
|
||||
while not os.path.isfile(filename):
|
||||
old = filename
|
||||
filename = os.path.dirname(filename)
|
||||
if filename == old:
|
||||
break
|
||||
else:
|
||||
if filename[-4:] in ('.pyc', '.pyo'):
|
||||
filename = filename[:-1]
|
||||
yield filename
|
||||
|
||||
|
||||
def _reloader_stat_loop(extra_files=None, interval=1):
|
||||
"""When this function is run from the main thread, it will force other
|
||||
threads to exit when any modules currently loaded change.
|
||||
|
||||
Copyright notice: this function is based on autoreload.py from
|
||||
the CherryPy trac, which originated in the now-defunct WSGIKit.
|
||||
|
||||
:param extra_files: a list of additional files it should watch.
|
||||
"""
|
||||
from itertools import chain
|
||||
mtimes = {}
|
||||
while 1:
|
||||
for filename in chain(_iter_module_files(), extra_files or ()):
|
||||
try:
|
||||
mtime = os.stat(filename).st_mtime
|
||||
except OSError:
|
||||
continue
|
||||
|
||||
old_time = mtimes.get(filename)
|
||||
if old_time is None:
|
||||
mtimes[filename] = mtime
|
||||
continue
|
||||
elif mtime > old_time:
|
||||
_log('info', ' * Detected change in %r, reloading' % filename)
|
||||
sys.exit(3)
|
||||
time.sleep(interval)
|
||||
|
||||
|
||||
def _reloader_inotify(extra_files=None, interval=None):
|
||||
# Mutated by inotify loop when changes occur.
|
||||
changed = [False]
|
||||
|
||||
# Setup inotify watches
|
||||
from pyinotify import WatchManager, Notifier
|
||||
|
||||
# this API changed at one point, support both
|
||||
try:
|
||||
from pyinotify import EventsCodes as ec
|
||||
ec.IN_ATTRIB
|
||||
except (ImportError, AttributeError):
|
||||
import pyinotify as ec
|
||||
|
||||
wm = WatchManager()
|
||||
mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB
|
||||
|
||||
def signal_changed(event):
|
||||
if changed[0]:
|
||||
return
|
||||
_log('info', ' * Detected change in %r, reloading' % event.path)
|
||||
changed[:] = [True]
|
||||
|
||||
for fname in extra_files or ():
|
||||
wm.add_watch(fname, mask, signal_changed)
|
||||
|
||||
# ... And now we wait...
|
||||
notif = Notifier(wm)
|
||||
try:
|
||||
while not changed[0]:
|
||||
# always reiterate through sys.modules, adding them
|
||||
for fname in _iter_module_files():
|
||||
wm.add_watch(fname, mask, signal_changed)
|
||||
notif.process_events()
|
||||
if notif.check_events(timeout=interval):
|
||||
notif.read_events()
|
||||
# TODO Set timeout to something small and check parent liveliness
|
||||
finally:
|
||||
notif.stop()
|
||||
sys.exit(3)
|
||||
|
||||
|
||||
# currently we always use the stat loop reloader for the simple reason
|
||||
# that the inotify one does not respond to added files properly. Also
|
||||
# it's quite buggy and the API is a mess.
|
||||
reloader_loop = _reloader_stat_loop
|
||||
|
||||
|
||||
def restart_with_reloader():
|
||||
"""Spawn a new Python interpreter with the same arguments as this one,
|
||||
but running the reloader thread.
|
||||
"""
|
||||
while 1:
|
||||
_log('info', ' * Restarting with reloader')
|
||||
args = [sys.executable] + sys.argv
|
||||
new_environ = os.environ.copy()
|
||||
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
|
||||
|
||||
# a weird bug on windows. sometimes unicode strings end up in the
|
||||
# environment and subprocess.call does not like this, encode them
|
||||
# to latin1 and continue.
|
||||
if os.name == 'nt' and PY2:
|
||||
for key, value in iteritems(new_environ):
|
||||
if isinstance(value, text_type):
|
||||
new_environ[key] = value.encode('iso-8859-1')
|
||||
|
||||
exit_code = subprocess.call(args, env=new_environ)
|
||||
if exit_code != 3:
|
||||
return exit_code
|
||||
|
||||
|
||||
def run_with_reloader(main_func, extra_files=None, interval=1):
|
||||
"""Run the given function in an independent python interpreter."""
|
||||
import signal
|
||||
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
|
||||
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
|
||||
thread.start_new_thread(main_func, ())
|
||||
try:
|
||||
reloader_loop(extra_files, interval)
|
||||
except KeyboardInterrupt:
|
||||
return
|
||||
try:
|
||||
sys.exit(restart_with_reloader())
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
def run_simple(hostname, port, application, use_reloader=False,
|
||||
use_debugger=False, use_evalex=True,
|
||||
extra_files=None, reloader_interval=1, threaded=False,
|
||||
processes=1, request_handler=None, static_files=None,
|
||||
passthrough_errors=False, ssl_context=None):
|
||||
"""Start an application using wsgiref and with an optional reloader. This
|
||||
wraps `wsgiref` to fix the wrong default reporting of the multithreaded
|
||||
WSGI variable and adds optional multithreading and fork support.
|
||||
|
||||
This function has a command-line interface too::
|
||||
|
||||
python -m werkzeug.serving --help
|
||||
|
||||
.. versionadded:: 0.5
|
||||
`static_files` was added to simplify serving of static files as well
|
||||
as `passthrough_errors`.
|
||||
|
||||
.. versionadded:: 0.6
|
||||
support for SSL was added.
|
||||
|
||||
.. versionadded:: 0.8
|
||||
Added support for automatically loading a SSL context from certificate
|
||||
file and private key.
|
||||
|
||||
.. versionadded:: 0.9
|
||||
Added command-line interface.
|
||||
|
||||
:param hostname: The host for the application. eg: ``'localhost'``
|
||||
:param port: The port for the server. eg: ``8080``
|
||||
:param application: the WSGI application to execute
|
||||
:param use_reloader: should the server automatically restart the python
|
||||
process if modules were changed?
|
||||
:param use_debugger: should the werkzeug debugging system be used?
|
||||
:param use_evalex: should the exception evaluation feature be enabled?
|
||||
:param extra_files: a list of files the reloader should watch
|
||||
additionally to the modules. For example configuration
|
||||
files.
|
||||
:param reloader_interval: the interval for the reloader in seconds.
|
||||
:param threaded: should the process handle each request in a separate
|
||||
thread?
|
||||
:param processes: if greater than 1 then handle each request in a new process
|
||||
up to this maximum number of concurrent processes.
|
||||
:param request_handler: optional parameter that can be used to replace
|
||||
the default request handler with a different
|
||||
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
|
||||
subclass.
|
||||
:param static_files: a dict of paths for static files. This works exactly
|
||||
like :class:`SharedDataMiddleware`; it actually
|
||||
just wraps the application in that middleware before
|
||||
serving.
|
||||
:param passthrough_errors: set this to `True` to disable the error catching.
|
||||
This means that the server will die on errors but
|
||||
it can be useful to hook debuggers in (pdb etc.)
|
||||
:param ssl_context: an SSL context for the connection. Either an OpenSSL
|
||||
context, a tuple in the form ``(cert_file, pkey_file)``,
|
||||
the string ``'adhoc'`` if the server should
|
||||
automatically create one, or `None` to disable SSL
|
||||
(which is the default).
|
||||
"""
|
||||
if use_debugger:
|
||||
from werkzeug.debug import DebuggedApplication
|
||||
application = DebuggedApplication(application, use_evalex)
|
||||
if static_files:
|
||||
from werkzeug.wsgi import SharedDataMiddleware
|
||||
application = SharedDataMiddleware(application, static_files)
|
||||
|
||||
def inner():
|
||||
make_server(hostname, port, application, threaded,
|
||||
processes, request_handler,
|
||||
passthrough_errors, ssl_context).serve_forever()
|
||||
|
||||
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
|
||||
display_hostname = hostname != '*' and hostname or 'localhost'
|
||||
if ':' in display_hostname:
|
||||
display_hostname = '[%s]' % display_hostname
|
||||
_log('info', ' * Running on %s://%s:%d/', ssl_context is None
|
||||
and 'http' or 'https', display_hostname, port)
|
||||
if use_reloader:
|
||||
# Create and destroy a socket so that any exceptions are raised before
|
||||
# we spawn a separate Python interpreter and lose this ability.
|
||||
address_family = select_ip_version(hostname, port)
|
||||
test_socket = socket.socket(address_family, socket.SOCK_STREAM)
|
||||
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
test_socket.bind((hostname, port))
|
||||
test_socket.close()
|
||||
run_with_reloader(inner, extra_files, reloader_interval)
|
||||
else:
|
||||
inner()
|
||||
|
||||
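A slightly fuller invocation than the docstring example at the top of
this module, as a sketch (``app`` and the static directory are
illustrative)::

    from werkzeug.serving import run_simple

    run_simple('localhost', 8080, app,  # app: hypothetical WSGI application
               use_reloader=True, use_debugger=True,
               static_files={'/static': '/path/to/static'})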
def main():
|
||||
'''A simple command-line interface for :py:func:`run_simple`.'''
|
||||
|
||||
# in contrast to argparse, this works at least under Python < 2.7
|
||||
import optparse
|
||||
from werkzeug.utils import import_string
|
||||
|
||||
parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object')
|
||||
parser.add_option('-b', '--bind', dest='address',
|
||||
help='The hostname:port the app should listen on.')
|
||||
parser.add_option('-d', '--debug', dest='use_debugger',
|
||||
action='store_true', default=False,
|
||||
help='Use Werkzeug\'s debugger.')
|
||||
parser.add_option('-r', '--reload', dest='use_reloader',
|
||||
action='store_true', default=False,
|
||||
help='Reload Python process if modules change.')
|
||||
options, args = parser.parse_args()
|
||||
|
||||
hostname, port = None, None
|
||||
if options.address:
|
||||
address = options.address.split(':')
|
||||
hostname = address[0]
|
||||
if len(address) > 1:
|
||||
port = address[1]
|
||||
|
||||
if len(args) != 1:
|
||||
sys.stdout.write('No application supplied, or too many arguments. See --help\n')
|
||||
sys.exit(1)
|
||||
app = import_string(args[0])
|
||||
|
||||
run_simple(
|
||||
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
|
||||
application=app, use_reloader=options.use_reloader,
|
||||
use_debugger=options.use_debugger
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
879
Linux_x86_64/lib/python2.7/site-packages/werkzeug/test.py
Normal file
|
|
@ -0,0 +1,879 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
werkzeug.test
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
This module implements a client to WSGI applications for testing.
|
||||
|
||||
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
|
||||
:license: BSD, see LICENSE for more details.
|
||||
"""
|
||||
import sys
|
||||
import mimetypes
|
||||
from time import time
|
||||
from random import random
|
||||
from itertools import chain
|
||||
from tempfile import TemporaryFile
|
||||
from io import BytesIO
|
||||
|
||||
try:
|
||||
from urllib2 import Request as U2Request
|
||||
except ImportError:
|
||||
from urllib.request import Request as U2Request
|
||||
try:
|
||||
from http.cookiejar import CookieJar
|
||||
except ImportError: # Py2
|
||||
from cookielib import CookieJar
|
||||
|
||||
from werkzeug._compat import iterlists, iteritems, itervalues, to_native, \
|
||||
string_types, text_type, reraise, wsgi_encoding_dance, \
|
||||
make_literal_wrapper
|
||||
from werkzeug._internal import _empty_stream, _get_environ
|
||||
from werkzeug.wrappers import BaseRequest
|
||||
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
|
||||
url_unparse, url_parse
|
||||
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
|
||||
from werkzeug.utils import dump_cookie
|
||||
from werkzeug.datastructures import FileMultiDict, MultiDict, \
|
||||
CombinedMultiDict, Headers, FileStorage
|
||||
|
||||
|
||||
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
|
||||
boundary=None, charset='utf-8'):
|
||||
"""Encode a dict of values (either strings or file descriptors or
|
||||
:class:`FileStorage` objects) into a multipart encoded string stored
|
||||
in a file descriptor.
|
||||
"""
|
||||
if boundary is None:
|
||||
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
|
||||
_closure = [BytesIO(), 0, False]
|
||||
|
||||
if use_tempfile:
|
||||
def write_binary(string):
|
||||
stream, total_length, on_disk = _closure
|
||||
if on_disk:
|
||||
stream.write(string)
|
||||
else:
|
||||
length = len(string)
|
||||
if length + _closure[1] <= threshold:
|
||||
stream.write(string)
|
||||
else:
|
||||
new_stream = TemporaryFile('wb+')
|
||||
new_stream.write(stream.getvalue())
|
||||
new_stream.write(string)
|
||||
_closure[0] = new_stream
|
||||
_closure[2] = True
|
||||
_closure[1] = total_length + length
|
||||
else:
|
||||
write_binary = _closure[0].write
|
||||
|
||||
def write(string):
|
||||
write_binary(string.encode(charset))
|
||||
|
||||
if not isinstance(values, MultiDict):
|
||||
values = MultiDict(values)
|
||||
|
||||
for key, values in iterlists(values):
|
||||
for value in values:
|
||||
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
|
||||
(boundary, key))
|
||||
reader = getattr(value, 'read', None)
|
||||
if reader is not None:
|
||||
filename = getattr(value, 'filename',
|
||||
getattr(value, 'name', None))
|
||||
content_type = getattr(value, 'content_type', None)
|
||||
if content_type is None:
|
||||
content_type = filename and \
|
||||
mimetypes.guess_type(filename)[0] or \
|
||||
'application/octet-stream'
|
||||
if filename is not None:
|
||||
write('; filename="%s"\r\n' % filename)
|
||||
else:
|
||||
write('\r\n')
|
||||
write('Content-Type: %s\r\n\r\n' % content_type)
|
||||
while 1:
|
||||
chunk = reader(16384)
|
||||
if not chunk:
|
||||
break
|
||||
write_binary(chunk)
|
||||
else:
|
||||
if isinstance(value, string_types):
|
||||
value = to_native(value, charset)
|
||||
else:
|
||||
value = str(value)
|
||||
write('\r\n\r\n' + value)
|
||||
write('\r\n')
|
||||
write('--%s--\r\n' % boundary)
|
||||
|
||||
length = int(_closure[0].tell())
|
||||
_closure[0].seek(0)
|
||||
return _closure[0], length, boundary
|
||||
|
||||
|
||||
def encode_multipart(values, boundary=None, charset='utf-8'):
|
||||
"""Like `stream_encode_multipart` but returns a tuple in the form
|
||||
(``boundary``, ``data``) where data is a bytestring.
|
||||
"""
|
||||
stream, length, boundary = stream_encode_multipart(
|
||||
values, use_tempfile=False, boundary=boundary, charset=charset)
|
||||
return boundary, stream.read()
|
||||
|
||||
|
||||
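A sketch of the in-memory variant (field name and value are
illustrative)::

    from werkzeug.test import encode_multipart

    boundary, data = encode_multipart({'field': u'value'})
    content_type = 'multipart/form-data; boundary="%s"' % boundary
    # `data` is the complete bytestring request body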
def File(fd, filename=None, mimetype=None):
|
||||
"""Backwards compat."""
|
||||
from warnings import warn
|
||||
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
|
||||
'EnvironBuilder or FileStorage instead'))
|
||||
return FileStorage(fd, filename=filename, content_type=mimetype)
|
||||
|
||||
|
||||
class _TestCookieHeaders(object):
|
||||
"""A headers adapter for cookielib
|
||||
"""
|
||||
|
||||
def __init__(self, headers):
|
||||
self.headers = headers
|
||||
|
||||
def getheaders(self, name):
|
||||
headers = []
|
||||
name = name.lower()
|
||||
for k, v in self.headers:
|
||||
if k.lower() == name:
|
||||
headers.append(v)
|
||||
return headers
|
||||
|
||||
def get_all(self, name, default=None):
|
||||
rv = []
|
||||
for k, v in self.headers:
|
||||
if k.lower() == name.lower():
|
||||
rv.append(v)
|
||||
return rv or default or []
|
||||
|
||||
|
||||
class _TestCookieResponse(object):
|
||||
"""Something that looks like a httplib.HTTPResponse, but is actually just an
|
||||
adapter for our test responses to make them available for cookielib.
|
||||
"""
|
||||
|
||||
def __init__(self, headers):
|
||||
self.headers = _TestCookieHeaders(headers)
|
||||
|
||||
def info(self):
|
||||
return self.headers
|
||||
|
||||
|
||||
class _TestCookieJar(CookieJar):
|
||||
"""A cookielib.CookieJar modified to inject and read cookie headers from
|
||||
and to wsgi environments, and wsgi application responses.
|
||||
"""
|
||||
|
||||
def inject_wsgi(self, environ):
|
||||
"""Inject the cookies as client headers into the server's wsgi
|
||||
environment.
|
||||
"""
|
||||
cvals = []
|
||||
for cookie in self:
|
||||
cvals.append('%s=%s' % (cookie.name, cookie.value))
|
||||
if cvals:
|
||||
environ['HTTP_COOKIE'] = '; '.join(cvals)
|
||||
|
||||
def extract_wsgi(self, environ, headers):
|
||||
"""Extract the server's set-cookie headers as cookies into the
|
||||
cookie jar.
|
||||
"""
|
||||
self.extract_cookies(
|
||||
_TestCookieResponse(headers),
|
||||
U2Request(get_current_url(environ)),
|
||||
)
|
||||
|
||||
|
||||
def _iter_data(data):
|
||||
"""Iterates over a dict or multidict yielding all keys and values.
|
||||
This is used to iterate over the data passed to the
|
||||
:class:`EnvironBuilder`.
|
||||
"""
|
||||
if isinstance(data, MultiDict):
|
||||
for key, values in iterlists(data):
|
||||
for value in values:
|
||||
yield key, value
|
||||
else:
|
||||
for key, values in iteritems(data):
|
||||
if isinstance(values, list):
|
||||
for value in values:
|
||||
yield key, value
|
||||
else:
|
||||
yield key, values
|
||||
|
||||
|
||||
class EnvironBuilder(object):
|
||||
"""This class can be used to conveniently create a WSGI environment
|
||||
for testing purposes. It can be used to quickly create WSGI environments
|
||||
or request objects from arbitrary data.
|
||||
|
||||
The signature of this class is also used in some other places as of
|
||||
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
|
||||
:meth:`Client.open`). Because of this most of the functionality is
|
||||
available through the constructor alone.
|
||||
|
||||
Files and regular form data can be manipulated independently of each
|
||||
other with the :attr:`form` and :attr:`files` attributes, but are
|
||||
passed with the same argument to the constructor: `data`.
|
||||
|
||||
`data` can be any of these values:
|
||||
|
||||
- a `str`: If it's a string it is converted into an :attr:`input_stream`,
|
||||
the :attr:`content_length` is set and you have to provide a
|
||||
:attr:`content_type`.
|
||||
- a `dict`: If it's a dict the keys have to be strings and the values
|
||||
any of the following objects:
|
||||
|
||||
- a :class:`file`-like object. These are converted into
|
||||
:class:`FileStorage` objects automatically.
|
||||
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
|
||||
with the tuple items as positional arguments.
|
||||
|
||||
.. versionadded:: 0.6
|
||||
`path` and `base_url` can now be unicode strings that are encoded using
|
||||
the :func:`iri_to_uri` function.
|
||||
|
||||
:param path: the path of the request. In the WSGI environment this will
|
||||
end up as `PATH_INFO`. If the `query_string` is not defined
|
||||
and there is a question mark in the `path` everything after
|
||||
it is used as query string.
|
||||
:param base_url: the base URL is a URL that is used to extract the WSGI
|
||||
URL scheme, host (server name + server port) and the
|
||||
script root (`SCRIPT_NAME`).
|
||||
:param query_string: an optional string or dict with URL parameters.
|
||||
:param method: the HTTP method to use, defaults to `GET`.
|
||||
:param input_stream: an optional input stream. Do not specify this and
|
||||
`data`. As soon as an input stream is set you can't
|
||||
modify :attr:`args` and :attr:`files` unless you
|
||||
set the :attr:`input_stream` to `None` again.
|
||||
:param content_type: The content type for the request. As of 0.5 you
|
||||
don't have to provide this when specifying files
|
||||
and form data via `data`.
|
||||
:param content_length: The content length for the request. You don't
|
||||
have to specify this when providing data via
|
||||
`data`.
|
||||
:param errors_stream: an optional error stream that is used for
|
||||
`wsgi.errors`. Defaults to :data:`stderr`.
|
||||
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
|
||||
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
|
||||
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
|
||||
:param headers: an optional list or :class:`Headers` object of headers.
|
||||
:param data: a string or dict of form data. See explanation above.
|
||||
:param environ_base: an optional dict of environment defaults.
|
||||
:param environ_overrides: an optional dict of environment overrides.
|
||||
:param charset: the charset used to encode unicode data.
|
||||
"""
|
||||
|
||||
#: the server protocol to use. defaults to HTTP/1.1
|
||||
server_protocol = 'HTTP/1.1'
|
||||
|
||||
#: the wsgi version to use. defaults to (1, 0)
|
||||
wsgi_version = (1, 0)
|
||||
|
||||
#: the default request class for :meth:`get_request`
|
||||
request_class = BaseRequest
|
||||
|
||||
def __init__(self, path='/', base_url=None, query_string=None,
|
||||
method='GET', input_stream=None, content_type=None,
|
||||
content_length=None, errors_stream=None, multithread=False,
|
||||
multiprocess=False, run_once=False, headers=None, data=None,
|
||||
environ_base=None, environ_overrides=None, charset='utf-8'):
|
||||
path_s = make_literal_wrapper(path)
|
||||
if query_string is None and path_s('?') in path:
|
||||
path, query_string = path.split(path_s('?'), 1)
|
||||
self.charset = charset
|
||||
self.path = iri_to_uri(path)
|
||||
if base_url is not None:
|
||||
base_url = url_fix(iri_to_uri(base_url, charset), charset)
|
||||
self.base_url = base_url
|
||||
if isinstance(query_string, (bytes, text_type)):
|
||||
self.query_string = query_string
|
||||
else:
|
||||
if query_string is None:
|
||||
query_string = MultiDict()
|
||||
elif not isinstance(query_string, MultiDict):
|
||||
query_string = MultiDict(query_string)
|
||||
self.args = query_string
|
||||
self.method = method
|
||||
if headers is None:
|
||||
headers = Headers()
|
||||
elif not isinstance(headers, Headers):
|
||||
headers = Headers(headers)
|
||||
self.headers = headers
|
||||
if content_type is not None:
|
||||
self.content_type = content_type
|
||||
if errors_stream is None:
|
||||
errors_stream = sys.stderr
|
||||
self.errors_stream = errors_stream
|
||||
self.multithread = multithread
|
||||
self.multiprocess = multiprocess
|
||||
self.run_once = run_once
|
||||
self.environ_base = environ_base
|
||||
self.environ_overrides = environ_overrides
|
||||
self.input_stream = input_stream
|
||||
self.content_length = content_length
|
||||
self.closed = False
|
||||
|
||||
if data:
|
||||
if input_stream is not None:
|
||||
raise TypeError('can\'t provide input stream and data')
|
||||
if isinstance(data, text_type):
|
||||
data = data.encode(self.charset)
|
||||
if isinstance(data, bytes):
|
||||
self.input_stream = BytesIO(data)
|
||||
if self.content_length is None:
|
||||
self.content_length = len(data)
|
||||
else:
|
||||
for key, value in _iter_data(data):
|
||||
if isinstance(value, (tuple, dict)) or \
|
||||
hasattr(value, 'read'):
|
||||
self._add_file_from_data(key, value)
|
||||
else:
|
||||
self.form.setlistdefault(key).append(value)
|
||||
|
||||
def _add_file_from_data(self, key, value):
|
||||
"""Called in the EnvironBuilder to add files from the data dict."""
|
||||
if isinstance(value, tuple):
|
||||
self.files.add_file(key, *value)
|
||||
elif isinstance(value, dict):
|
||||
from warnings import warn
|
||||
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
|
||||
'as `data`. Use tuples or FileStorage '
|
||||
'objects instead'), stacklevel=2)
|
||||
value = dict(value)
|
||||
mimetype = value.pop('mimetype', None)
|
||||
if mimetype is not None:
|
||||
value['content_type'] = mimetype
|
||||
self.files.add_file(key, **value)
|
||||
else:
|
||||
self.files.add_file(key, value)
|
||||
|
||||
def _get_base_url(self):
|
||||
return url_unparse((self.url_scheme, self.host,
|
||||
self.script_root, '', '')).rstrip('/') + '/'
|
||||
|
||||
def _set_base_url(self, value):
|
||||
if value is None:
|
||||
scheme = 'http'
|
||||
netloc = 'localhost'
|
||||
script_root = ''
|
||||
else:
|
||||
scheme, netloc, script_root, qs, anchor = url_parse(value)
|
||||
if qs or anchor:
|
||||
raise ValueError('base url must not contain a query string '
|
||||
'or fragment')
|
||||
self.script_root = script_root.rstrip('/')
|
||||
self.host = netloc
|
||||
self.url_scheme = scheme
|
||||
|
||||
base_url = property(_get_base_url, _set_base_url, doc='''
|
||||
The base URL is a URL that is used to extract the WSGI
|
||||
URL scheme, host (server name + server port) and the
|
||||
script root (`SCRIPT_NAME`).''')
|
||||
del _get_base_url, _set_base_url
|
||||
|
||||
def _get_content_type(self):
|
||||
ct = self.headers.get('Content-Type')
|
||||
if ct is None and not self._input_stream:
|
||||
if self.method in ('POST', 'PUT', 'PATCH'):
|
||||
if self._files:
|
||||
return 'multipart/form-data'
|
||||
return 'application/x-www-form-urlencoded'
|
||||
return None
|
||||
return ct
|
||||
|
||||
def _set_content_type(self, value):
|
||||
if value is None:
|
||||
self.headers.pop('Content-Type', None)
|
||||
else:
|
||||
self.headers['Content-Type'] = value
|
||||
|
||||
content_type = property(_get_content_type, _set_content_type, doc='''
|
||||
The content type for the request. Reflected from and to the
|
||||
:attr:`headers`. Do not set if you set :attr:`files` or
|
||||
:attr:`form` for auto detection.''')
|
||||
del _get_content_type, _set_content_type
|
||||
|
||||
def _get_content_length(self):
|
||||
return self.headers.get('Content-Length', type=int)
|
||||
|
||||
def _set_content_length(self, value):
|
||||
if value is None:
|
||||
self.headers.pop('Content-Length', None)
|
||||
else:
|
||||
self.headers['Content-Length'] = str(value)
|
||||
|
||||
content_length = property(_get_content_length, _set_content_length, doc='''
|
||||
The content length as integer. Reflected from and to the
|
||||
:attr:`headers`. Do not set if you set :attr:`files` or
|
||||
:attr:`form` for auto detection.''')
|
||||
del _get_content_length, _set_content_length
|
||||
|
||||
def form_property(name, storage, doc):
|
||||
key = '_' + name
|
||||
def getter(self):
|
||||
if self._input_stream is not None:
|
||||
raise AttributeError('an input stream is defined')
|
||||
rv = getattr(self, key)
|
||||
if rv is None:
|
||||
rv = storage()
|
||||
setattr(self, key, rv)
|
||||
return rv
|
||||
def setter(self, value):
|
||||
self._input_stream = None
|
||||
setattr(self, key, value)
|
||||
return property(getter, setter, doc)
|
||||
|
||||
form = form_property('form', MultiDict, doc='''
|
||||
A :class:`MultiDict` of form values.''')
|
||||
files = form_property('files', FileMultiDict, doc='''
|
||||
A :class:`FileMultiDict` of uploaded files. You can use the
|
||||
:meth:`~FileMultiDict.add_file` method to add new files to the
|
||||
dict.''')
|
||||
del form_property
|
||||
|
||||
def _get_input_stream(self):
|
||||
return self._input_stream
|
||||
|
||||
def _set_input_stream(self, value):
|
||||
self._input_stream = value
|
||||
self._form = self._files = None
|
||||
|
||||
input_stream = property(_get_input_stream, _set_input_stream, doc='''
|
||||
An optional input stream. If you set this it will clear
|
||||
:attr:`form` and :attr:`files`.''')
|
||||
del _get_input_stream, _set_input_stream
|
||||
|
||||
def _get_query_string(self):
|
||||
if self._query_string is None:
|
||||
if self._args is not None:
|
||||
return url_encode(self._args, charset=self.charset)
|
||||
return ''
|
||||
return self._query_string
|
||||
|
||||
def _set_query_string(self, value):
|
||||
self._query_string = value
|
||||
self._args = None
|
||||
|
||||
query_string = property(_get_query_string, _set_query_string, doc='''
|
||||
The query string. If you set this to a string :attr:`args` will
|
||||
no longer be available.''')
|
||||
del _get_query_string, _set_query_string
|
||||
|
||||
def _get_args(self):
|
||||
if self._query_string is not None:
|
||||
raise AttributeError('a query string is defined')
|
||||
if self._args is None:
|
||||
self._args = MultiDict()
|
||||
return self._args
|
||||
|
||||
def _set_args(self, value):
|
||||
self._query_string = None
|
||||
self._args = value
|
||||
|
||||
args = property(_get_args, _set_args, doc='''
|
||||
The URL arguments as :class:`MultiDict`.''')
|
||||
del _get_args, _set_args
|
||||
|
||||
@property
|
||||
def server_name(self):
|
||||
"""The server name (read-only, use :attr:`host` to set)"""
|
||||
return self.host.split(':', 1)[0]
|
||||
|
||||
@property
|
||||
def server_port(self):
|
||||
"""The server port as integer (read-only, use :attr:`host` to set)"""
|
||||
pieces = self.host.split(':', 1)
|
||||
if len(pieces) == 2 and pieces[1].isdigit():
|
||||
return int(pieces[1])
|
||||
elif self.url_scheme == 'https':
|
||||
return 443
|
||||
return 80
|
||||
|
||||
def __del__(self):
|
||||
try:
|
||||
self.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
"""Closes all files. If you put real :class:`file` objects into the
|
||||
:attr:`files` dict you can call this method to automatically close
|
||||
them all in one go.
|
||||
"""
|
||||
if self.closed:
|
||||
return
|
||||
try:
|
||||
files = itervalues(self.files)
|
||||
except AttributeError:
|
||||
files = ()
|
||||
for f in files:
|
||||
try:
|
||||
f.close()
|
||||
except Exception:
|
||||
pass
|
||||
self.closed = True
|
||||
|
||||
def get_environ(self):
|
||||
"""Return the built environ."""
|
||||
input_stream = self.input_stream
|
||||
content_length = self.content_length
|
||||
content_type = self.content_type
|
||||
|
||||
if input_stream is not None:
|
||||
start_pos = input_stream.tell()
|
||||
input_stream.seek(0, 2)
|
||||
end_pos = input_stream.tell()
|
||||
input_stream.seek(start_pos)
|
||||
content_length = end_pos - start_pos
|
||||
elif content_type == 'multipart/form-data':
|
||||
values = CombinedMultiDict([self.form, self.files])
|
||||
input_stream, content_length, boundary = \
|
||||
stream_encode_multipart(values, charset=self.charset)
|
||||
content_type += '; boundary="%s"' % boundary
|
||||
elif content_type == 'application/x-www-form-urlencoded':
|
||||
#py2v3 review
|
||||
values = url_encode(self.form, charset=self.charset)
|
||||
values = values.encode('ascii')
|
||||
content_length = len(values)
|
||||
input_stream = BytesIO(values)
|
||||
else:
|
||||
input_stream = _empty_stream
|
||||
|
||||
result = {}
|
||||
if self.environ_base:
|
||||
result.update(self.environ_base)
|
||||
|
||||
def _path_encode(x):
|
||||
return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
|
||||
|
||||
qs = wsgi_encoding_dance(self.query_string)
|
||||
|
||||
result.update({
|
||||
'REQUEST_METHOD': self.method,
|
||||
'SCRIPT_NAME': _path_encode(self.script_root),
|
||||
'PATH_INFO': _path_encode(self.path),
|
||||
'QUERY_STRING': qs,
|
||||
'SERVER_NAME': self.server_name,
|
||||
'SERVER_PORT': str(self.server_port),
|
||||
'HTTP_HOST': self.host,
|
||||
'SERVER_PROTOCOL': self.server_protocol,
|
||||
'CONTENT_TYPE': content_type or '',
|
||||
'CONTENT_LENGTH': str(content_length or '0'),
|
||||
'wsgi.version': self.wsgi_version,
|
||||
'wsgi.url_scheme': self.url_scheme,
|
||||
'wsgi.input': input_stream,
|
||||
'wsgi.errors': self.errors_stream,
|
||||
'wsgi.multithread': self.multithread,
|
||||
'wsgi.multiprocess': self.multiprocess,
|
||||
'wsgi.run_once': self.run_once
|
||||
})
|
||||
for key, value in self.headers.to_wsgi_list():
|
||||
result['HTTP_%s' % key.upper().replace('-', '_')] = value
|
||||
if self.environ_overrides:
|
||||
result.update(self.environ_overrides)
|
||||
return result
|
||||
|
||||
def get_request(self, cls=None):
|
||||
"""Returns a request with the data. If the request class is not
|
||||
specified :attr:`request_class` is used.
|
||||
|
||||
:param cls: The request wrapper to use.
|
||||
"""
|
||||
if cls is None:
|
||||
cls = self.request_class
|
||||
return cls(self.get_environ())
|
||||
|
||||
|
||||
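A sketch of building an environ and a matching request object::

    from werkzeug.test import EnvironBuilder

    builder = EnvironBuilder(path='/foo', method='POST',
                             data={'field': 'value'})
    environ = builder.get_environ()
    request = builder.get_request()
    builder.close()  # close any file objects held in `files`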
class ClientRedirectError(Exception):
|
||||
"""
|
||||
If a redirect loop is detected when using follow_redirects=True with
|
||||
the :class:`Client`, then this exception is raised.
|
||||
"""
|
||||
|
||||
|
||||
class Client(object):
|
||||
"""This class allows to send requests to a wrapped application.
|
||||
|
||||
The response wrapper can be a class or factory function that takes
|
||||
three arguments: app_iter, status and headers. The default response
|
||||
wrapper just returns a tuple.
|
||||
|
||||
Example::
|
||||
|
||||
class ClientResponse(BaseResponse):
|
||||
...
|
||||
|
||||
client = Client(MyApplication(), response_wrapper=ClientResponse)
|
||||
|
||||
The use_cookies parameter indicates whether cookies should be stored and
|
||||
sent for subsequent requests. This is True by default, but passing False
|
||||
will disable this behaviour.
|
||||
|
||||
If you want to request some subdomain of your application you may set
|
||||
`allow_subdomain_redirects` to `True`; otherwise no external redirects
|
||||
are allowed.
|
||||
|
||||
.. versionadded:: 0.5
|
||||
`use_cookies` is new in this version. Older versions did not provide
|
||||
builtin cookie support.
|
||||
"""
|
||||
|
||||
def __init__(self, application, response_wrapper=None, use_cookies=True,
|
||||
allow_subdomain_redirects=False):
|
||||
self.application = application
|
||||
self.response_wrapper = response_wrapper
|
||||
if use_cookies:
|
||||
self.cookie_jar = _TestCookieJar()
|
||||
else:
|
||||
self.cookie_jar = None
|
||||
self.allow_subdomain_redirects = allow_subdomain_redirects
|
||||
|
||||
def set_cookie(self, server_name, key, value='', max_age=None,
|
||||
expires=None, path='/', domain=None, secure=None,
|
||||
httponly=False, charset='utf-8'):
|
||||
"""Sets a cookie in the client's cookie jar. The server name
|
||||
is required and has to match the one that is also passed to
|
||||
the open call.
|
||||
"""
|
||||
assert self.cookie_jar is not None, 'cookies disabled'
|
||||
header = dump_cookie(key, value, max_age, expires, path, domain,
|
||||
secure, httponly, charset)
|
||||
environ = create_environ(path, base_url='http://' + server_name)
|
||||
headers = [('Set-Cookie', header)]
|
||||
self.cookie_jar.extract_wsgi(environ, headers)
|
||||
|
||||
def delete_cookie(self, server_name, key, path='/', domain=None):
|
||||
"""Deletes a cookie in the test client."""
|
||||
self.set_cookie(server_name, key, expires=0, max_age=0,
|
||||
path=path, domain=domain)
|
||||
|
||||
def run_wsgi_app(self, environ, buffered=False):
|
||||
"""Runs the wrapped WSGI app with the given environment."""
|
||||
if self.cookie_jar is not None:
|
||||
self.cookie_jar.inject_wsgi(environ)
|
||||
rv = run_wsgi_app(self.application, environ, buffered=buffered)
|
||||
if self.cookie_jar is not None:
|
||||
self.cookie_jar.extract_wsgi(environ, rv[2])
|
||||
return rv
|
||||
|
||||
def resolve_redirect(self, response, new_location, environ, buffered=False):
|
||||
"""Resolves a single redirect and triggers the request again
|
||||
directly on this redirect client.
|
||||
"""
|
||||
scheme, netloc, script_root, qs, anchor = url_parse(new_location)
|
||||
base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
|
||||
|
||||
cur_server_name = netloc.split(':', 1)[0].split('.')
|
||||
real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
|
||||
|
||||
if self.allow_subdomain_redirects:
|
||||
allowed = cur_server_name[-len(real_server_name):] == real_server_name
|
||||
else:
|
||||
allowed = cur_server_name == real_server_name
|
||||
|
||||
if not allowed:
|
||||
raise RuntimeError('%r does not support redirect to '
|
||||
'external targets' % self.__class__)
|
||||
|
||||
# For redirect handling we temporarily disable the response
|
||||
# wrapper. This is not threadsafe but not a real concern
|
||||
# since the test client must not be shared anyways.
|
||||
old_response_wrapper = self.response_wrapper
|
||||
self.response_wrapper = None
|
||||
try:
|
||||
return self.open(path=script_root, base_url=base_url,
|
||||
query_string=qs, as_tuple=True,
|
||||
buffered=buffered)
|
||||
finally:
|
||||
self.response_wrapper = old_response_wrapper
|
||||
|
||||
def open(self, *args, **kwargs):
|
||||
"""Takes the same arguments as the :class:`EnvironBuilder` class with
|
||||
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
|
||||
environment as the only argument instead of the :class:`EnvironBuilder`
|
||||
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
|
||||
that change the type of the return value or the way the application is
|
||||
executed.
|
||||
|
||||
.. versionchanged:: 0.5
|
||||
If a dict is provided as file in the dict for the `data` parameter
|
||||
the content type has to be called `content_type` now instead of
|
||||
`mimetype`. This change was made for consistency with
|
||||
:class:`werkzeug.FileWrapper`.
|
||||
|
||||
The `follow_redirects` parameter was added to :func:`open`.
|
||||
|
||||
Additional parameters:
|
||||
|
||||
:param as_tuple: Returns a tuple in the form ``(environ, result)``
|
||||
:param buffered: Set this to True to buffer the application run.
|
||||
This will automatically close the application for
|
||||
you as well.
|
||||
:param follow_redirects: Set this to True if the `Client` should
|
||||
follow HTTP redirects.
|
||||
"""
|
||||
as_tuple = kwargs.pop('as_tuple', False)
|
||||
buffered = kwargs.pop('buffered', False)
|
||||
follow_redirects = kwargs.pop('follow_redirects', False)
|
||||
environ = None
|
||||
if not kwargs and len(args) == 1:
|
||||
if isinstance(args[0], EnvironBuilder):
|
||||
environ = args[0].get_environ()
|
||||
elif isinstance(args[0], dict):
|
||||
environ = args[0]
|
||||
if environ is None:
|
||||
builder = EnvironBuilder(*args, **kwargs)
|
||||
try:
|
||||
environ = builder.get_environ()
|
||||
finally:
|
||||
builder.close()
|
||||
|
||||
response = self.run_wsgi_app(environ, buffered=buffered)
|
||||
|
||||
# handle redirects
|
||||
redirect_chain = []
|
||||
while 1:
|
||||
status_code = int(response[1].split(None, 1)[0])
|
||||
if status_code not in (301, 302, 303, 305, 307) \
|
||||
or not follow_redirects:
|
||||
break
|
||||
new_location = response[2]['location']
|
||||
new_redirect_entry = (new_location, status_code)
|
||||
if new_redirect_entry in redirect_chain:
|
||||
raise ClientRedirectError('loop detected')
|
||||
redirect_chain.append(new_redirect_entry)
|
||||
environ, response = self.resolve_redirect(response, new_location,
|
||||
environ, buffered=buffered)
|
||||
|
||||
if self.response_wrapper is not None:
|
||||
response = self.response_wrapper(*response)
|
||||
if as_tuple:
|
||||
return environ, response
|
||||
return response
|
||||
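
    # Sketch of typical calls (``app`` is a stand-in WSGI application; the
    # paths and query values are invented for illustration):
    #
    #   client = Client(app, BaseResponse)
    #   resp = client.get('/login', follow_redirects=True)
    #   environ, resp = client.open('/search', query_string={'q': 'x'},
    #                               as_tuple=True)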

    def get(self, *args, **kw):
        """Like open but method is enforced to GET."""
        kw['method'] = 'GET'
        return self.open(*args, **kw)

    def patch(self, *args, **kw):
        """Like open but method is enforced to PATCH."""
        kw['method'] = 'PATCH'
        return self.open(*args, **kw)

    def post(self, *args, **kw):
        """Like open but method is enforced to POST."""
        kw['method'] = 'POST'
        return self.open(*args, **kw)

    def head(self, *args, **kw):
        """Like open but method is enforced to HEAD."""
        kw['method'] = 'HEAD'
        return self.open(*args, **kw)

    def put(self, *args, **kw):
        """Like open but method is enforced to PUT."""
        kw['method'] = 'PUT'
        return self.open(*args, **kw)

    def delete(self, *args, **kw):
        """Like open but method is enforced to DELETE."""
        kw['method'] = 'DELETE'
        return self.open(*args, **kw)

    def options(self, *args, **kw):
        """Like open but method is enforced to OPTIONS."""
        kw['method'] = 'OPTIONS'
        return self.open(*args, **kw)

    def trace(self, *args, **kw):
        """Like open but method is enforced to TRACE."""
        kw['method'] = 'TRACE'
        return self.open(*args, **kw)

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.application
        )


def create_environ(*args, **kwargs):
    """Create a new WSGI environ dict based on the values passed. The first
    parameter should be the path of the request which defaults to '/'. The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.

    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.

    .. versionchanged:: 0.5
       This function is now a thin wrapper over :class:`EnvironBuilder` which
       was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
       and `charset` parameters were added.
    """
    builder = EnvironBuilder(*args, **kwargs)
    try:
        return builder.get_environ()
    finally:
        builder.close()
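
# Illustrative call (URL and values are arbitrary):
#
#   env = create_environ('/foo?bar=baz', 'http://example.org/app')
#   env['PATH_INFO']      # -> '/foo'
#   env['QUERY_STRING']   # -> 'bar=baz'
#   env['SCRIPT_NAME']    # -> '/app'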


def run_wsgi_app(app, environ, buffered=False):
    """Return a tuple in the form (app_iter, status, headers) of the
    application output. This works best if you pass it an application that
    returns an iterator all the time.

    Sometimes applications may use the `write()` callable returned
    by the `start_response` function. This tries to resolve such edge
    cases automatically. But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.

    If passed an invalid WSGI application the behavior of this function is
    undefined. Never pass non-conforming WSGI applications to this function.

    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    environ = _get_environ(environ)
    response = []
    buffer = []

    def start_response(status, headers, exc_info=None):
        if exc_info is not None:
            reraise(*exc_info)
        response[:] = [status, headers]
        return buffer.append

    app_iter = app(environ, start_response)

    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        close_func = getattr(app_iter, 'close', None)
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()

    # otherwise we iterate the application iter until we have
    # a response, chain the already received data with the already
    # collected data and wrap it in a new `ClosingIterator` if
    # we have a close callable.
    else:
        while not response:
            buffer.append(next(app_iter))
        if buffer:
            close_func = getattr(app_iter, 'close', None)
            app_iter = chain(buffer, app_iter)
            if close_func is not None:
                app_iter = ClosingIterator(app_iter, close_func)

    return app_iter, response[0], Headers(response[1])
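
# Minimal direct-use sketch (``hello_app`` is an invented example app):
#
#   def hello_app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return [b'Hello World!']
#
#   app_iter, status, headers = run_wsgi_app(hello_app, create_environ('/'))
#   b''.join(app_iter)    # -> b'Hello World!'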

230
Linux_x86_64/lib/python2.7/site-packages/werkzeug/testapp.py
Normal file
@@ -0,0 +1,230 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testapp
    ~~~~~~~~~~~~~~~~

    Provide a small test application that can be used to test a WSGI server
    and check it for WSGI compliance.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64

logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')


TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
  "http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
  @import url(http://fonts.googleapis.com/css?family=Ubuntu);

  body       { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
               'Verdana', sans-serif; background-color: white; color: #000;
               font-size: 15px; text-align: center; }
  #logo      { float: right; padding: 0 0 10px 10px; }
  div.box    { text-align: left; width: 45em; margin: auto; padding: 50px 0;
               background-color: white; }
  h1, h2     { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
               'Geneva', 'Verdana', sans-serif; font-weight: normal; }
  h1         { margin: 0 0 30px 0; }
  h2         { font-size: 1.4em; margin: 1em 0 0.5em 0; }
  table      { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
  table th   { background-color: #AFC1C4; color: white; font-size: 0.72em;
               font-weight: normal; width: 18em; vertical-align: top;
               padding: 0.5em 0 0.1em 0.5em; }
  table td   { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
  code       { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
               monospace; font-size: 0.7em; }
  ul li      { line-height: 1.5em; }
  ul.path    { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
               list-style: none; background: #E8EFF0; }
  ul.path li { line-height: 1.6em; }
  li.virtual { color: #999; text-decoration: underline; }
  li.exp     { background: white; }
</style>
<div class="box">
  <img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
  <h1>WSGI Information</h1>
  <p>
    This page displays all available information about the WSGI server and
    the underlying Python interpreter.
  <h2 id="python-interpreter">Python Interpreter</h2>
  <table>
    <tr>
      <th>Python Version
      <td>%(python_version)s
    <tr>
      <th>Platform
      <td>%(platform)s [%(os)s]
    <tr>
      <th>API Version
      <td>%(api_version)s
    <tr>
      <th>Byteorder
      <td>%(byteorder)s
    <tr>
      <th>Werkzeug Version
      <td>%(werkzeug_version)s
  </table>
  <h2 id="wsgi-environment">WSGI Environment</h2>
  <table>%(wsgi_env)s</table>
  <h2 id="installed-eggs">Installed Eggs</h2>
  <p>
    The following python packages were installed on the system as
    Python eggs:
  <ul>%(python_eggs)s</ul>
  <h2 id="sys-path">System Path</h2>
  <p>
    The following paths are the current contents of the load path. The
    following entries are looked up for Python packages. Note that not
    all items in this path are folders. Gray and underlined items are
    entries pointing to invalid resources or used by custom import hooks
    such as the zip importer.
  <p>
    Items with a bright background were expanded for display from a relative
    path. If you encounter such paths in the output you might want to check
    your setup as relative paths are usually problematic in multithreaded
    environments.
  <ul class="path">%(sys_path)s</ul>
</div>
'''


def iter_sys_path():
    if os.name == 'posix':
        def strip(x):
            prefix = os.path.expanduser('~')
            if x.startswith(prefix):
                x = '~' + x[len(prefix):]
            return x
    else:
        strip = lambda x: x

    cwd = os.path.abspath(os.getcwd())
    for item in sys.path:
        path = os.path.join(cwd, item or os.path.curdir)
        yield strip(os.path.normpath(path)), \
            not os.path.isdir(path), path != item


def render_testapp(req):
    try:
        import pkg_resources
    except ImportError:
        eggs = ()
    else:
        eggs = sorted(pkg_resources.working_set,
                      key=lambda x: x.project_name.lower())
    python_eggs = []
    for egg in eggs:
        try:
            version = egg.version
        except (ValueError, AttributeError):
            version = 'unknown'
        python_eggs.append('<li>%s <small>[%s]</small>' % (
            escape(egg.project_name),
            escape(version)
        ))

    wsgi_env = []
    sorted_environ = sorted(req.environ.items(),
                            key=lambda x: repr(x[0]).lower())
    for key, value in sorted_environ:
        wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
            escape(str(key)),
            ' '.join(wrap(escape(repr(value))))
        ))

    sys_path = []
    for item, virtual, expanded in iter_sys_path():
        class_ = []
        if virtual:
            class_.append('virtual')
        if expanded:
            class_.append('exp')
        sys_path.append('<li%s>%s' % (
            class_ and ' class="%s"' % ' '.join(class_) or '',
            escape(item)
        ))

    return (TEMPLATE % {
        'python_version': '<br>'.join(escape(sys.version).splitlines()),
        'platform': escape(sys.platform),
        'os': escape(os.name),
        'api_version': sys.api_version,
        'byteorder': sys.byteorder,
        'werkzeug_version': werkzeug.__version__,
        'python_eggs': '\n'.join(python_eggs),
        'wsgi_env': '\n'.join(wsgi_env),
        'sys_path': '\n'.join(sys_path)
    }).encode('utf-8')


def test_app(environ, start_response):
    """Simple test application that dumps the environment. You can use
    it to check if Werkzeug is working properly:

    .. sourcecode:: pycon

        >>> from werkzeug.serving import run_simple
        >>> from werkzeug.testapp import test_app
        >>> run_simple('localhost', 3000, test_app)
         * Running on http://localhost:3000/

    The application displays important information from the WSGI environment,
    the Python interpreter and the installed libraries.
    """
    req = Request(environ, populate_request=False)
    if req.args.get('resource') == 'logo':
        response = logo
    else:
        response = Response(render_testapp(req), mimetype='text/html')
    return response(environ, start_response)


if __name__ == '__main__':
    from werkzeug.serving import run_simple
    run_simple('localhost', 5000, test_app, use_reloader=True)

@@ -0,0 +1,267 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite
    ~~~~~~~~~~~~~~~~~~

    Contains all the Werkzeug tests.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import with_statement

import re
import sys
import unittest
import shutil
import tempfile
import atexit

from werkzeug.utils import find_modules
from werkzeug._compat import text_type, integer_types, reraise


def get_temporary_directory():
    directory = tempfile.mkdtemp()
    @atexit.register
    def remove_directory():
        try:
            shutil.rmtree(directory)
        except EnvironmentError:
            pass
    return directory


def iter_suites(package):
    """Yields all testsuites."""
    for module in find_modules(package, include_packages=True):
        mod = __import__(module, fromlist=['*'])
        if hasattr(mod, 'suite'):
            yield mod.suite()


def find_all_tests(suite):
    """Yields all the tests and their names from a given suite."""
    suites = [suite]
    while suites:
        s = suites.pop()
        try:
            suites.extend(s)
        except TypeError:
            yield s, '%s.%s.%s' % (
                s.__class__.__module__,
                s.__class__.__name__,
                s._testMethodName
            )


class WerkzeugTestCase(unittest.TestCase):
    """Baseclass for all the tests that Werkzeug uses. Use these
    methods for testing instead of the camelcased ones in the
    baseclass for consistency.
    """

    def setup(self):
        pass

    def teardown(self):
        pass

    def setUp(self):
        self.setup()

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        self.teardown()

    def assert_line_equal(self, x, y):
        assert x == y, "lines not equal\n a = %r\n b = %r" % (x, y)

    def assert_equal(self, x, y, msg=None):
        return self.assertEqual(x, y, msg)

    def assert_not_equal(self, x, y):
        return self.assertNotEqual(x, y)

    def assert_raises(self, exc_type, callable=None, *args, **kwargs):
        catcher = _ExceptionCatcher(self, exc_type)
        if callable is None:
            return catcher
        with catcher:
            callable(*args, **kwargs)

    if sys.version_info[:2] == (2, 6):
        def assertIsNone(self, x):
            assert x is None, "%r is not None" % (x,)

        def assertIsNotNone(self, x):
            assert x is not None, "%r is None" % (x,)

        def assertIn(self, x, y):
            assert x in y, "%r not in %r" % (x, y)

        def assertNotIn(self, x, y):
            assert x not in y, "%r in %r" % (x, y)

        def assertIsInstance(self, x, y):
            assert isinstance(x, y), "not isinstance(%r, %r)" % (x, y)

        def assertIs(self, x, y):
            assert x is y, "%r is not %r" % (x, y)

        def assertIsNot(self, x, y):
            assert x is not y, "%r is %r" % (x, y)

        def assertSequenceEqual(self, x, y):
            self.assertEqual(x, y)

        def assertRaisesRegex(self, exc_type, regex, *args, **kwargs):
            catcher = _ExceptionCatcher(self, exc_type)
            if not args:
                return catcher
            elif callable(args[0]):
                with catcher:
                    args[0](*args[1:], **kwargs)
                # match against the regex argument, not the callable
                if regex is not None:
                    assert re.search(regex, catcher.exc_value[0])
            else:
                raise NotImplementedError()

    elif sys.version_info[0] == 2:
        def assertRaisesRegex(self, *args, **kwargs):
            return self.assertRaisesRegexp(*args, **kwargs)

    def assert_is_none(self, x):
        self.assertIsNone(x)

    def assert_is_not_none(self, x):
        self.assertIsNotNone(x)

    def assert_in(self, x, y):
        self.assertIn(x, y)

    def assert_is_instance(self, x, y):
        self.assertIsInstance(x, y)

    def assert_not_in(self, x, y):
        self.assertNotIn(x, y)

    def assert_is(self, x, y):
        self.assertIs(x, y)

    def assert_is_not(self, x, y):
        self.assertIsNot(x, y)

    def assert_true(self, x):
        self.assertTrue(x)

    def assert_false(self, x):
        self.assertFalse(x)

    def assert_raises_regex(self, *args, **kwargs):
        return self.assertRaisesRegex(*args, **kwargs)

    def assert_sequence_equal(self, x, y):
        self.assertSequenceEqual(x, y)

    def assert_strict_equal(self, x, y):
        '''Stricter version of assert_equal that doesn't do implicit conversion
        between unicode and strings'''
        self.assert_equal(x, y)
        assert issubclass(type(x), type(y)) or issubclass(type(y), type(x)), \
            '%s != %s' % (type(x), type(y))
        if isinstance(x, (bytes, text_type, integer_types)) or x is None:
            return
        elif isinstance(x, dict) or isinstance(y, dict):
            x = sorted(x.items())
            y = sorted(y.items())
        elif isinstance(x, set) or isinstance(y, set):
            x = sorted(x)
            y = sorted(y)
        rx, ry = repr(x), repr(y)
        if rx != ry:
            rx = rx[:200] + (rx[200:] and '...')
            ry = ry[:200] + (ry[200:] and '...')
            raise AssertionError(rx, ry)
        assert repr(x) == repr(y), repr((x, y))[:200]
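
# A minimal sketch of a concrete test case built on the base class above
# (names and values are invented for illustration):
#
#   class ExampleTestCase(WerkzeugTestCase):
#
#       def setup(self):
#           self.value = 41
#
#       def test_answer(self):
#           self.assert_equal(self.value + 1, 42)
#           with self.assert_raises(ZeroDivisionError):
#               1 / 0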


class _ExceptionCatcher(object):

    def __init__(self, test_case, exc_type):
        self.test_case = test_case
        self.exc_type = exc_type
        self.exc_value = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        exception_name = self.exc_type.__name__
        if exc_type is None:
            self.test_case.fail('Expected exception of type %r' %
                                exception_name)
        elif not issubclass(exc_type, self.exc_type):
            reraise(exc_type, exc_value, tb)
        self.exc_value = exc_value
        return True


class BetterLoader(unittest.TestLoader):
    """A nicer loader that solves two problems. First of all we are setting
    up tests from different sources and we're doing this programmatically
    which breaks the default loading logic so this is required anyways.
    Secondly this loader has a nicer interpolation for test names than the
    default one so you can just do ``run-tests.py ViewTestCase`` and it
    will work.
    """

    def getRootSuite(self):
        return suite()

    def loadTestsFromName(self, name, module=None):
        root = self.getRootSuite()
        if name == 'suite':
            return root

        all_tests = []
        for testcase, testname in find_all_tests(root):
            if testname == name or \
               testname.endswith('.' + name) or \
               ('.' + name + '.') in testname or \
               testname.startswith(name + '.'):
                all_tests.append(testcase)

        if not all_tests:
            raise LookupError('could not find test case for "%s"' % name)

        if len(all_tests) == 1:
            return all_tests[0]
        rv = unittest.TestSuite()
        for test in all_tests:
            rv.addTest(test)
        return rv
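
# With this loader any unambiguous suffix of a dotted test name selects the
# test, so all of the following would work (invocations are illustrative):
#
#   python run-tests.py suite
#   python run-tests.py CompatTestCase
#   python run-tests.py CompatTestCase.test_old_imports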


def suite():
    """A testsuite that has all the Werkzeug tests. You can use this
    function to integrate the Werkzeug tests into your own testsuite
    in case you want to test that monkeypatches to Werkzeug do not
    break it.
    """
    suite = unittest.TestSuite()
    for other_suite in iter_suites(__name__):
        suite.addTest(other_suite)
    return suite
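
# Integration sketch per the docstring above (the outer suite name is
# arbitrary):
#
#   import unittest
#   outer = unittest.TestSuite()
#   outer.addTest(suite())
#   unittest.TextTestRunner().run(outer)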


def main():
    """Runs the testsuite as command line application."""
    try:
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception:
        import traceback
        traceback.print_exc()
        sys.exit(1)

@@ -0,0 +1,40 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.compat
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Ensure that old stuff does not break on update.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
import warnings
from werkzeug.testsuite import WerkzeugTestCase

from werkzeug.wrappers import Response
from werkzeug.test import create_environ


class CompatTestCase(WerkzeugTestCase):

    def test_old_imports(self):
        from werkzeug.utils import Headers, MultiDict, CombinedMultiDict, \
            EnvironHeaders
        from werkzeug.http import Accept, MIMEAccept, CharsetAccept, \
            LanguageAccept, ETags, HeaderSet, WWWAuthenticate, \
            Authorization

    def test_exposed_werkzeug_mod(self):
        import werkzeug
        for key in werkzeug.__all__:
            # deprecated, skip it
            if key in ('templates', 'Template'):
                continue
            getattr(werkzeug, key)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(CompatTestCase))
    return suite

@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.contrib
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the contrib modules.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import iter_suites


def suite():
    suite = unittest.TestSuite()
    for other_suite in iter_suites(__name__):
        suite.addTest(other_suite)
    return suite

@@ -0,0 +1,257 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.cache
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the cache system.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil

from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache

try:
    import redis
    try:
        from redis.exceptions import ConnectionError as RedisConnectionError
        cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set(
            'test', 'connection')
    except RedisConnectionError:
        redis = None
except ImportError:
    redis = None
try:
    import pylibmc as memcache
except ImportError:
    try:
        from google.appengine.api import memcache
    except ImportError:
        try:
            import memcache
        except ImportError:
            memcache = None


class SimpleCacheTestCase(WerkzeugTestCase):

    def test_get_dict(self):
        c = cache.SimpleCache()
        c.set('a', 'a')
        c.set('b', 'b')
        d = c.get_dict('a', 'b')
        assert 'a' in d
        assert 'a' == d['a']
        assert 'b' in d
        assert 'b' == d['b']

    def test_set_many(self):
        c = cache.SimpleCache()
        c.set_many({0: 0, 1: 1, 2: 4})
        assert c.get(2) == 4
        c.set_many((i, i * i) for i in range(3))
        assert c.get(2) == 4


class FileSystemCacheTestCase(WerkzeugTestCase):

    def test_set_get(self):
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            for i in range(3):
                c.set(str(i), i * i)
            for i in range(3):
                result = c.get(str(i))
                assert result == i * i
        finally:
            shutil.rmtree(tmp_dir)

    def test_filesystemcache_prune(self):
        THRESHOLD = 13
        tmp_dir = tempfile.mkdtemp()
        c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
        for i in range(2 * THRESHOLD):
            c.set(str(i), i)
        cache_files = os.listdir(tmp_dir)
        shutil.rmtree(tmp_dir)
        assert len(cache_files) <= THRESHOLD

    def test_filesystemcache_clear(self):
        tmp_dir = tempfile.mkdtemp()
        c = cache.FileSystemCache(cache_dir=tmp_dir)
        c.set('foo', 'bar')
        cache_files = os.listdir(tmp_dir)
        assert len(cache_files) == 1
        c.clear()
        cache_files = os.listdir(tmp_dir)
        assert len(cache_files) == 0
        shutil.rmtree(tmp_dir)


class RedisCacheTestCase(WerkzeugTestCase):

    def make_cache(self):
        return cache.RedisCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        self.make_cache().clear()

    def test_compat(self):
        c = self.make_cache()
        c._client.set(c.key_prefix + 'foo', b'Awesome')
        self.assert_equal(c.get('foo'), b'Awesome')
        c._client.set(c.key_prefix + 'foo', b'42')
        self.assert_equal(c.get('foo'), 42)

    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']

    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        c.set('spam', 'eggs')
        assert c.get_many('foo', 'spam') == [['bar'], 'eggs']

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']

    def test_expire(self):
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        assert c.get('foo') is None

    def test_add(self):
        c = self.make_cache()
        # sanity check that add() works like set()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.add('foo', 'qux')
        assert c.get('foo') == 'bar'

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.delete('foo')
        assert c.get('foo') is None

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None

    def test_inc_dec(self):
        c = self.make_cache()
        c.set('foo', 1)
        self.assert_equal(c.inc('foo'), 2)
        self.assert_equal(c.dec('foo'), 1)
        c.delete('foo')

    def test_true_false(self):
        c = self.make_cache()
        c.set('foo', True)
        assert c.get('foo') == True
        c.set('bar', False)
        assert c.get('bar') == False


class MemcachedCacheTestCase(WerkzeugTestCase):

    def make_cache(self):
        return cache.MemcachedCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        self.make_cache().clear()

    def test_compat(self):
        c = self.make_cache()
        c._client.set(c.key_prefix + b'foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')

    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')

    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        c.set('spam', 'eggs')
        self.assert_equal(c.get_many('foo', 'spam'), ['bar', 'eggs'])

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': 'eggs'})
        self.assert_equal(c.get('foo'), 'bar')
        self.assert_equal(c.get('spam'), 'eggs')

    def test_expire(self):
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        self.assert_is_none(c.get('foo'))

    def test_add(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        c.add('foo', 'baz')
        self.assert_equal(c.get('foo'), 'bar')

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        c.delete('foo')
        self.assert_is_none(c.get('foo'))

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        self.assert_is_none(c.get('foo'))
        self.assert_is_none(c.get('spam'))

    def test_inc_dec(self):
        c = self.make_cache()
        c.set('foo', 1)
        # XXX: Is this an intended difference?
        c.inc('foo')
        self.assert_equal(c.get('foo'), 2)
        c.dec('foo')
        self.assert_equal(c.get('foo'), 1)

    def test_true_false(self):
        c = self.make_cache()
        c.set('foo', True)
        self.assert_equal(c.get('foo'), True)
        c.set('bar', False)
        self.assert_equal(c.get('bar'), False)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(SimpleCacheTestCase))
    suite.addTest(unittest.makeSuite(FileSystemCacheTestCase))
    if redis is not None:
        suite.addTest(unittest.makeSuite(RedisCacheTestCase))
    if memcache is not None:
        suite.addTest(unittest.makeSuite(MemcachedCacheTestCase))
    return suite

@@ -0,0 +1,193 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.fixers
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Server / Browser fixers.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.datastructures import ResponseCacheControl
from werkzeug.http import parse_cache_control_header

from werkzeug.test import create_environ, Client
from werkzeug.wrappers import Request, Response
from werkzeug.contrib import fixers
from werkzeug.utils import redirect


@Request.application
def path_check_app(request):
    return Response('PATH_INFO: %s\nSCRIPT_NAME: %s' % (
        request.environ.get('PATH_INFO', ''),
        request.environ.get('SCRIPT_NAME', '')
    ))


class ServerFixerTestCase(WerkzeugTestCase):

    def test_cgi_root_fix(self):
        app = fixers.CGIRootFix(path_check_app)
        response = Response.from_app(app, dict(create_environ(),
            SCRIPT_NAME='/foo',
            PATH_INFO='/bar',
            SERVER_SOFTWARE='lighttpd/1.4.27'
        ))
        self.assert_equal(response.get_data(),
                          b'PATH_INFO: /foo/bar\nSCRIPT_NAME: ')

    def test_cgi_root_fix_custom_app_root(self):
        app = fixers.CGIRootFix(path_check_app, app_root='/baz/poop/')
        response = Response.from_app(app, dict(create_environ(),
            SCRIPT_NAME='/foo',
            PATH_INFO='/bar'
        ))
        self.assert_equal(response.get_data(),
                          b'PATH_INFO: /foo/bar\nSCRIPT_NAME: baz/poop')

    def test_path_info_from_request_uri_fix(self):
        app = fixers.PathInfoFromRequestUriFix(path_check_app)
        for key in 'REQUEST_URI', 'REQUEST_URL', 'UNENCODED_URL':
            env = dict(create_environ(), SCRIPT_NAME='/test',
                       PATH_INFO='/?????')
            env[key] = '/test/foo%25bar?drop=this'
            response = Response.from_app(app, env)
            self.assert_equal(response.get_data(),
                              b'PATH_INFO: /foo%bar\nSCRIPT_NAME: /test')

    def test_proxy_fix(self):
        @Request.application
        def app(request):
            return Response('%s|%s' % (
                request.remote_addr,
                # do not use request.host as this fixes too :)
                request.environ['HTTP_HOST']
            ))
        app = fixers.ProxyFix(app, num_proxies=2)
        environ = dict(create_environ(),
            HTTP_X_FORWARDED_PROTO="https",
            HTTP_X_FORWARDED_HOST='example.com',
            HTTP_X_FORWARDED_FOR='1.2.3.4, 5.6.7.8',
            REMOTE_ADDR='127.0.0.1',
            HTTP_HOST='fake'
        )

        response = Response.from_app(app, environ)

        self.assert_equal(response.get_data(), b'1.2.3.4|example.com')

        # And we must check that if it is a redirection it is
        # correctly done:
        redirect_app = redirect('/foo/bar.hml')
        response = Response.from_app(redirect_app, environ)

        wsgi_headers = response.get_wsgi_headers(environ)
        assert wsgi_headers['Location'] == 'https://example.com/foo/bar.hml'
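
    # Typical production wiring for the fixer exercised above (the inner
    # app name is a stand-in):
    #
    #   from werkzeug.contrib.fixers import ProxyFix
    #   application = ProxyFix(my_wsgi_app, num_proxies=1)
    #
    # With one trusted proxy, REMOTE_ADDR is taken from X-Forwarded-For and
    # the host and scheme follow X-Forwarded-Host / X-Forwarded-Proto.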

    def test_proxy_fix_weird_enum(self):
        @fixers.ProxyFix
        @Request.application
        def app(request):
            return Response(request.remote_addr)
        environ = dict(create_environ(),
            HTTP_X_FORWARDED_FOR=',',
            REMOTE_ADDR='127.0.0.1',
        )

        response = Response.from_app(app, environ)
        self.assert_strict_equal(response.get_data(), b'127.0.0.1')

    def test_header_rewriter_fix(self):
        @Request.application
        def application(request):
            return Response("", headers=[
                ('X-Foo', 'bar')
            ])
        application = fixers.HeaderRewriterFix(application, ('X-Foo',),
                                               (('X-Bar', '42'),))
        response = Response.from_app(application, create_environ())
        assert response.headers['Content-Type'] == 'text/plain; charset=utf-8'
        assert 'X-Foo' not in response.headers
        assert response.headers['X-Bar'] == '42'


class BrowserFixerTestCase(WerkzeugTestCase):

    def test_ie_fixes(self):
        @fixers.InternetExplorerFix
        @Request.application
        def application(request):
            response = Response('binary data here',
                                mimetype='application/vnd.ms-excel')
            response.headers['Vary'] = 'Cookie'
            response.headers['Content-Disposition'] = \
                'attachment; filename=foo.xls'
            return response

        c = Client(application, Response)
        response = c.get('/', headers=[
            ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
        ])

        # IE gets no vary
        self.assert_equal(response.get_data(), b'binary data here')
        assert 'vary' not in response.headers
        assert response.headers['content-disposition'] == \
            'attachment; filename=foo.xls'
        assert response.headers['content-type'] == 'application/vnd.ms-excel'

        # other browsers do
        c = Client(application, Response)
        response = c.get('/')
        self.assert_equal(response.get_data(), b'binary data here')
        assert 'vary' in response.headers

        cc = ResponseCacheControl()
        cc.no_cache = True

        @fixers.InternetExplorerFix
        @Request.application
        def application(request):
            response = Response('binary data here',
                                mimetype='application/vnd.ms-excel')
            response.headers['Pragma'] = ', '.join(pragma)
            response.headers['Cache-Control'] = cc.to_header()
            response.headers['Content-Disposition'] = \
                'attachment; filename=foo.xls'
            return response

        # IE has no pragma or cache control
        pragma = ('no-cache',)
        c = Client(application, Response)
        response = c.get('/', headers=[
            ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
        ])
        self.assert_equal(response.get_data(), b'binary data here')
        assert 'pragma' not in response.headers
        assert 'cache-control' not in response.headers
        assert response.headers['content-disposition'] == \
            'attachment; filename=foo.xls'

        # IE has simplified pragma
        pragma = ('no-cache', 'x-foo')
        cc.proxy_revalidate = True
        response = c.get('/', headers=[
            ('User-Agent', 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
        ])
        self.assert_equal(response.get_data(), b'binary data here')
        assert response.headers['pragma'] == 'x-foo'
        assert response.headers['cache-control'] == 'proxy-revalidate'
        assert response.headers['content-disposition'] == \
            'attachment; filename=foo.xls'

        # regular browsers get everything
        response = c.get('/')
        self.assert_equal(response.get_data(), b'binary data here')
        assert response.headers['pragma'] == 'no-cache, x-foo'
        cc = parse_cache_control_header(response.headers['cache-control'],
                                        cls=ResponseCacheControl)
        assert cc.no_cache
        assert cc.proxy_revalidate
        assert response.headers['content-disposition'] == \
            'attachment; filename=foo.xls'


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ServerFixerTestCase))
    suite.addTest(unittest.makeSuite(BrowserFixerTestCase))
    return suite

@@ -0,0 +1,184 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.iterio
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the iterio object.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
from functools import partial

from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib.iterio import IterIO, greenlet


class IterOTestSuite(WerkzeugTestCase):

    def test_basic_native(self):
        io = IterIO(["Hello", "World", "1", "2", "3"])
        self.assert_equal(io.tell(), 0)
        self.assert_equal(io.read(2), "He")
        self.assert_equal(io.tell(), 2)
        self.assert_equal(io.read(3), "llo")
        self.assert_equal(io.tell(), 5)
        io.seek(0)
        self.assert_equal(io.read(5), "Hello")
        self.assert_equal(io.tell(), 5)
        self.assert_equal(io._buf, "Hello")
        self.assert_equal(io.read(), "World123")
        self.assert_equal(io.tell(), 13)
        io.close()
        assert io.closed

        io = IterIO(["Hello\n", "World!"])
        self.assert_equal(io.readline(), 'Hello\n')
        self.assert_equal(io._buf, 'Hello\n')
        self.assert_equal(io.read(), 'World!')
        self.assert_equal(io._buf, 'Hello\nWorld!')
        self.assert_equal(io.tell(), 12)
        io.seek(0)
        self.assert_equal(io.readlines(), ['Hello\n', 'World!'])

        io = IterIO(["foo\n", "bar"])
        io.seek(-4, 2)
        self.assert_equal(io.read(4), '\nbar')

        self.assert_raises(IOError, io.seek, 2, 100)
        io.close()
        self.assert_raises(ValueError, io.read)

    def test_basic_bytes(self):
        io = IterIO([b"Hello", b"World", b"1", b"2", b"3"])
        self.assert_equal(io.tell(), 0)
        self.assert_equal(io.read(2), b"He")
        self.assert_equal(io.tell(), 2)
        self.assert_equal(io.read(3), b"llo")
        self.assert_equal(io.tell(), 5)
        io.seek(0)
        self.assert_equal(io.read(5), b"Hello")
        self.assert_equal(io.tell(), 5)
        self.assert_equal(io._buf, b"Hello")
        self.assert_equal(io.read(), b"World123")
        self.assert_equal(io.tell(), 13)
        io.close()
        assert io.closed

        io = IterIO([b"Hello\n", b"World!"])
        self.assert_equal(io.readline(), b'Hello\n')
        self.assert_equal(io._buf, b'Hello\n')
        self.assert_equal(io.read(), b'World!')
        self.assert_equal(io._buf, b'Hello\nWorld!')
        self.assert_equal(io.tell(), 12)
        io.seek(0)
        self.assert_equal(io.readlines(), [b'Hello\n', b'World!'])

        io = IterIO([b"foo\n", b"bar"])
        io.seek(-4, 2)
        self.assert_equal(io.read(4), b'\nbar')

        self.assert_raises(IOError, io.seek, 2, 100)
        io.close()
        self.assert_raises(ValueError, io.read)

    def test_basic_unicode(self):
        io = IterIO([u"Hello", u"World", u"1", u"2", u"3"])
        self.assert_equal(io.tell(), 0)
        self.assert_equal(io.read(2), u"He")
        self.assert_equal(io.tell(), 2)
        self.assert_equal(io.read(3), u"llo")
        self.assert_equal(io.tell(), 5)
        io.seek(0)
        self.assert_equal(io.read(5), u"Hello")
        self.assert_equal(io.tell(), 5)
        self.assert_equal(io._buf, u"Hello")
        self.assert_equal(io.read(), u"World123")
        self.assert_equal(io.tell(), 13)
        io.close()
        assert io.closed

        io = IterIO([u"Hello\n", u"World!"])
        self.assert_equal(io.readline(), u'Hello\n')
        self.assert_equal(io._buf, u'Hello\n')
        self.assert_equal(io.read(), u'World!')
        self.assert_equal(io._buf, u'Hello\nWorld!')
        self.assert_equal(io.tell(), 12)
        io.seek(0)
        self.assert_equal(io.readlines(), [u'Hello\n', u'World!'])

        io = IterIO([u"foo\n", u"bar"])
        io.seek(-4, 2)
        self.assert_equal(io.read(4), u'\nbar')

        self.assert_raises(IOError, io.seek, 2, 100)
        io.close()
        self.assert_raises(ValueError, io.read)

    def test_sentinel_cases(self):
        io = IterIO([])
        self.assert_strict_equal(io.read(), '')
        io = IterIO([], b'')
        self.assert_strict_equal(io.read(), b'')
        io = IterIO([], u'')
        self.assert_strict_equal(io.read(), u'')

        io = IterIO([])
        self.assert_strict_equal(io.read(), '')
        io = IterIO([b''])
        self.assert_strict_equal(io.read(), b'')
        io = IterIO([u''])
        self.assert_strict_equal(io.read(), u'')

        io = IterIO([])
        self.assert_strict_equal(io.readline(), '')
        io = IterIO([], b'')
        self.assert_strict_equal(io.readline(), b'')
        io = IterIO([], u'')
        self.assert_strict_equal(io.readline(), u'')

        io = IterIO([])
        self.assert_strict_equal(io.readline(), '')
        io = IterIO([b''])
        self.assert_strict_equal(io.readline(), b'')
        io = IterIO([u''])
        self.assert_strict_equal(io.readline(), u'')


class IterITestSuite(WerkzeugTestCase):

    def test_basic(self):
        def producer(out):
            out.write('1\n')
            out.write('2\n')
            out.flush()
            out.write('3\n')
        iterable = IterIO(producer)
        self.assert_equal(next(iterable), '1\n2\n')
        self.assert_equal(next(iterable), '3\n')
        self.assert_raises(StopIteration, next, iterable)

    def test_sentinel_cases(self):
        def producer_dummy_flush(out):
            out.flush()
        iterable = IterIO(producer_dummy_flush)
        self.assert_strict_equal(next(iterable), '')

        def producer_empty(out):
            pass
        iterable = IterIO(producer_empty)
        self.assert_raises(StopIteration, next, iterable)

        iterable = IterIO(producer_dummy_flush, b'')
        self.assert_strict_equal(next(iterable), b'')
        iterable = IterIO(producer_dummy_flush, u'')
        self.assert_strict_equal(next(iterable), u'')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(IterOTestSuite))
    if greenlet is not None:
        suite.addTest(unittest.makeSuite(IterITestSuite))
    return suite

@@ -0,0 +1,64 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.securecookie
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the secure cookie.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug.utils import parse_cookie
from werkzeug.wrappers import Request, Response
from werkzeug.contrib.securecookie import SecureCookie


class SecureCookieTestCase(WerkzeugTestCase):

    def test_basic_support(self):
        c = SecureCookie(secret_key=b'foo')
        assert c.new
        assert not c.modified
        assert not c.should_save
        c['x'] = 42
        assert c.modified
        assert c.should_save
        s = c.serialize()

        c2 = SecureCookie.unserialize(s, b'foo')
        assert c is not c2
        assert not c2.new
        assert not c2.modified
        assert not c2.should_save
        self.assert_equal(c2, c)

        c3 = SecureCookie.unserialize(s, b'wrong foo')
        assert not c3.modified
        assert not c3.new
        self.assert_equal(c3, {})

    def test_wrapper_support(self):
        req = Request.from_values()
        resp = Response()
        c = SecureCookie.load_cookie(req, secret_key=b'foo')
        assert c.new
        c['foo'] = 42
        self.assert_equal(c.secret_key, b'foo')
        c.save_cookie(resp)

        req = Request.from_values(headers={
            'Cookie': 'session="%s"' % parse_cookie(
                resp.headers['set-cookie'])['session']
        })
        c2 = SecureCookie.load_cookie(req, secret_key=b'foo')
        assert not c2.new
        self.assert_equal(c2, c)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(SecureCookieTestCase))
    return suite

@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.sessions
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the sessions.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
import shutil
from tempfile import mkdtemp, gettempdir

from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib.sessions import FilesystemSessionStore


class SessionTestCase(WerkzeugTestCase):

    def setup(self):
        self.session_folder = mkdtemp()

    def teardown(self):
        shutil.rmtree(self.session_folder)

    def test_default_tempdir(self):
        store = FilesystemSessionStore()
        assert store.path == gettempdir()

    def test_basic_fs_sessions(self):
        store = FilesystemSessionStore(self.session_folder)
        x = store.new()
        assert x.new
        assert not x.modified
        x['foo'] = [1, 2, 3]
        assert x.modified
        store.save(x)

        x2 = store.get(x.sid)
        assert not x2.new
        assert not x2.modified
        assert x2 is not x
        assert x2 == x
        x2['test'] = 3
        assert x2.modified
        assert not x2.new
        store.save(x2)

        x = store.get(x.sid)
        store.delete(x)
        x2 = store.get(x.sid)
        # the session is not new when it was used previously.
        assert not x2.new

    def test_renewing_fs_session(self):
        store = FilesystemSessionStore(self.session_folder,
                                       renew_missing=True)
        x = store.new()
        store.save(x)
        store.delete(x)
        x2 = store.get(x.sid)
        assert x2.new

    def test_fs_session_listing(self):
        store = FilesystemSessionStore(self.session_folder,
                                       renew_missing=True)
        sessions = set()
        for x in range(10):
            sess = store.new()
            store.save(sess)
            sessions.add(sess.sid)

        listed_sessions = set(store.list())
        assert sessions == listed_sessions


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(SessionTestCase))
    return suite

@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.contrib.wrappers
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the contrib wrappers.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import with_statement

import unittest

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug.contrib import wrappers
from werkzeug import routing
from werkzeug.wrappers import Request, Response


class WrappersTestCase(WerkzeugTestCase):

    def test_reverse_slash_behavior(self):
        class MyRequest(wrappers.ReverseSlashBehaviorRequestMixin, Request):
            pass
        req = MyRequest.from_values('/foo/bar', 'http://example.com/test')
        assert req.url == 'http://example.com/test/foo/bar'
        assert req.path == 'foo/bar'
        assert req.script_root == '/test/'

        # make sure the routing system works with the slashes in
        # reverse order as well.
        map = routing.Map([routing.Rule('/foo/bar', endpoint='foo')])
        adapter = map.bind_to_environ(req.environ)
        assert adapter.match() == ('foo', {})
        adapter = map.bind(req.host, req.script_root)
        assert adapter.match(req.path) == ('foo', {})

    def test_dynamic_charset_request_mixin(self):
        class MyRequest(wrappers.DynamicCharsetRequestMixin, Request):
            pass
        env = {'CONTENT_TYPE': 'text/html'}
        req = MyRequest(env)
        assert req.charset == 'latin1'

        env = {'CONTENT_TYPE': 'text/html; charset=utf-8'}
        req = MyRequest(env)
        assert req.charset == 'utf-8'

        env = {'CONTENT_TYPE': 'application/octet-stream'}
        req = MyRequest(env)
        assert req.charset == 'latin1'
        assert req.url_charset == 'latin1'

        MyRequest.url_charset = 'utf-8'
        env = {'CONTENT_TYPE': 'application/octet-stream'}
        req = MyRequest(env)
        assert req.charset == 'latin1'
        assert req.url_charset == 'utf-8'

        def return_ascii(x):
            return "ascii"
        env = {'CONTENT_TYPE': 'text/plain; charset=x-weird-charset'}
        req = MyRequest(env)
        req.unknown_charset = return_ascii
        assert req.charset == 'ascii'
        assert req.url_charset == 'utf-8'

    def test_dynamic_charset_response_mixin(self):
        class MyResponse(wrappers.DynamicCharsetResponseMixin, Response):
            default_charset = 'utf-7'
        resp = MyResponse(mimetype='text/html')
        assert resp.charset == 'utf-7'
        resp.charset = 'utf-8'
        assert resp.charset == 'utf-8'
        assert resp.mimetype == 'text/html'
        assert resp.mimetype_params == {'charset': 'utf-8'}
        resp.mimetype_params['charset'] = 'iso-8859-15'
        assert resp.charset == 'iso-8859-15'
        resp.set_data(u'Hällo Wörld')
        assert b''.join(resp.iter_encoded()) == \
            u'Hällo Wörld'.encode('iso-8859-15')
        del resp.headers['content-type']
        try:
            resp.charset = 'utf-8'
        except TypeError:
            pass
        else:
            assert False, 'expected type error on charset setting without ct'


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(WrappersTestCase))
    return suite

@@ -0,0 +1,788 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.datastructures
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the functionality of the provided Werkzeug
    datastructures.

    TODO:

    - FileMultiDict
    - Immutable types undertested
    - Split up dict tests

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import with_statement

import unittest
import pickle
from contextlib import contextmanager
from copy import copy

from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
    iterlistvalues, text_type
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError


class NativeItermethodsTestCase(WerkzeugTestCase):

    def test_basic(self):
        @datastructures.native_itermethods(['keys', 'values', 'items'])
        class StupidDict(object):
            def keys(self, multi=1):
                return iter(['a', 'b', 'c'] * multi)

            def values(self, multi=1):
                return iter([1, 2, 3] * multi)

            def items(self, multi=1):
                return iter(zip(iterkeys(self, multi=multi),
                                itervalues(self, multi=multi)))

        d = StupidDict()
        expected_keys = ['a', 'b', 'c']
        expected_values = [1, 2, 3]
        expected_items = list(zip(expected_keys, expected_values))

        self.assert_equal(list(iterkeys(d)), expected_keys)
        self.assert_equal(list(itervalues(d)), expected_values)
        self.assert_equal(list(iteritems(d)), expected_items)

        self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
        self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
        self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
|
||||
|
||||
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
|
||||
storage_class = None
|
||||
|
||||
def test_pickle(self):
|
||||
cls = self.storage_class
|
||||
|
||||
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
|
||||
d = cls()
|
||||
d.setlist(b'foo', [1, 2, 3, 4])
|
||||
d.setlist(b'bar', b'foo bar baz'.split())
|
||||
s = pickle.dumps(d, protocol)
|
||||
ud = pickle.loads(s)
|
||||
self.assert_equal(type(ud), type(d))
|
||||
self.assert_equal(ud, d)
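            # werkzeug re-exports the datastructures from the top-level
            # package, so a pickle referencing that shorter path has to
            # unpickle to an equal object as well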
            self.assert_equal(pickle.loads(
                s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
            ud[b'newkey'] = b'bla'
            self.assert_not_equal(ud, d)

    def test_basic_interface(self):
        md = self.storage_class()
        assert isinstance(md, dict)

        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)

        # simple getitem gives the first value
        self.assert_equal(md['a'], 1)
        self.assert_equal(md['c'], 3)
        with self.assert_raises(KeyError):
            md['e']
        self.assert_equal(md.get('a'), 1)

        # list getitem
        self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
        self.assert_equal(md.getlist('d'), [3, 4])
        # do not raise if key not found
        self.assert_equal(md.getlist('x'), [])

        # simple setitem overwrites all values
        md['a'] = 42
        self.assert_equal(md.getlist('a'), [42])

        # list setitem
        md.setlist('a', [1, 2, 3])
        self.assert_equal(md['a'], 1)
        self.assert_equal(md.getlist('a'), [1, 2, 3])

        # verify that it does not change original lists
        l1 = [1, 2, 3]
        md.setlist('a', l1)
        del l1[:]
        self.assert_equal(md['a'], 1)

        # setdefault, setlistdefault
        self.assert_equal(md.setdefault('u', 23), 23)
        self.assert_equal(md.getlist('u'), [23])
        del md['u']

        md.setlist('u', [-1, -2])

        # delitem
        del md['u']
        with self.assert_raises(KeyError):
            md['u']
        del md['d']
        self.assert_equal(md.getlist('d'), [])

        # keys, values, items, lists
        self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
        self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])

        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])

        self.assert_equal(list(sorted(md.items())),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.items(multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md))),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md, multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])

        self.assert_equal(list(sorted(md.lists())),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
        self.assert_equal(list(sorted(iterlists(md))),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])

        # copy method
        c = md.copy()
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # copy method 2
        c = copy(md)
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # update with a multidict
        od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
        self.assert_equal(md.getlist('y'), [0])

        # update with a regular dict
        md = c
        od = {'a': 4, 'y': 0}
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
        self.assert_equal(md.getlist('y'), [0])

        # pop, poplist, popitem, popitemlist
        self.assert_equal(md.pop('y'), 0)
        assert 'y' not in md
        self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
        assert 'a' not in md
        self.assert_equal(md.poplist('missing'), [])

        # remaining: b=2, c=3
        popped = md.popitem()
        assert popped in [('b', 2), ('c', 3)]
        popped = md.popitemlist()
        assert popped in [('b', [2]), ('c', [3])]

        # type conversion
        md = self.storage_class({'a': '4', 'b': ['2', '3']})
        self.assert_equal(md.get('a', type=int), 4)
        self.assert_equal(md.getlist('b', type=int), [2, 3])

        # repr
        md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
        assert "('a', 1)" in repr(md)
        assert "('a', 2)" in repr(md)
        assert "('b', 3)" in repr(md)

        # add and getlist
        md.add('c', '42')
        md.add('c', '23')
        self.assert_equal(md.getlist('c'), ['42', '23'])
        md.add('c', 'blah')
        self.assert_equal(md.getlist('c', type=int), [42, 23])

        # setdefault
        md = self.storage_class()
        md.setdefault('x', []).append(42)
        md.setdefault('x', []).append(23)
        self.assert_equal(md['x'], [42, 23])

        # to dict
        md = self.storage_class()
        md['foo'] = 42
        md.add('bar', 1)
        md.add('bar', 2)
        self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
        self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})

        # popitem from empty dict
        with self.assert_raises(KeyError):
            self.storage_class().popitem()

        with self.assert_raises(KeyError):
            self.storage_class().popitemlist()

        # key errors are of a special type
        with self.assert_raises(BadRequestKeyError):
            self.storage_class()[42]

        # setlist works
        md = self.storage_class()
        md['foo'] = 42
        md.setlist('foo', [1, 2])
        self.assert_equal(md.getlist('foo'), [1, 2])


class ImmutableDictBaseTestCase(WerkzeugTestCase):
    storage_class = None

    def test_follows_dict_interface(self):
        cls = self.storage_class

        data = {'foo': 1, 'bar': 2, 'baz': 3}
        d = cls(data)

        self.assert_equal(d['foo'], 1)
        self.assert_equal(d['bar'], 2)
        self.assert_equal(d['baz'], 3)
        self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo'])
        self.assert_true('foo' in d)
        self.assert_true('foox' not in d)
        self.assert_equal(len(d), 3)

    def test_copies_are_mutable(self):
        cls = self.storage_class
        immutable = cls({'a': 1})
        with self.assert_raises(TypeError):
            immutable.pop('a')

        mutable = immutable.copy()
        mutable.pop('a')
        self.assert_true('a' in immutable)
        self.assert_true(mutable is not immutable)
        self.assert_true(copy(immutable) is immutable)

    def test_dict_is_hashable(self):
        cls = self.storage_class
        immutable = cls({'a': 1, 'b': 2})
        immutable2 = cls({'a': 2, 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)


class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
    storage_class = datastructures.ImmutableTypeConversionDict


class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
    storage_class = datastructures.ImmutableMultiDict

    def test_multidict_is_hashable(self):
        cls = self.storage_class
        immutable = cls({'a': [1, 2], 'b': 2})
        immutable2 = cls({'a': [1], 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)


class ImmutableDictTestCase(ImmutableDictBaseTestCase):
    storage_class = datastructures.ImmutableDict


class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
    storage_class = datastructures.ImmutableOrderedMultiDict

    def test_ordered_multidict_is_hashable(self):
        a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
        b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
        self.assert_not_equal(hash(a), hash(b))


class MultiDictTestCase(MutableMultiDictBaseTestCase):
    storage_class = datastructures.MultiDict

    def test_multidict_pop(self):
        make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]})
        d = make_d()
        self.assert_equal(d.pop('foo'), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foo', 32), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foos', 32), 32)
        assert d

        with self.assert_raises(KeyError):
            d.pop('foos')

    def test_setlistdefault(self):
        md = self.storage_class()
        self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
        self.assert_equal(md.getlist('u'), [-1, -2])
        self.assert_equal(md['u'], -1)

    def test_iter_interfaces(self):
        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)
        self.assert_equal(list(zip(md.keys(), md.listvalues())),
                          list(md.lists()))
        self.assert_equal(list(zip(md, iterlistvalues(md))),
                          list(iterlists(md)))
        self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
                          list(iterlists(md)))


class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
    storage_class = datastructures.OrderedMultiDict

    def test_ordered_interface(self):
        cls = self.storage_class

        d = cls()
        assert not d
        d.add('foo', 'bar')
        self.assert_equal(len(d), 1)
        d.add('foo', 'baz')
        self.assert_equal(len(d), 1)
        self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
        self.assert_equal(list(d), ['foo'])
        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 'bar'), ('foo', 'baz')])
        del d['foo']
        assert not d
        self.assert_equal(len(d), 0)
        self.assert_equal(list(d), [])

        d.update([('foo', 1), ('foo', 2), ('bar', 42)])
        d.add('foo', 3)
        self.assert_equal(d.getlist('foo'), [1, 2, 3])
        self.assert_equal(d.getlist('bar'), [42])
        self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])

        expected = ['foo', 'bar']

        self.assert_sequence_equal(list(d.keys()), expected)
        self.assert_sequence_equal(list(d), expected)
        self.assert_sequence_equal(list(iterkeys(d)), expected)

        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
        self.assert_equal(len(d), 2)

        self.assert_equal(d.pop('foo'), 1)
        assert d.pop('blafasel', None) is None
        self.assert_equal(d.pop('blafasel', 42), 42)
        self.assert_equal(len(d), 1)
        self.assert_equal(d.poplist('bar'), [42])
        assert not d

        assert d.get('missingkey') is None

        d.add('foo', 42)
        d.add('foo', 23)
        d.add('bar', 2)
        d.add('foo', 42)
        self.assert_equal(d, datastructures.MultiDict(d))
        id = self.storage_class(d)
        self.assert_equal(d, id)
        d.add('foo', 2)
        assert d != id

        d.update({'blah': [1, 2, 3]})
        self.assert_equal(d['blah'], 1)
        self.assert_equal(d.getlist('blah'), [1, 2, 3])

        # setlist works
        d = self.storage_class()
        d['foo'] = 42
        d.setlist('foo', [1, 2])
        self.assert_equal(d.getlist('foo'), [1, 2])

        with self.assert_raises(BadRequestKeyError):
            d.pop('missing')
        with self.assert_raises(BadRequestKeyError):
            d['missing']

        # popping
        d = self.storage_class()
        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitem(), ('foo', 23))
        with self.assert_raises(BadRequestKeyError):
            d.popitem()
        assert not d

        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))

        with self.assert_raises(BadRequestKeyError):
            d.popitemlist()

    def test_iterables(self):
        a = datastructures.MultiDict((("key_a", "value_a"),))
        b = datastructures.MultiDict((("key_b", "value_b"),))
        ab = datastructures.CombinedMultiDict((a, b))

        self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
        self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])

        self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
        self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])


class CombinedMultiDictTestCase(WerkzeugTestCase):
    storage_class = datastructures.CombinedMultiDict

    def test_basic_interface(self):
        d1 = datastructures.MultiDict([('foo', '1')])
        d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
        d = self.storage_class([d1, d2])

        # lookup
        self.assert_equal(d['foo'], '1')
        self.assert_equal(d['bar'], '2')
        self.assert_equal(d.getlist('bar'), ['2', '3'])

        self.assert_equal(sorted(d.items()),
                          [('bar', '2'), ('foo', '1')])
        self.assert_equal(sorted(d.items(multi=True)),
                          [('bar', '2'), ('bar', '3'), ('foo', '1')])
        assert 'missingkey' not in d
        assert 'foo' in d

        # type lookup
        self.assert_equal(d.get('foo', type=int), 1)
        self.assert_equal(d.getlist('bar', type=int), [2, 3])

        # get key errors for missing stuff
        with self.assert_raises(KeyError):
            d['missing']

        # make sure that they are immutable
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # copies are immutable
        d = d.copy()
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # make sure lists merge
        md1 = datastructures.MultiDict((("foo", "bar"),))
        md2 = datastructures.MultiDict((("foo", "blafasel"),))
        x = self.storage_class((md1, md2))
        self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])


class HeadersTestCase(WerkzeugTestCase):
    storage_class = datastructures.Headers

    def test_basic_interface(self):
        headers = self.storage_class()
        headers.add('Content-Type', 'text/plain')
        headers.add('X-Foo', 'bar')
        assert 'x-Foo' in headers
        assert 'Content-type' in headers

        headers['Content-Type'] = 'foo/bar'
        self.assert_equal(headers['Content-Type'], 'foo/bar')
        self.assert_equal(len(headers.getlist('Content-Type')), 1)

        # list conversion
        self.assert_equal(headers.to_wsgi_list(), [
            ('Content-Type', 'foo/bar'),
            ('X-Foo', 'bar')
        ])
        self.assert_equal(str(headers), (
            "Content-Type: foo/bar\r\n"
            "X-Foo: bar\r\n"
            "\r\n"))
        self.assert_equal(str(self.storage_class()), "\r\n")

        # extended add
        headers.add('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(headers['Content-Disposition'],
                          'attachment; filename=foo')

        headers.add('x', 'y', z='"')
        self.assert_equal(headers['x'], r'y; z="\""')

    def test_defaults_and_conversion(self):
        # defaults
        headers = self.storage_class([
            ('Content-Type', 'text/plain'),
            ('X-Foo', 'bar'),
            ('X-Bar', '1'),
            ('X-Bar', '2')
        ])
        self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
        self.assert_equal(headers.get('x-Bar'), '1')
        self.assert_equal(headers.get('Content-Type'), 'text/plain')

        self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
        self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
        self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
        self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
        headers.pop('X-Baz')

        # type conversion
        self.assert_equal(headers.get('x-bar', type=int), 1)
        self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])

        # list like operations
        self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
        self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
        del headers[:2]
        del headers[-1]
        self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))

    def test_copying(self):
        a = self.storage_class([('foo', 'bar')])
        b = a.copy()
        a.add('foo', 'baz')
        self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
        self.assert_equal(b.getlist('foo'), ['bar'])

    def test_popping(self):
        headers = self.storage_class([('a', 1)])
        self.assert_equal(headers.pop('a'), 1)
        self.assert_equal(headers.pop('b', 2), 2)

        with self.assert_raises(KeyError):
            headers.pop('c')

    def test_set_arguments(self):
        a = self.storage_class()
        a.set('Content-Disposition', 'useless')
        a.set('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')

    def test_reject_newlines(self):
        h = self.storage_class()

        for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
            with self.assert_raises(ValueError):
                h['foo'] = variation
            with self.assert_raises(ValueError):
                h.add('foo', variation)
            with self.assert_raises(ValueError):
                h.add('foo', 'test', option=variation)
            with self.assert_raises(ValueError):
                h.set('foo', variation)
            with self.assert_raises(ValueError):
                h.set('foo', 'test', option=variation)

    def test_slicing(self):
        # there's nothing wrong with these being native strings;
        # Headers doesn't care about the data types
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('Content-Type', 'application/whocares')
        h.set('X-Forwarded-For', '192.168.0.123')
        h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
        self.assert_equal(list(h), [
            ('X-Foo-Poo', 'bleh'),
            ('X-Forwarded-For', '192.168.0.123')
        ])

    def test_bytes_operations(self):
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('X-Whoops', b'\xff')

        self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
        self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')


class EnvironHeadersTestCase(WerkzeugTestCase):
    storage_class = datastructures.EnvironHeaders

    def test_basic_interface(self):
        # this happens in multiple WSGI servers because they
        # use a very naive way to convert the headers
        broken_env = {
            'HTTP_CONTENT_TYPE': 'text/html',
            'CONTENT_TYPE': 'text/html',
            'HTTP_CONTENT_LENGTH': '0',
            'CONTENT_LENGTH': '0',
            'HTTP_ACCEPT': '*',
            'wsgi.version': (1, 0)
        }
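        # the bogus HTTP_CONTENT_TYPE / HTTP_CONTENT_LENGTH keys must be
        # skipped, so every header shows up exactly once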
        headers = self.storage_class(broken_env)
        assert headers
        self.assert_equal(len(headers), 3)
        self.assert_equal(sorted(headers), [
            ('Accept', '*'),
            ('Content-Length', '0'),
            ('Content-Type', 'text/html')
        ])
        assert not self.storage_class({'wsgi.version': (1, 0)})
        self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)

    def test_return_type_is_unicode(self):
        # environ contains native strings; we return unicode
        headers = self.storage_class({
            'HTTP_FOO': '\xe2\x9c\x93',
            'CONTENT_TYPE': 'text/plain',
        })
        self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
        assert isinstance(headers['Foo'], text_type)
        assert isinstance(headers['Content-Type'], text_type)
        iter_output = dict(iter(headers))
        self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
        assert isinstance(iter_output['Foo'], text_type)
        assert isinstance(iter_output['Content-Type'], text_type)

    def test_bytes_operations(self):
        foo_val = '\xff'
        h = self.storage_class({
            'HTTP_X_FOO': foo_val
        })

        self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
        self.assert_equal(h.get('x-foo'), u'\xff')


class HeaderSetTestCase(WerkzeugTestCase):
    storage_class = datastructures.HeaderSet

    def test_basic_interface(self):
        hs = self.storage_class()
        hs.add('foo')
        hs.add('bar')
        assert 'Bar' in hs
        self.assert_equal(hs.find('foo'), 0)
        self.assert_equal(hs.find('BAR'), 1)
        assert hs.find('baz') < 0
        hs.discard('missing')
        hs.discard('foo')
        assert hs.find('foo') < 0
        self.assert_equal(hs.find('bar'), 0)

        with self.assert_raises(IndexError):
            hs.index('missing')

        self.assert_equal(hs.index('bar'), 0)
        assert hs
        hs.clear()
        assert not hs


class ImmutableListTestCase(WerkzeugTestCase):
    storage_class = datastructures.ImmutableList

    def test_list_hashable(self):
        t = (1, 2, 3, 4)
        l = self.storage_class(t)
        self.assert_equal(hash(t), hash(l))
        self.assert_not_equal(t, l)


def make_call_asserter(assert_equal_func, func=None):
    """Utility to assert a certain number of function calls.

    >>> assert_calls, func = make_call_asserter(self.assert_equal)
    >>> with assert_calls(2):
            func()
            func()
    """

    calls = [0]
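    # single-element list as a mutable cell shared by both closures:
    # asserter() resets the counter, wrapped() increments it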

    @contextmanager
    def asserter(count, msg=None):
        calls[0] = 0
        yield
        assert_equal_func(calls[0], count, msg)

    def wrapped(*args, **kwargs):
        calls[0] += 1
        if func is not None:
            return func(*args, **kwargs)

    return asserter, wrapped


class CallbackDictTestCase(WerkzeugTestCase):
    storage_class = datastructures.CallbackDict

    def test_callback_dict_reads(self):
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(0, 'callback triggered by read-only method'):
            # read-only methods
            dct['a']
            dct.get('a')
            self.assert_raises(KeyError, lambda: dct['x'])
            'a' in dct
            list(iter(dct))
            dct.copy()
        with assert_calls(0, 'callback triggered without modification'):
            # methods that may write but don't
            dct.pop('z', None)
            dct.setdefault('a')

    def test_callback_dict_writes(self):
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(8, 'callback not triggered by write method'):
            # always-write methods
            dct['z'] = 123
            dct['z'] = 123  # must trigger again
            del dct['z']
            dct.pop('b', None)
            dct.setdefault('x')
            dct.popitem()
            dct.update([])
            dct.clear()
        with assert_calls(0, 'callback triggered by failed del'):
            self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
        with assert_calls(0, 'callback triggered by failed pop'):
            self.assert_raises(KeyError, lambda: dct.pop('x'))


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(MultiDictTestCase))
    suite.addTest(unittest.makeSuite(OrderedMultiDictTestCase))
    suite.addTest(unittest.makeSuite(CombinedMultiDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableTypeConversionDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableMultiDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableOrderedMultiDictTestCase))
    suite.addTest(unittest.makeSuite(HeadersTestCase))
    suite.addTest(unittest.makeSuite(EnvironHeadersTestCase))
    suite.addTest(unittest.makeSuite(HeaderSetTestCase))
    suite.addTest(unittest.makeSuite(NativeItermethodsTestCase))
    suite.addTest(unittest.makeSuite(CallbackDictTestCase))
    return suite


@@ -0,0 +1,172 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.debug
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Tests some debug utilities.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
import sys
import re

from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.debug.repr import debug_repr, DebugReprGenerator, \
     dump, helper
from werkzeug.debug.console import HTMLStringO
from werkzeug._compat import PY2


class DebugReprTestCase(WerkzeugTestCase):

    def test_basic_repr(self):
        self.assert_equal(debug_repr([]), u'[]')
        self.assert_equal(debug_repr([1, 2]),
            u'[<span class="number">1</span>, <span class="number">2</span>]')
        self.assert_equal(debug_repr([1, 'test']),
            u'[<span class="number">1</span>, <span class="string">\'test\'</span>]')
        self.assert_equal(debug_repr([None]),
            u'[<span class="object">None</span>]')

    def test_sequence_repr(self):
        self.assert_equal(debug_repr(list(range(20))), (
            u'[<span class="number">0</span>, <span class="number">1</span>, '
            u'<span class="number">2</span>, <span class="number">3</span>, '
            u'<span class="number">4</span>, <span class="number">5</span>, '
            u'<span class="number">6</span>, <span class="number">7</span>, '
            u'<span class="extended"><span class="number">8</span>, '
            u'<span class="number">9</span>, <span class="number">10</span>, '
            u'<span class="number">11</span>, <span class="number">12</span>, '
            u'<span class="number">13</span>, <span class="number">14</span>, '
            u'<span class="number">15</span>, <span class="number">16</span>, '
            u'<span class="number">17</span>, <span class="number">18</span>, '
            u'<span class="number">19</span></span>]'
        ))

    def test_mapping_repr(self):
        self.assert_equal(debug_repr({}), u'{}')
        self.assert_equal(debug_repr({'foo': 42}),
            u'{<span class="pair"><span class="key"><span class="string">\'foo\''
            u'</span></span>: <span class="value"><span class="number">42'
            u'</span></span></span>}')
        self.assert_equal(debug_repr(dict(zip(range(10), [None] * 10))),
            u'{<span class="pair"><span class="key"><span class="number">0</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">1</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">2</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">3</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="extended"><span class="pair"><span class="key"><span class="number">4</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">5</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">6</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">7</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">8</span></span>: <span class="value"><span class="object">None</span></span></span>, <span class="pair"><span class="key"><span class="number">9</span></span>: <span class="value"><span class="object">None</span></span></span></span>}')
        self.assert_equal(
            debug_repr((1, 'zwei', u'drei')),
            u'(<span class="number">1</span>, <span class="string">\''
            u'zwei\'</span>, <span class="string">%s\'drei\'</span>)' % ('u' if PY2 else ''))

    def test_custom_repr(self):
        class Foo(object):
            def __repr__(self):
                return '<Foo 42>'
        self.assert_equal(debug_repr(Foo()),
                          '<span class="object"><Foo 42></span>')

    def test_list_subclass_repr(self):
        class MyList(list):
            pass
        self.assert_equal(
            debug_repr(MyList([1, 2])),
            u'<span class="module">werkzeug.testsuite.debug.</span>MyList(['
            u'<span class="number">1</span>, <span class="number">2</span>])')

    def test_regex_repr(self):
        self.assert_equal(debug_repr(re.compile(r'foo\d')),
            u're.compile(<span class="string regex">r\'foo\\d\'</span>)')
        #XXX: no raw string here because of a syntax bug in py3.3
        self.assert_equal(debug_repr(re.compile(u'foo\\d')),
            u're.compile(<span class="string regex">%sr\'foo\\d\'</span>)' %
            ('u' if PY2 else ''))

    def test_set_repr(self):
        self.assert_equal(debug_repr(frozenset('x')),
            u'frozenset([<span class="string">\'x\'</span>])')
        self.assert_equal(debug_repr(set('x')),
            u'set([<span class="string">\'x\'</span>])')

    def test_recursive_repr(self):
        a = [1]
        a.append(a)
        self.assert_equal(debug_repr(a),
            u'[<span class="number">1</span>, [...]]')

    def test_broken_repr(self):
        class Foo(object):
            def __repr__(self):
                raise Exception('broken!')

        self.assert_equal(
            debug_repr(Foo()),
            u'<span class="brokenrepr"><broken repr (Exception: '
            u'broken!)></span>')
class Foo(object):
    x = 42
    y = 23

    def __init__(self):
        self.z = 15


class DebugHelpersTestCase(WerkzeugTestCase):

    def test_object_dumping(self):
        drg = DebugReprGenerator()
        out = drg.dump_object(Foo())
        assert re.search('Details for werkzeug.testsuite.debug.Foo object at', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)
        assert re.search('<th>z.*<span class="number">15</span>(?s)', out)

        out = drg.dump_object({'x': 42, 'y': 23})
        assert re.search('Contents of', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)

        out = drg.dump_object({'x': 42, 'y': 23, 23: 11})
        assert not re.search('Contents of', out)

        out = drg.dump_locals({'x': 42, 'y': 23})
        assert re.search('Local variables in frame', out)
        assert re.search('<th>x.*<span class="number">42</span>(?s)', out)
        assert re.search('<th>y.*<span class="number">23</span>(?s)', out)

    def test_debug_dump(self):
        old = sys.stdout
        sys.stdout = HTMLStringO()
        try:
            dump([1, 2, 3])
            x = sys.stdout.reset()
            dump()
            y = sys.stdout.reset()
        finally:
            sys.stdout = old

        self.assert_in('Details for list object at', x)
        self.assert_in('<span class="number">1</span>', x)
        self.assert_in('Local variables in frame', y)
        self.assert_in('<th>x', y)
        self.assert_in('<th>old', y)

    def test_debug_help(self):
        old = sys.stdout
        sys.stdout = HTMLStringO()
        try:
            helper([1, 2, 3])
            x = sys.stdout.reset()
        finally:
            sys.stdout = old

        self.assert_in('Help on list object', x)
        self.assert_in('__delitem__', x)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(DebugReprTestCase))
    suite.addTest(unittest.makeSuite(DebugHelpersTestCase))
    return suite


@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.exceptions
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    The tests for the exception classes.

    TODO:

    - This is undertested. HTML is never checked

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug import exceptions
from werkzeug.wrappers import Response
from werkzeug._compat import text_type


class ExceptionsTestCase(WerkzeugTestCase):

    def test_proxy_exception(self):
        orig_resp = Response('Hello World')
        try:
            exceptions.abort(orig_resp)
        except exceptions.HTTPException as e:
            resp = e.get_response({})
        else:
            self.fail('exception not raised')
        self.assert_true(resp is orig_resp)
        self.assert_equal(resp.get_data(), b'Hello World')

    def test_aborter(self):
        abort = exceptions.abort
        self.assert_raises(exceptions.BadRequest, abort, 400)
        self.assert_raises(exceptions.Unauthorized, abort, 401)
        self.assert_raises(exceptions.Forbidden, abort, 403)
        self.assert_raises(exceptions.NotFound, abort, 404)
        self.assert_raises(exceptions.MethodNotAllowed, abort, 405, ['GET', 'HEAD'])
        self.assert_raises(exceptions.NotAcceptable, abort, 406)
        self.assert_raises(exceptions.RequestTimeout, abort, 408)
        self.assert_raises(exceptions.Gone, abort, 410)
        self.assert_raises(exceptions.LengthRequired, abort, 411)
        self.assert_raises(exceptions.PreconditionFailed, abort, 412)
        self.assert_raises(exceptions.RequestEntityTooLarge, abort, 413)
        self.assert_raises(exceptions.RequestURITooLarge, abort, 414)
        self.assert_raises(exceptions.UnsupportedMediaType, abort, 415)
        self.assert_raises(exceptions.UnprocessableEntity, abort, 422)
        self.assert_raises(exceptions.InternalServerError, abort, 500)
        self.assert_raises(exceptions.NotImplemented, abort, 501)
        self.assert_raises(exceptions.BadGateway, abort, 502)
        self.assert_raises(exceptions.ServiceUnavailable, abort, 503)

        myabort = exceptions.Aborter({1: exceptions.NotFound})
        self.assert_raises(LookupError, myabort, 404)
        self.assert_raises(exceptions.NotFound, myabort, 1)

        myabort = exceptions.Aborter(extra={1: exceptions.NotFound})
        self.assert_raises(exceptions.NotFound, myabort, 404)
        self.assert_raises(exceptions.NotFound, myabort, 1)

    def test_exception_repr(self):
        exc = exceptions.NotFound()
        self.assert_equal(text_type(exc), '404: Not Found')
        self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")

        exc = exceptions.NotFound('Not There')
        self.assert_equal(text_type(exc), '404: Not Found')
        self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")

    def test_special_exceptions(self):
        exc = exceptions.MethodNotAllowed(['GET', 'HEAD', 'POST'])
        h = dict(exc.get_headers({}))
        self.assert_equal(h['Allow'], 'GET, HEAD, POST')
        self.assert_true('The method is not allowed' in exc.get_description())


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ExceptionsTestCase))
    return suite


@@ -0,0 +1,400 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.formparser
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the form parsing facilities.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import with_statement

import unittest
from os.path import join, dirname

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug import formparser
from werkzeug.test import create_environ, Client
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import RequestEntityTooLarge
from werkzeug.datastructures import MultiDict
from werkzeug.formparser import parse_form_data
from werkzeug._compat import BytesIO
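

# Request.application adapts a function taking a Request into a WSGI
# application; this consumer echoes the requested form field or uploaded
# file back so the tests can inspect what the parser produced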
@Request.application
def form_data_consumer(request):
    result_object = request.args['object']
    if result_object == 'text':
        return Response(repr(request.form['text']))
    f = request.files[result_object]
    return Response(b'\n'.join((
        repr(f.filename).encode('ascii'),
        repr(f.name).encode('ascii'),
        repr(f.content_type).encode('ascii'),
        f.stream.read()
    )))


def get_contents(filename):
    with open(filename, 'rb') as f:
        return f.read()


class FormParserTestCase(WerkzeugTestCase):

    def test_limiting(self):
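        # max_content_length caps the size of the whole request body, while
        # max_form_memory_size only caps form fields held in memory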
        data = b'foo=Hello+World&bar=baz'
        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='application/x-www-form-urlencoded',
                                  method='POST')
        req.max_content_length = 400
        self.assert_strict_equal(req.form['foo'], u'Hello World')

        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='application/x-www-form-urlencoded',
                                  method='POST')
        req.max_form_memory_size = 7
        self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])

        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='application/x-www-form-urlencoded',
                                  method='POST')
        req.max_form_memory_size = 400
        self.assert_strict_equal(req.form['foo'], u'Hello World')

        data = (b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\n'
                b'Hello World\r\n'
                b'--foo\r\nContent-Disposition: form-field; name=bar\r\n\r\n'
                b'bar=baz\r\n--foo--')
        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='multipart/form-data; boundary=foo',
                                  method='POST')
        req.max_content_length = 4
        self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])

        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='multipart/form-data; boundary=foo',
                                  method='POST')
        req.max_content_length = 400
        self.assert_strict_equal(req.form['foo'], u'Hello World')

        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='multipart/form-data; boundary=foo',
                                  method='POST')
        req.max_form_memory_size = 7
        self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])

        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='multipart/form-data; boundary=foo',
                                  method='POST')
        req.max_form_memory_size = 400
        self.assert_strict_equal(req.form['foo'], u'Hello World')

    def test_parse_form_data_put_without_content(self):
        # A PUT without a Content-Type header returns empty data

        # Both rfc1945 and rfc2616 (1.0 and 1.1) say "Any HTTP/[1.0/1.1] message
        # containing an entity-body SHOULD include a Content-Type header field
        # defining the media type of that body."  If either header is
        # omitted, parse_form_data should still work.
        env = create_environ('/foo', 'http://example.org/', method='PUT')
        del env['CONTENT_TYPE']
        del env['CONTENT_LENGTH']

        stream, form, files = formparser.parse_form_data(env)
        self.assert_strict_equal(stream.read(), b'')
        self.assert_strict_equal(len(form), 0)
        self.assert_strict_equal(len(files), 0)

    def test_parse_form_data_get_without_content(self):
        env = create_environ('/foo', 'http://example.org/', method='GET')
        del env['CONTENT_TYPE']
        del env['CONTENT_LENGTH']

        stream, form, files = formparser.parse_form_data(env)
        self.assert_strict_equal(stream.read(), b'')
        self.assert_strict_equal(len(form), 0)
        self.assert_strict_equal(len(files), 0)

    def test_large_file(self):
        data = b'x' * (1024 * 600)
        req = Request.from_values(data={'foo': (BytesIO(data), 'test.txt')},
                                  method='POST')
        # make sure we have a real file here, because we expect to be
        # on the disk.  > 1024 * 500
        self.assert_true(hasattr(req.files['foo'].stream, u'fileno'))
        # close file to prevent fds from leaking
        req.files['foo'].close()

    def test_streaming_parse(self):
        data = b'x' * (1024 * 600)
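        # a parser subclass that exposes the first two streaming parse
        # events instead of buffering the whole upload into form/files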
        class StreamMPP(formparser.MultiPartParser):
            def parse(self, file, boundary, content_length):
                i = iter(self.parse_lines(file, boundary, content_length))
                one = next(i)
                two = next(i)
                return self.cls(()), {'one': one, 'two': two}
        class StreamFDP(formparser.FormDataParser):
            def _sf_parse_multipart(self, stream, mimetype,
                                    content_length, options):
                form, files = StreamMPP(
                    self.stream_factory, self.charset, self.errors,
                    max_form_memory_size=self.max_form_memory_size,
                    cls=self.cls).parse(stream, options.get('boundary').encode('ascii'),
                                        content_length)
                return stream, form, files
            parse_functions = {}
            parse_functions.update(formparser.FormDataParser.parse_functions)
            parse_functions['multipart/form-data'] = _sf_parse_multipart
        class StreamReq(Request):
            form_data_parser_class = StreamFDP
        req = StreamReq.from_values(data={'foo': (BytesIO(data), 'test.txt')},
                                    method='POST')
        self.assert_strict_equal('begin_file', req.files['one'][0])
        self.assert_strict_equal(('foo', 'test.txt'), req.files['one'][1][1:])
        self.assert_strict_equal('cont', req.files['two'][0])
        self.assert_strict_equal(data, req.files['two'][1])


class MultiPartTestCase(WerkzeugTestCase):

    def test_basic(self):
        resources = join(dirname(__file__), 'multipart')
        client = Client(form_data_consumer, Response)
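
        # each entry: (resource folder, multipart boundary,
        # [(filename, field, content type, stored file)], text field value)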
        repository = [
            ('firefox3-2png1txt', '---------------------------186454651713519341951581030105', [
                (u'anchor.png', 'file1', 'image/png', 'file1.png'),
                (u'application_edit.png', 'file2', 'image/png', 'file2.png')
            ], u'example text'),
            ('firefox3-2pnglongtext', '---------------------------14904044739787191031754711748', [
                (u'accept.png', 'file1', 'image/png', 'file1.png'),
                (u'add.png', 'file2', 'image/png', 'file2.png')
            ], u'--long text\r\n--with boundary\r\n--lookalikes--'),
            ('opera8-2png1txt', '----------zEO9jQKmLc2Cq88c23Dx19', [
                (u'arrow_branch.png', 'file1', 'image/png', 'file1.png'),
                (u'award_star_bronze_1.png', 'file2', 'image/png', 'file2.png')
            ], u'blafasel öäü'),
            ('webkit3-2png1txt', '----WebKitFormBoundaryjdSFhcARk8fyGNy6', [
                (u'gtk-apply.png', 'file1', 'image/png', 'file1.png'),
                (u'gtk-no.png', 'file2', 'image/png', 'file2.png')
            ], u'this is another text with ümläüts'),
            ('ie6-2png1txt', '---------------------------7d91b03a20128', [
                (u'file1.png', 'file1', 'image/x-png', 'file1.png'),
                (u'file2.png', 'file2', 'image/x-png', 'file2.png')
            ], u'ie6 sucks :-/')
        ]

        for name, boundary, files, text in repository:
            folder = join(resources, name)
            data = get_contents(join(folder, 'request.txt'))
            for filename, field, content_type, fsname in files:
                response = client.post('/?object=' + field, data=data, content_type=
                                       'multipart/form-data; boundary="%s"' % boundary,
                                       content_length=len(data))
                lines = response.get_data().split(b'\n', 3)
                self.assert_strict_equal(lines[0], repr(filename).encode('ascii'))
                self.assert_strict_equal(lines[1], repr(field).encode('ascii'))
                self.assert_strict_equal(lines[2], repr(content_type).encode('ascii'))
                self.assert_strict_equal(lines[3], get_contents(join(folder, fsname)))
            response = client.post('/?object=text', data=data, content_type=
                                   'multipart/form-data; boundary="%s"' % boundary,
                                   content_length=len(data))
            self.assert_strict_equal(response.get_data(), repr(text).encode('utf-8'))

    def test_ie7_unc_path(self):
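        # IE7 submits the full client-side path as the filename; the parser
        # is expected to reduce it to the plain basename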
        client = Client(form_data_consumer, Response)
        data_file = join(dirname(__file__), 'multipart', 'ie7_full_path_request.txt')
        data = get_contents(data_file)
        boundary = '---------------------------7da36d1b4a0164'
        response = client.post('/?object=cb_file_upload_multiple', data=data, content_type=
                               'multipart/form-data; boundary="%s"' % boundary, content_length=len(data))
        lines = response.get_data().split(b'\n', 3)
        self.assert_strict_equal(lines[0],
                                 repr(u'Sellersburg Town Council Meeting 02-22-2010doc.doc').encode('ascii'))

    def test_end_of_file(self):
        # This test looks innocent but it was actually timing out in
        # the Werkzeug 0.5 release version (#394)
        data = (
            b'--foo\r\n'
            b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
            b'Content-Type: text/plain\r\n\r\n'
            b'file contents and no end'
        )
        data = Request.from_values(input_stream=BytesIO(data),
                                   content_length=len(data),
                                   content_type='multipart/form-data; boundary=foo',
                                   method='POST')
        self.assert_true(not data.files)
        self.assert_true(not data.form)

    def test_broken(self):
        data = (
            '--foo\r\n'
            'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
            'Content-Transfer-Encoding: base64\r\n'
            'Content-Type: text/plain\r\n\r\n'
            'broken base 64'
            '--foo--'
        )
        _, form, files = formparser.parse_form_data(create_environ(data=data,
            method='POST', content_type='multipart/form-data; boundary=foo'))
        self.assert_true(not files)
        self.assert_true(not form)

        self.assert_raises(ValueError, formparser.parse_form_data,
                           create_environ(data=data, method='POST',
                                          content_type='multipart/form-data; boundary=foo'),
                           silent=False)

    def test_file_no_content_type(self):
        data = (
            b'--foo\r\n'
            b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n\r\n'
            b'file contents\r\n--foo--'
        )
        data = Request.from_values(input_stream=BytesIO(data),
                                   content_length=len(data),
                                   content_type='multipart/form-data; boundary=foo',
                                   method='POST')
        self.assert_equal(data.files['test'].filename, 'test.txt')
        self.assert_strict_equal(data.files['test'].read(), b'file contents')

    def test_extra_newline(self):
        # this test looks innocent but it was actually timing out in
        # the Werkzeug 0.5 release version (#394)
        data = (
            b'\r\n\r\n--foo\r\n'
            b'Content-Disposition: form-data; name="foo"\r\n\r\n'
            b'a string\r\n'
            b'--foo--'
        )
        data = Request.from_values(input_stream=BytesIO(data),
                                   content_length=len(data),
                                   content_type='multipart/form-data; boundary=foo',
                                   method='POST')
        self.assert_true(not data.files)
        self.assert_strict_equal(data.form['foo'], u'a string')

    def test_headers(self):
        data = (b'--foo\r\n'
                b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
                b'X-Custom-Header: blah\r\n'
                b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
                b'file contents, just the contents\r\n'
                b'--foo--')
        req = Request.from_values(input_stream=BytesIO(data),
                                  content_length=len(data),
                                  content_type='multipart/form-data; boundary=foo',
                                  method='POST')
        foo = req.files['foo']
        self.assert_strict_equal(foo.mimetype, 'text/plain')
        self.assert_strict_equal(foo.mimetype_params, {'charset': 'utf-8'})
        self.assert_strict_equal(foo.headers['content-type'], foo.content_type)
        self.assert_strict_equal(foo.content_type, 'text/plain; charset=utf-8')
        self.assert_strict_equal(foo.headers['x-custom-header'], 'blah')

    def test_nonstandard_line_endings(self):
        for nl in b'\n', b'\r', b'\r\n':
            data = nl.join((
                b'--foo',
                b'Content-Disposition: form-data; name=foo',
                b'',
                b'this is just bar',
                b'--foo',
                b'Content-Disposition: form-data; name=bar',
                b'',
                b'blafasel',
                b'--foo--'
            ))
            req = Request.from_values(input_stream=BytesIO(data),
                                      content_length=len(data),
                                      content_type='multipart/form-data; '
                                      'boundary=foo', method='POST')
            self.assert_strict_equal(req.form['foo'], u'this is just bar')
            self.assert_strict_equal(req.form['bar'], u'blafasel')

    def test_failures(self):
        def parse_multipart(stream, boundary, content_length):
            parser = formparser.MultiPartParser(content_length)
            return parser.parse(stream, boundary, content_length)
        self.assert_raises(ValueError, parse_multipart, BytesIO(), b'broken ', 0)

        data = b'--foo\r\n\r\nHello World\r\n--foo--'
        self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))

        data = b'--foo\r\nContent-Disposition: form-field; name=foo\r\n' \
               b'Content-Transfer-Encoding: base64\r\n\r\nHello World\r\n--foo--'
        self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))

        data = b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\nHello World\r\n'
        self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))

        x = formparser.parse_multipart_headers(['foo: bar\r\n', ' x test\r\n'])
        self.assert_strict_equal(x['foo'], 'bar\n x test')
        self.assert_raises(ValueError, formparser.parse_multipart_headers,
                           ['foo: bar\r\n', ' x test'])

    def test_bad_newline_bad_newline_assumption(self):
        class ISORequest(Request):
            charset = 'latin1'
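        # base64 of b'Sk\xe5ne l\xe4n' in latin1; decoding must honour the
        # request charset instead of assuming utf-8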
        contents = b'U2vlbmUgbORu'
        data = b'--foo\r\nContent-Disposition: form-data; name="test"\r\n' \
               b'Content-Transfer-Encoding: base64\r\n\r\n' + \
               contents + b'\r\n--foo--'
        req = ISORequest.from_values(input_stream=BytesIO(data),
                                     content_length=len(data),
                                     content_type='multipart/form-data; boundary=foo',
                                     method='POST')
        self.assert_strict_equal(req.form['test'], u'Sk\xe5ne l\xe4n')

    def test_empty_multipart(self):
        environ = {}
        data = b'--boundary--'
        environ['REQUEST_METHOD'] = 'POST'
        environ['CONTENT_TYPE'] = 'multipart/form-data; boundary=boundary'
        environ['CONTENT_LENGTH'] = str(len(data))
        environ['wsgi.input'] = BytesIO(data)
        stream, form, files = parse_form_data(environ, silent=False)
        rv = stream.read()
        self.assert_equal(rv, b'')
        self.assert_equal(form, MultiDict())
        self.assert_equal(files, MultiDict())


class InternalFunctionsTestCase(WerkzeugTestCase):

    def test_line_parser(self):
        assert formparser._line_parse('foo') == ('foo', False)
        assert formparser._line_parse('foo\r\n') == ('foo', True)
        assert formparser._line_parse('foo\r') == ('foo', True)
        assert formparser._line_parse('foo\n') == ('foo', True)

    def test_find_terminator(self):
        lineiter = iter(b'\n\n\nfoo\nbar\nbaz'.splitlines(True))
        find_terminator = formparser.MultiPartParser()._find_terminator
        line = find_terminator(lineiter)
        self.assert_equal(line, b'foo')
        self.assert_equal(list(lineiter), [b'bar\n', b'baz'])
        self.assert_equal(find_terminator([]), b'')
        self.assert_equal(find_terminator([b'']), b'')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(FormParserTestCase))
    suite.addTest(unittest.makeSuite(MultiPartTestCase))
    suite.addTest(unittest.makeSuite(InternalFunctionsTestCase))
    return suite


@@ -0,0 +1,449 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.http
    ~~~~~~~~~~~~~~~~~~~~~~~

    HTTP parsing utilities.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
from datetime import datetime

from werkzeug.testsuite import WerkzeugTestCase
from werkzeug._compat import itervalues, wsgi_encoding_dance

from werkzeug import http, datastructures
from werkzeug.test import create_environ


class HTTPUtilityTestCase(WerkzeugTestCase):

    def test_accept(self):
        a = http.parse_accept_header('en-us,ru;q=0.5')
        self.assert_equal(list(itervalues(a)), ['en-us', 'ru'])
        self.assert_equal(a.best, 'en-us')
        self.assert_equal(a.find('ru'), 1)
        self.assert_raises(ValueError, a.index, 'de')
        self.assert_equal(a.to_header(), 'en-us,ru;q=0.5')

    def test_mime_accept(self):
        a = http.parse_accept_header('text/xml,application/xml,'
                                     'application/xhtml+xml,'
                                     'text/html;q=0.9,text/plain;q=0.8,'
                                     'image/png,*/*;q=0.5',
                                     datastructures.MIMEAccept)
        self.assert_raises(ValueError, lambda: a['missing'])
        self.assert_equal(a['image/png'], 1)
        self.assert_equal(a['text/plain'], 0.8)
        self.assert_equal(a['foo/bar'], 0.5)
        self.assert_equal(a[a.find('foo/bar')], ('*/*', 0.5))

    def test_accept_matches(self):
        a = http.parse_accept_header('text/xml,application/xml,application/xhtml+xml,'
                                     'text/html;q=0.9,text/plain;q=0.8,'
                                     'image/png', datastructures.MIMEAccept)
        self.assert_equal(a.best_match(['text/html', 'application/xhtml+xml']),
                          'application/xhtml+xml')
        self.assert_equal(a.best_match(['text/html']), 'text/html')
        self.assert_true(a.best_match(['foo/bar']) is None)
        self.assert_equal(a.best_match(['foo/bar', 'bar/foo'],
                                       default='foo/bar'), 'foo/bar')
        self.assert_equal(a.best_match(['application/xml', 'text/xml']), 'application/xml')

    def test_charset_accept(self):
        a = http.parse_accept_header('ISO-8859-1,utf-8;q=0.7,*;q=0.7',
                                     datastructures.CharsetAccept)
        self.assert_equal(a['iso-8859-1'], a['iso8859-1'])
        self.assert_equal(a['iso-8859-1'], 1)
        self.assert_equal(a['UTF8'], 0.7)
        self.assert_equal(a['ebcdic'], 0.7)

    def test_language_accept(self):
        a = http.parse_accept_header('de-AT,de;q=0.8,en;q=0.5',
                                     datastructures.LanguageAccept)
        self.assert_equal(a.best, 'de-AT')
        self.assert_true('de_AT' in a)
        self.assert_true('en' in a)
        self.assert_equal(a['de-at'], 1)
        self.assert_equal(a['en'], 0.5)

    def test_set_header(self):
        hs = http.parse_set_header('foo, Bar, "Blah baz", Hehe')
        self.assert_true('blah baz' in hs)
        self.assert_true('foobar' not in hs)
        self.assert_true('foo' in hs)
        self.assert_equal(list(hs), ['foo', 'Bar', 'Blah baz', 'Hehe'])
        hs.add('Foo')
        self.assert_equal(hs.to_header(), 'foo, Bar, "Blah baz", Hehe')

    def test_list_header(self):
        hl = http.parse_list_header('foo baz, blah')
        self.assert_equal(hl, ['foo baz', 'blah'])

    def test_dict_header(self):
        d = http.parse_dict_header('foo="bar baz", blah=42')
        self.assert_equal(d, {'foo': 'bar baz', 'blah': '42'})

    def test_cache_control_header(self):
        cc = http.parse_cache_control_header('max-age=0, no-cache')
        assert cc.max_age == 0
        assert cc.no_cache
        cc = http.parse_cache_control_header('private, community="UCI"', None,
                                             datastructures.ResponseCacheControl)
        assert cc.private
        assert cc['community'] == 'UCI'

        c = datastructures.ResponseCacheControl()
        assert c.no_cache is None
        assert c.private is None
        c.no_cache = True
        assert c.no_cache == '*'
        c.private = True
        assert c.private == '*'
        del c.private
        assert c.private is None
        assert c.to_header() == 'no-cache'

    def test_authorization_header(self):
|
||||
a = http.parse_authorization_header('Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
|
||||
assert a.type == 'basic'
|
||||
assert a.username == 'Aladdin'
|
||||
assert a.password == 'open sesame'
|
||||
|
||||
a = http.parse_authorization_header('''Digest username="Mufasa",
|
||||
realm="testrealm@host.invalid",
|
||||
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
|
||||
uri="/dir/index.html",
|
||||
qop=auth,
|
||||
nc=00000001,
|
||||
cnonce="0a4f113b",
|
||||
response="6629fae49393a05397450978507c4ef1",
|
||||
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
|
||||
assert a.type == 'digest'
|
||||
assert a.username == 'Mufasa'
|
||||
assert a.realm == 'testrealm@host.invalid'
|
||||
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
|
||||
assert a.uri == '/dir/index.html'
|
||||
assert 'auth' in a.qop
|
||||
assert a.nc == '00000001'
|
||||
assert a.cnonce == '0a4f113b'
|
||||
assert a.response == '6629fae49393a05397450978507c4ef1'
|
||||
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
|
||||
|
||||
a = http.parse_authorization_header('''Digest username="Mufasa",
|
||||
realm="testrealm@host.invalid",
|
||||
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
|
||||
uri="/dir/index.html",
|
||||
response="e257afa1414a3340d93d30955171dd0e",
|
||||
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
|
||||
assert a.type == 'digest'
|
||||
assert a.username == 'Mufasa'
|
||||
assert a.realm == 'testrealm@host.invalid'
|
||||
assert a.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
|
||||
assert a.uri == '/dir/index.html'
|
||||
assert a.response == 'e257afa1414a3340d93d30955171dd0e'
|
||||
assert a.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
|
||||
|
||||
assert http.parse_authorization_header('') is None
|
||||
assert http.parse_authorization_header(None) is None
|
||||
assert http.parse_authorization_header('foo') is None
|
||||
|
||||
def test_www_authenticate_header(self):
|
||||
wa = http.parse_www_authenticate_header('Basic realm="WallyWorld"')
|
||||
assert wa.type == 'basic'
|
||||
assert wa.realm == 'WallyWorld'
|
||||
wa.realm = 'Foo Bar'
|
||||
assert wa.to_header() == 'Basic realm="Foo Bar"'
|
||||
|
||||
wa = http.parse_www_authenticate_header('''Digest
|
||||
realm="testrealm@host.com",
|
||||
qop="auth,auth-int",
|
||||
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
|
||||
opaque="5ccc069c403ebaf9f0171e9517f40e41"''')
|
||||
assert wa.type == 'digest'
|
||||
assert wa.realm == 'testrealm@host.com'
|
||||
assert 'auth' in wa.qop
|
||||
assert 'auth-int' in wa.qop
|
||||
assert wa.nonce == 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
|
||||
assert wa.opaque == '5ccc069c403ebaf9f0171e9517f40e41'
|
||||
|
||||
wa = http.parse_www_authenticate_header('broken')
|
||||
assert wa.type == 'broken'
|
||||
|
||||
assert not http.parse_www_authenticate_header('').type
|
||||
assert not http.parse_www_authenticate_header('')
|
||||
|
||||
def test_etags(self):
|
||||
assert http.quote_etag('foo') == '"foo"'
|
||||
assert http.quote_etag('foo', True) == 'w/"foo"'
|
||||
assert http.unquote_etag('"foo"') == ('foo', False)
|
||||
assert http.unquote_etag('w/"foo"') == ('foo', True)
|
||||
es = http.parse_etags('"foo", "bar", w/"baz", blar')
|
||||
assert sorted(es) == ['bar', 'blar', 'foo']
|
||||
assert 'foo' in es
|
||||
assert 'baz' not in es
|
||||
assert es.contains_weak('baz')
|
||||
assert 'blar' in es
|
||||
assert es.contains_raw('w/"baz"')
|
||||
assert es.contains_raw('"foo"')
|
||||
assert sorted(es.to_header().split(', ')) == ['"bar"', '"blar"', '"foo"', 'w/"baz"']
|
||||
|
||||
def test_etags_nonzero(self):
|
||||
etags = http.parse_etags('w/"foo"')
|
||||
self.assert_true(bool(etags))
|
||||
self.assert_true(etags.contains_raw('w/"foo"'))
|
||||
|
||||
def test_parse_date(self):
|
||||
assert http.parse_date('Sun, 06 Nov 1994 08:49:37 GMT ') == datetime(1994, 11, 6, 8, 49, 37)
|
||||
assert http.parse_date('Sunday, 06-Nov-94 08:49:37 GMT') == datetime(1994, 11, 6, 8, 49, 37)
|
||||
assert http.parse_date(' Sun Nov 6 08:49:37 1994') == datetime(1994, 11, 6, 8, 49, 37)
|
||||
assert http.parse_date('foo') is None
|
||||
|
||||
def test_parse_date_overflows(self):
|
||||
assert http.parse_date(' Sun 02 Feb 1343 08:49:37 GMT') == datetime(1343, 2, 2, 8, 49, 37)
|
||||
assert http.parse_date('Thu, 01 Jan 1970 00:00:00 GMT') == datetime(1970, 1, 1, 0, 0)
|
||||
assert http.parse_date('Thu, 33 Jan 1970 00:00:00 GMT') is None
|
||||
|
||||
def test_remove_entity_headers(self):
|
||||
now = http.http_date()
|
||||
headers1 = [('Date', now), ('Content-Type', 'text/html'), ('Content-Length', '0')]
|
||||
headers2 = datastructures.Headers(headers1)
|
||||
|
||||
http.remove_entity_headers(headers1)
|
||||
assert headers1 == [('Date', now)]
|
||||
|
||||
http.remove_entity_headers(headers2)
|
||||
self.assert_equal(headers2, datastructures.Headers([(u'Date', now)]))
|
||||
|
||||
def test_remove_hop_by_hop_headers(self):
|
||||
headers1 = [('Connection', 'closed'), ('Foo', 'bar'),
|
||||
('Keep-Alive', 'wtf')]
|
||||
headers2 = datastructures.Headers(headers1)
|
||||
|
||||
http.remove_hop_by_hop_headers(headers1)
|
||||
assert headers1 == [('Foo', 'bar')]
|
||||
|
||||
http.remove_hop_by_hop_headers(headers2)
|
||||
assert headers2 == datastructures.Headers([('Foo', 'bar')])
|
||||
|
||||
def test_parse_options_header(self):
|
||||
assert http.parse_options_header(r'something; foo="other\"thing"') == \
|
||||
('something', {'foo': 'other"thing'})
|
||||
assert http.parse_options_header(r'something; foo="other\"thing"; meh=42') == \
|
||||
('something', {'foo': 'other"thing', 'meh': '42'})
|
||||
assert http.parse_options_header(r'something; foo="other\"thing"; meh=42; bleh') == \
|
||||
('something', {'foo': 'other"thing', 'meh': '42', 'bleh': None})
|
||||
assert http.parse_options_header('something; foo="other;thing"; meh=42; bleh') == \
|
||||
('something', {'foo': 'other;thing', 'meh': '42', 'bleh': None})
|
||||
assert http.parse_options_header('something; foo="otherthing"; meh=; bleh') == \
|
||||
('something', {'foo': 'otherthing', 'meh': None, 'bleh': None})
|
||||
|
||||
|
||||
|
||||
def test_dump_options_header(self):
|
||||
assert http.dump_options_header('foo', {'bar': 42}) == \
|
||||
'foo; bar=42'
|
||||
assert http.dump_options_header('foo', {'bar': 42, 'fizz': None}) in \
|
||||
('foo; bar=42; fizz', 'foo; fizz; bar=42')
|
||||
|
||||
def test_dump_header(self):
|
||||
assert http.dump_header([1, 2, 3]) == '1, 2, 3'
|
||||
assert http.dump_header([1, 2, 3], allow_token=False) == '"1", "2", "3"'
|
||||
assert http.dump_header({'foo': 'bar'}, allow_token=False) == 'foo="bar"'
|
||||
assert http.dump_header({'foo': 'bar'}) == 'foo=bar'
|
||||
|
||||
def test_is_resource_modified(self):
|
||||
env = create_environ()
|
||||
|
||||
# ignore POST
|
||||
env['REQUEST_METHOD'] = 'POST'
|
||||
assert not http.is_resource_modified(env, etag='testing')
|
||||
env['REQUEST_METHOD'] = 'GET'
|
||||
|
||||
# etagify from data
|
||||
self.assert_raises(TypeError, http.is_resource_modified, env,
|
||||
data='42', etag='23')
|
||||
env['HTTP_IF_NONE_MATCH'] = http.generate_etag(b'awesome')
|
||||
assert not http.is_resource_modified(env, data=b'awesome')
|
||||
|
||||
env['HTTP_IF_MODIFIED_SINCE'] = http.http_date(datetime(2008, 1, 1, 12, 30))
|
||||
assert not http.is_resource_modified(env,
|
||||
last_modified=datetime(2008, 1, 1, 12, 00))
|
||||
assert http.is_resource_modified(env,
|
||||
last_modified=datetime(2008, 1, 1, 13, 00))
|
||||
|
||||
def test_date_formatting(self):
|
||||
assert http.cookie_date(0) == 'Thu, 01-Jan-1970 00:00:00 GMT'
|
||||
assert http.cookie_date(datetime(1970, 1, 1)) == 'Thu, 01-Jan-1970 00:00:00 GMT'
|
||||
assert http.http_date(0) == 'Thu, 01 Jan 1970 00:00:00 GMT'
|
||||
assert http.http_date(datetime(1970, 1, 1)) == 'Thu, 01 Jan 1970 00:00:00 GMT'
|
||||
|
||||
def test_cookies(self):
|
||||
self.assert_strict_equal(
|
||||
dict(http.parse_cookie('dismiss-top=6; CP=null*; PHPSESSID=0a539d42abc001cd'
|
||||
'c762809248d4beed; a=42; b="\\\";"')),
|
||||
{
|
||||
'CP': u'null*',
|
||||
'PHPSESSID': u'0a539d42abc001cdc762809248d4beed',
|
||||
'a': u'42',
|
||||
'dismiss-top': u'6',
|
||||
'b': u'\";'
|
||||
}
|
||||
)
|
||||
self.assert_strict_equal(
|
||||
set(http.dump_cookie('foo', 'bar baz blub', 360, httponly=True,
|
||||
sync_expires=False).split(u'; ')),
|
||||
set([u'HttpOnly', u'Max-Age=360', u'Path=/', u'foo="bar baz blub"'])
|
||||
)
|
||||
self.assert_strict_equal(dict(http.parse_cookie('fo234{=bar; blub=Blah')),
|
||||
{'fo234{': u'bar', 'blub': u'Blah'})
|
||||
|
||||
def test_cookie_quoting(self):
|
||||
val = http.dump_cookie("foo", "?foo")
|
||||
self.assert_strict_equal(val, 'foo="?foo"; Path=/')
|
||||
self.assert_strict_equal(dict(http.parse_cookie(val)), {'foo': u'?foo'})
|
||||
|
||||
self.assert_strict_equal(dict(http.parse_cookie(r'foo="foo\054bar"')),
|
||||
{'foo': u'foo,bar'})
|
||||
|
||||
def test_cookie_domain_resolving(self):
|
||||
val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
|
||||
self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/')
|
||||
|
||||
def test_cookie_unicode_dumping(self):
|
||||
val = http.dump_cookie('foo', u'\N{SNOWMAN}')
|
||||
h = datastructures.Headers()
|
||||
h.add('Set-Cookie', val)
|
||||
self.assert_equal(h['Set-Cookie'], 'foo="\\342\\230\\203"; Path=/')
|
||||
|
||||
cookies = http.parse_cookie(h['Set-Cookie'])
|
||||
self.assert_equal(cookies['foo'], u'\N{SNOWMAN}')
|
||||
|
||||
def test_cookie_unicode_keys(self):
|
||||
# Yes, this is technically against the spec but happens
|
||||
val = http.dump_cookie(u'fö', u'fö')
|
||||
self.assert_equal(val, wsgi_encoding_dance(u'fö="f\\303\\266"; Path=/', 'utf-8'))
|
||||
cookies = http.parse_cookie(val)
|
||||
self.assert_equal(cookies[u'fö'], u'fö')
|
||||
|
||||
def test_cookie_unicode_parsing(self):
|
||||
# This is actually a correct test. This is what is being submitted
|
||||
# by firefox if you set an unicode cookie and we get the cookie sent
|
||||
# in on Python 3 under PEP 3333.
|
||||
cookies = http.parse_cookie(u'fö=fö')
|
||||
self.assert_equal(cookies[u'fö'], u'fö')
|
||||
|
||||
def test_cookie_domain_encoding(self):
|
||||
val = http.dump_cookie('foo', 'bar', domain=u'\N{SNOWMAN}.com')
|
||||
self.assert_strict_equal(val, 'foo=bar; Domain=xn--n3h.com; Path=/')
|
||||
|
||||
val = http.dump_cookie('foo', 'bar', domain=u'.\N{SNOWMAN}.com')
|
||||
self.assert_strict_equal(val, 'foo=bar; Domain=.xn--n3h.com; Path=/')
|
||||
|
||||
val = http.dump_cookie('foo', 'bar', domain=u'.foo.com')
|
||||
self.assert_strict_equal(val, 'foo=bar; Domain=.foo.com; Path=/')
|
||||
|
||||
|
||||
class RangeTestCase(WerkzeugTestCase):
|
||||
|
||||
def test_if_range_parsing(self):
|
||||
rv = http.parse_if_range_header('"Test"')
|
||||
assert rv.etag == 'Test'
|
||||
assert rv.date is None
|
||||
assert rv.to_header() == '"Test"'
|
||||
|
||||
# weak information is dropped
|
||||
rv = http.parse_if_range_header('w/"Test"')
|
||||
assert rv.etag == 'Test'
|
||||
assert rv.date is None
|
||||
assert rv.to_header() == '"Test"'
|
||||
|
||||
# broken etags are supported too
|
||||
rv = http.parse_if_range_header('bullshit')
|
||||
assert rv.etag == 'bullshit'
|
||||
assert rv.date is None
|
||||
assert rv.to_header() == '"bullshit"'
|
||||
|
||||
rv = http.parse_if_range_header('Thu, 01 Jan 1970 00:00:00 GMT')
|
||||
assert rv.etag is None
|
||||
assert rv.date == datetime(1970, 1, 1)
|
||||
assert rv.to_header() == 'Thu, 01 Jan 1970 00:00:00 GMT'
|
||||
|
||||
for x in '', None:
|
||||
rv = http.parse_if_range_header(x)
|
||||
assert rv.etag is None
|
||||
assert rv.date is None
|
||||
assert rv.to_header() == ''
|
||||
|
||||
def test_range_parsing():
|
||||
rv = http.parse_range_header('bytes=52')
|
||||
assert rv is None
|
||||
|
||||
rv = http.parse_range_header('bytes=52-')
|
||||
assert rv.units == 'bytes'
|
||||
assert rv.ranges == [(52, None)]
|
||||
assert rv.to_header() == 'bytes=52-'
|
||||
|
||||
rv = http.parse_range_header('bytes=52-99')
|
||||
assert rv.units == 'bytes'
|
||||
assert rv.ranges == [(52, 100)]
|
||||
assert rv.to_header() == 'bytes=52-99'
|
||||
|
||||
rv = http.parse_range_header('bytes=52-99,-1000')
|
||||
assert rv.units == 'bytes'
|
||||
assert rv.ranges == [(52, 100), (-1000, None)]
|
||||
assert rv.to_header() == 'bytes=52-99,-1000'
|
||||
|
||||
rv = http.parse_range_header('bytes = 1 - 100')
|
||||
assert rv.units == 'bytes'
|
||||
assert rv.ranges == [(1, 101)]
|
||||
assert rv.to_header() == 'bytes=1-100'
|
||||
|
||||
rv = http.parse_range_header('AWesomes=0-999')
|
||||
assert rv.units == 'awesomes'
|
||||
assert rv.ranges == [(0, 1000)]
|
||||
assert rv.to_header() == 'awesomes=0-999'
|
||||
|
||||
def test_content_range_parsing():
|
||||
rv = http.parse_content_range_header('bytes 0-98/*')
|
||||
assert rv.units == 'bytes'
|
||||
assert rv.start == 0
|
||||
assert rv.stop == 99
|
||||
assert rv.length is None
|
||||
assert rv.to_header() == 'bytes 0-98/*'
|
||||
|
||||
rv = http.parse_content_range_header('bytes 0-98/*asdfsa')
|
||||
assert rv is None
|
||||
|
||||
rv = http.parse_content_range_header('bytes 0-99/100')
|
||||
assert rv.to_header() == 'bytes 0-99/100'
|
||||
rv.start = None
|
||||
rv.stop = None
|
||||
assert rv.units == 'bytes'
|
||||
assert rv.to_header() == 'bytes */100'
|
||||
|
||||
rv = http.parse_content_range_header('bytes */100')
|
||||
assert rv.start is None
|
||||
assert rv.stop is None
|
||||
assert rv.length == 100
|
||||
assert rv.units == 'bytes'
|
||||
|
||||
|
||||
class RegressionTestCase(WerkzeugTestCase):
|
||||
|
||||
def test_best_match_works(self):
|
||||
# was a bug in 0.6
|
||||
rv = http.parse_accept_header('foo=,application/xml,application/xhtml+xml,'
|
||||
'text/html;q=0.9,text/plain;q=0.8,'
|
||||
'image/png,*/*;q=0.5',
|
||||
datastructures.MIMEAccept).best_match(['foo/bar'])
|
||||
self.assert_equal(rv, 'foo/bar')
|
||||
|
||||
|
||||
def suite():
|
||||
suite = unittest.TestSuite()
|
||||
suite.addTest(unittest.makeSuite(HTTPUtilityTestCase))
|
||||
suite.addTest(unittest.makeSuite(RegressionTestCase))
|
||||
return suite
|
||||
|
|
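[Editor's note] The Accept tests above guard the usual content-negotiation pattern. A minimal sketch, assuming a plain WSGI environ; the `negotiate` helper and its `offered` tuple are illustrative names, not part of this diff:

    from werkzeug.datastructures import MIMEAccept
    from werkzeug.http import parse_accept_header

    def negotiate(environ, offered=('application/json', 'text/html')):
        # Parse the client's Accept header into a MIMEAccept and pick the
        # best of the representations we can actually produce; an empty or
        # missing header falls back to the first offer via `default`.
        accept = parse_accept_header(environ.get('HTTP_ACCEPT', ''), MIMEAccept)
        return accept.best_match(offered, default=offered[0])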
@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.internal
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Internal tests.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from datetime import datetime
from warnings import filterwarnings, resetwarnings

from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import Request, Response

from werkzeug import _internal as internal
from werkzeug.test import create_environ


class InternalTestCase(WerkzeugTestCase):

    def test_date_to_unix(self):
        assert internal._date_to_unix(datetime(1970, 1, 1)) == 0
        assert internal._date_to_unix(datetime(1970, 1, 1, 1, 0, 0)) == 3600
        assert internal._date_to_unix(datetime(1970, 1, 1, 1, 1, 1)) == 3661
        x = datetime(2010, 2, 15, 16, 15, 39)
        assert internal._date_to_unix(x) == 1266250539

    def test_easteregg(self):
        req = Request.from_values('/?macgybarchakku')
        resp = Response.force_type(internal._easteregg(None), req)
        assert b'About Werkzeug' in resp.get_data()
        assert b'the Swiss Army knife of Python web development' in resp.get_data()

    def test_wrapper_internals(self):
        req = Request.from_values(data={'foo': 'bar'}, method='POST')
        req._load_form_data()
        assert req.form.to_dict() == {'foo': 'bar'}

        # second call does not break
        req._load_form_data()
        assert req.form.to_dict() == {'foo': 'bar'}

        # check reprs
        assert repr(req) == "<Request 'http://localhost/' [POST]>"
        resp = Response()
        assert repr(resp) == '<Response 0 bytes [200 OK]>'
        resp.set_data('Hello World!')
        assert repr(resp) == '<Response 12 bytes [200 OK]>'
        resp.response = iter(['Test'])
        assert repr(resp) == '<Response streamed [200 OK]>'

        # unicode data does not set content length
        response = Response([u'Hällo Wörld'])
        headers = response.get_wsgi_headers(create_environ())
        assert u'Content-Length' not in headers

        response = Response([u'Hällo Wörld'.encode('utf-8')])
        headers = response.get_wsgi_headers(create_environ())
        assert u'Content-Length' in headers

        # check for internal warnings
        filterwarnings('error', category=Warning)
        response = Response()
        environ = create_environ()
        response.response = 'What the...?'
        self.assert_raises(Warning, lambda: list(response.iter_encoded()))
        self.assert_raises(Warning, lambda: list(response.get_app_iter(environ)))
        response.direct_passthrough = True
        self.assert_raises(Warning, lambda: list(response.iter_encoded()))
        self.assert_raises(Warning, lambda: list(response.get_app_iter(environ)))
        resetwarnings()


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(InternalTestCase))
    return suite
@@ -0,0 +1,159 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.local
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Local and local proxy tests.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import time
import unittest
from threading import Thread

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug import local


class LocalTestCase(WerkzeugTestCase):

    def test_basic_local(self):
        l = local.Local()
        l.foo = 0
        values = []
        def value_setter(idx):
            time.sleep(0.01 * idx)
            l.foo = idx
            time.sleep(0.02)
            values.append(l.foo)
        threads = [Thread(target=value_setter, args=(x,))
                   for x in [1, 2, 3]]
        for thread in threads:
            thread.start()
        time.sleep(0.2)
        assert sorted(values) == [1, 2, 3]

        def delfoo():
            del l.foo
        delfoo()
        self.assert_raises(AttributeError, lambda: l.foo)
        self.assert_raises(AttributeError, delfoo)

        local.release_local(l)

    def test_local_release(self):
        loc = local.Local()
        loc.foo = 42
        local.release_local(loc)
        assert not hasattr(loc, 'foo')

        ls = local.LocalStack()
        ls.push(42)
        local.release_local(ls)
        assert ls.top is None

    def test_local_proxy(self):
        foo = []
        ls = local.LocalProxy(lambda: foo)
        ls.append(42)
        ls.append(23)
        ls[1:] = [1, 2, 3]
        assert foo == [42, 1, 2, 3]
        assert repr(foo) == repr(ls)
        assert foo[0] == 42
        foo += [1]
        assert list(foo) == [42, 1, 2, 3, 1]

    def test_local_proxy_operations_math(self):
        foo = 2
        ls = local.LocalProxy(lambda: foo)
        assert ls + 1 == 3
        assert 1 + ls == 3
        assert ls - 1 == 1
        assert 1 - ls == -1
        assert ls * 1 == 2
        assert 1 * ls == 2
        assert ls / 1 == 2
        assert 1.0 / ls == 0.5
        assert ls // 1.0 == 2.0
        assert 1.0 // ls == 0.0
        assert ls % 2 == 0
        assert 2 % ls == 0

    def test_local_proxy_operations_strings(self):
        foo = "foo"
        ls = local.LocalProxy(lambda: foo)
        assert ls + "bar" == "foobar"
        assert "bar" + ls == "barfoo"
        assert ls * 2 == "foofoo"

        foo = "foo %s"
        assert ls % ("bar",) == "foo bar"

    def test_local_stack(self):
        ident = local.get_ident()

        ls = local.LocalStack()
        assert ident not in ls._local.__storage__
        assert ls.top is None
        ls.push(42)
        assert ident in ls._local.__storage__
        assert ls.top == 42
        ls.push(23)
        assert ls.top == 23
        ls.pop()
        assert ls.top == 42
        ls.pop()
        assert ls.top is None
        assert ls.pop() is None
        assert ls.pop() is None

        proxy = ls()
        ls.push([1, 2])
        assert proxy == [1, 2]
        ls.push((1, 2))
        assert proxy == (1, 2)
        ls.pop()
        ls.pop()
        assert repr(proxy) == '<LocalProxy unbound>'

        assert ident not in ls._local.__storage__

    def test_local_proxies_with_callables(self):
        foo = 42
        ls = local.LocalProxy(lambda: foo)
        assert ls == 42
        foo = [23]
        ls.append(42)
        assert ls == [23, 42]
        assert foo == [23, 42]

    def test_custom_idents(self):
        ident = 0
        loc = local.Local()
        stack = local.LocalStack()
        mgr = local.LocalManager([loc, stack], ident_func=lambda: ident)

        loc.foo = 42
        stack.push({'foo': 42})
        ident = 1
        loc.foo = 23
        stack.push({'foo': 23})
        ident = 0
        assert loc.foo == 42
        assert stack.top['foo'] == 42
        stack.pop()
        assert stack.top is None
        ident = 1
        assert loc.foo == 23
        assert stack.top['foo'] == 23
        stack.pop()
        assert stack.top is None


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(LocalTestCase))
    return suite
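[Editor's note] The LocalStack/LocalProxy tests above exist to pin down the request-context pattern frameworks build on top of Werkzeug. A minimal sketch under that assumption; `_request_ctx` and `current_request` are illustrative names, not part of this diff:

    from werkzeug.local import LocalStack, LocalProxy

    _request_ctx = LocalStack()
    # The proxy re-resolves the stack top on every access, so one module-level
    # name can safely point at per-thread (or per-greenlet) state.
    current_request = LocalProxy(lambda: _request_ctx.top)

    _request_ctx.push({'path': '/'})
    assert current_request['path'] == '/'
    _request_ctx.pop()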
@@ -0,0 +1,56 @@
#!/usr/bin/env python
"""
Hacky helper application to collect form data.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response


def copy_stream(request):
    from os import mkdir
    from time import time
    folder = 'request-%d' % time()
    mkdir(folder)
    environ = request.environ
    f = open(folder + '/request.txt', 'wb+')
    f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
    f.flush()
    f.seek(0)
    environ['wsgi.input'] = f
    request.stat_folder = folder


def stats(request):
    copy_stream(request)
    f1 = request.files['file1']
    f2 = request.files['file2']
    text = request.form['text']
    f1.save(request.stat_folder + '/file1.bin')
    f2.save(request.stat_folder + '/file2.bin')
    open(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
    return Response('Done.')


def upload_file(request):
    return Response('''
    <h1>Upload File</h1>
    <form action="" method="post" enctype="multipart/form-data">
        <input type="file" name="file1"><br>
        <input type="file" name="file2"><br>
        <textarea name="text"></textarea><br>
        <input type="submit" value="Send">
    </form>
    ''', mimetype='text/html')


def application(environ, start_response):
    request = Request(environ)
    if request.method == 'POST':
        response = stats(request)
    else:
        response = upload_file(request)
    return response(environ, start_response)


if __name__ == '__main__':
    run_simple('localhost', 5000, application, use_debugger=True)
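[Editor's note] Instead of posting through a live server, the helper above can be driven in-process with Werkzeug's test Client; a hedged sketch (it assumes the module above is importable, and like the live app it writes a request-<timestamp>/ folder to the current directory):

    from io import BytesIO
    from werkzeug.test import Client
    from werkzeug.wrappers import Response

    c = Client(application, Response)  # `application` from the helper above
    resp = c.post('/', data={
        'file1': (BytesIO(b'first file'), 'file1.bin'),
        'file2': (BytesIO(b'second file'), 'file2.bin'),
        'text': u'blafasel',
    })
    assert resp.status_code == 200 and resp.data == b'Done.'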
(binary image fixture added, 523 B)
(binary image fixture added, 703 B)

@@ -0,0 +1 @@
example text

(binary image fixture added, 781 B)
(binary image fixture added, 733 B)

@@ -0,0 +1,3 @@
--long text
--with boundary
--lookalikes--

(binary image fixture added, 523 B)
(binary image fixture added, 703 B)

@@ -0,0 +1 @@
ie6 sucks :-/

(binary image fixture added, 582 B)
(binary image fixture added, 733 B)

@@ -0,0 +1 @@
blafasel öäü

(binary image fixture added, 1,002 B)
(binary image fixture added, 952 B)

@@ -0,0 +1 @@
this is another text with ümläüts

@@ -0,0 +1 @@
FOUND
@@ -0,0 +1,673 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.routing
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Routing tests.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug import routing as r
from werkzeug.wrappers import Response
from werkzeug.datastructures import ImmutableDict
from werkzeug.test import create_environ


class RoutingTestCase(WerkzeugTestCase):

    def test_basic_routing(self):
        map = r.Map([
            r.Rule('/', endpoint='index'),
            r.Rule('/foo', endpoint='foo'),
            r.Rule('/bar/', endpoint='bar')
        ])
        adapter = map.bind('example.org', '/')
        assert adapter.match('/') == ('index', {})
        assert adapter.match('/foo') == ('foo', {})
        assert adapter.match('/bar/') == ('bar', {})
        self.assert_raises(r.RequestRedirect, lambda: adapter.match('/bar'))
        self.assert_raises(r.NotFound, lambda: adapter.match('/blub'))

        adapter = map.bind('example.org', '/test')
        try:
            adapter.match('/bar')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://example.org/test/bar/'
        else:
            self.fail('Expected request redirect')

        adapter = map.bind('example.org', '/')
        try:
            adapter.match('/bar')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://example.org/bar/'
        else:
            self.fail('Expected request redirect')

        adapter = map.bind('example.org', '/')
        try:
            adapter.match('/bar', query_args={'aha': 'muhaha'})
        except r.RequestRedirect as e:
            assert e.new_url == 'http://example.org/bar/?aha=muhaha'
        else:
            self.fail('Expected request redirect')

        adapter = map.bind('example.org', '/')
        try:
            adapter.match('/bar', query_args='aha=muhaha')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://example.org/bar/?aha=muhaha'
        else:
            self.fail('Expected request redirect')

        adapter = map.bind_to_environ(create_environ('/bar?foo=bar',
                                                     'http://example.org/'))
        try:
            adapter.match()
        except r.RequestRedirect as e:
            assert e.new_url == 'http://example.org/bar/?foo=bar'
        else:
            self.fail('Expected request redirect')

    def test_environ_defaults(self):
        environ = create_environ("/foo")
        self.assert_strict_equal(environ["PATH_INFO"], '/foo')
        m = r.Map([r.Rule("/foo", endpoint="foo"), r.Rule("/bar", endpoint="bar")])
        a = m.bind_to_environ(environ)
        self.assert_strict_equal(a.match("/foo"), ('foo', {}))
        self.assert_strict_equal(a.match(), ('foo', {}))
        self.assert_strict_equal(a.match("/bar"), ('bar', {}))
        self.assert_raises(r.NotFound, a.match, "/bars")

    def test_environ_nonascii_pathinfo(self):
        environ = create_environ(u'/лошадь')
        m = r.Map([
            r.Rule(u'/', endpoint='index'),
            r.Rule(u'/лошадь', endpoint='horse')
        ])
        a = m.bind_to_environ(environ)
        self.assert_strict_equal(a.match(u'/'), ('index', {}))
        self.assert_strict_equal(a.match(u'/лошадь'), ('horse', {}))
        self.assert_raises(r.NotFound, a.match, u'/барсук')

    def test_basic_building(self):
        map = r.Map([
            r.Rule('/', endpoint='index'),
            r.Rule('/foo', endpoint='foo'),
            r.Rule('/bar/<baz>', endpoint='bar'),
            r.Rule('/bar/<int:bazi>', endpoint='bari'),
            r.Rule('/bar/<float:bazf>', endpoint='barf'),
            r.Rule('/bar/<path:bazp>', endpoint='barp'),
            r.Rule('/hehe', endpoint='blah', subdomain='blah')
        ])
        adapter = map.bind('example.org', '/', subdomain='blah')

        assert adapter.build('index', {}) == 'http://example.org/'
        assert adapter.build('foo', {}) == 'http://example.org/foo'
        assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/bar/blub'
        assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/bar/50'
        assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/bar/0.815'
        assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/bar/la/di'
        assert adapter.build('blah', {}) == '/hehe'
        self.assert_raises(r.BuildError, lambda: adapter.build('urks'))

        adapter = map.bind('example.org', '/test', subdomain='blah')
        assert adapter.build('index', {}) == 'http://example.org/test/'
        assert adapter.build('foo', {}) == 'http://example.org/test/foo'
        assert adapter.build('bar', {'baz': 'blub'}) == 'http://example.org/test/bar/blub'
        assert adapter.build('bari', {'bazi': 50}) == 'http://example.org/test/bar/50'
        assert adapter.build('barf', {'bazf': 0.815}) == 'http://example.org/test/bar/0.815'
        assert adapter.build('barp', {'bazp': 'la/di'}) == 'http://example.org/test/bar/la/di'
        assert adapter.build('blah', {}) == '/test/hehe'

    def test_defaults(self):
        map = r.Map([
            r.Rule('/foo/', defaults={'page': 1}, endpoint='foo'),
            r.Rule('/foo/<int:page>', endpoint='foo')
        ])
        adapter = map.bind('example.org', '/')

        assert adapter.match('/foo/') == ('foo', {'page': 1})
        self.assert_raises(r.RequestRedirect, lambda: adapter.match('/foo/1'))
        assert adapter.match('/foo/2') == ('foo', {'page': 2})
        assert adapter.build('foo', {}) == '/foo/'
        assert adapter.build('foo', {'page': 1}) == '/foo/'
        assert adapter.build('foo', {'page': 2}) == '/foo/2'

    def test_greedy(self):
        map = r.Map([
            r.Rule('/foo', endpoint='foo'),
            r.Rule('/<path:bar>', endpoint='bar'),
            r.Rule('/<path:bar>/<path:blub>', endpoint='bar')
        ])
        adapter = map.bind('example.org', '/')

        assert adapter.match('/foo') == ('foo', {})
        assert adapter.match('/blub') == ('bar', {'bar': 'blub'})
        assert adapter.match('/he/he') == ('bar', {'bar': 'he', 'blub': 'he'})

        assert adapter.build('foo', {}) == '/foo'
        assert adapter.build('bar', {'bar': 'blub'}) == '/blub'
        assert adapter.build('bar', {'bar': 'blub', 'blub': 'bar'}) == '/blub/bar'

    def test_path(self):
        map = r.Map([
            r.Rule('/', defaults={'name': 'FrontPage'}, endpoint='page'),
            r.Rule('/Special', endpoint='special'),
            r.Rule('/<int:year>', endpoint='year'),
            r.Rule('/<path:name>', endpoint='page'),
            r.Rule('/<path:name>/edit', endpoint='editpage'),
            r.Rule('/<path:name>/silly/<path:name2>', endpoint='sillypage'),
            r.Rule('/<path:name>/silly/<path:name2>/edit', endpoint='editsillypage'),
            r.Rule('/Talk:<path:name>', endpoint='talk'),
            r.Rule('/User:<username>', endpoint='user'),
            r.Rule('/User:<username>/<path:name>', endpoint='userpage'),
            r.Rule('/Files/<path:file>', endpoint='files'),
        ])
        adapter = map.bind('example.org', '/')

        assert adapter.match('/') == ('page', {'name': 'FrontPage'})
        self.assert_raises(r.RequestRedirect, lambda: adapter.match('/FrontPage'))
        assert adapter.match('/Special') == ('special', {})
        assert adapter.match('/2007') == ('year', {'year': 2007})
        assert adapter.match('/Some/Page') == ('page', {'name': 'Some/Page'})
        assert adapter.match('/Some/Page/edit') == ('editpage', {'name': 'Some/Page'})
        assert adapter.match('/Foo/silly/bar') == ('sillypage', {'name': 'Foo', 'name2': 'bar'})
        assert adapter.match('/Foo/silly/bar/edit') == ('editsillypage', {'name': 'Foo', 'name2': 'bar'})
        assert adapter.match('/Talk:Foo/Bar') == ('talk', {'name': 'Foo/Bar'})
        assert adapter.match('/User:thomas') == ('user', {'username': 'thomas'})
        assert adapter.match('/User:thomas/projects/werkzeug') == \
            ('userpage', {'username': 'thomas', 'name': 'projects/werkzeug'})
        assert adapter.match('/Files/downloads/werkzeug/0.2.zip') == \
            ('files', {'file': 'downloads/werkzeug/0.2.zip'})

    def test_dispatch(self):
        env = create_environ('/')
        map = r.Map([
            r.Rule('/', endpoint='root'),
            r.Rule('/foo/', endpoint='foo')
        ])
        adapter = map.bind_to_environ(env)

        raise_this = None
        def view_func(endpoint, values):
            if raise_this is not None:
                raise raise_this
            return Response(repr((endpoint, values)))
        dispatch = lambda p, q=False: Response.force_type(adapter.dispatch(view_func, p,
                                                          catch_http_exceptions=q), env)

        assert dispatch('/').data == b"('root', {})"
        assert dispatch('/foo').status_code == 301
        raise_this = r.NotFound()
        self.assert_raises(r.NotFound, lambda: dispatch('/bar'))
        assert dispatch('/bar', True).status_code == 404

    def test_http_host_before_server_name(self):
        env = {
            'HTTP_HOST': 'wiki.example.com',
            'SERVER_NAME': 'web0.example.com',
            'SERVER_PORT': '80',
            'SCRIPT_NAME': '',
            'PATH_INFO': '',
            'REQUEST_METHOD': 'GET',
            'wsgi.url_scheme': 'http'
        }
        map = r.Map([r.Rule('/', endpoint='index', subdomain='wiki')])
        adapter = map.bind_to_environ(env, server_name='example.com')
        assert adapter.match('/') == ('index', {})
        assert adapter.build('index', force_external=True) == 'http://wiki.example.com/'
        assert adapter.build('index') == '/'

        env['HTTP_HOST'] = 'admin.example.com'
        adapter = map.bind_to_environ(env, server_name='example.com')
        assert adapter.build('index') == 'http://wiki.example.com/'

    def test_adapter_url_parameter_sorting(self):
        map = r.Map([r.Rule('/', endpoint='index')], sort_parameters=True,
                    sort_key=lambda x: x[1])
        adapter = map.bind('localhost', '/')
        assert adapter.build('index', {'x': 20, 'y': 10, 'z': 30},
                             force_external=True) == 'http://localhost/?y=10&x=20&z=30'

    def test_request_direct_charset_bug(self):
        map = r.Map([r.Rule(u'/öäü/')])
        adapter = map.bind('localhost', '/')
        try:
            adapter.match(u'/öäü')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://localhost/%C3%B6%C3%A4%C3%BC/'
        else:
            self.fail('expected request redirect exception')

    def test_request_redirect_default(self):
        map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}),
                     r.Rule(u'/foo/<int:bar>')])
        adapter = map.bind('localhost', '/')
        try:
            adapter.match(u'/foo/42')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://localhost/foo'
        else:
            self.fail('expected request redirect exception')

    def test_request_redirect_default_subdomain(self):
        map = r.Map([r.Rule(u'/foo', defaults={'bar': 42}, subdomain='test'),
                     r.Rule(u'/foo/<int:bar>', subdomain='other')])
        adapter = map.bind('localhost', '/', subdomain='other')
        try:
            adapter.match(u'/foo/42')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://test.localhost/foo'
        else:
            self.fail('expected request redirect exception')

    def test_adapter_match_return_rule(self):
        rule = r.Rule('/foo/', endpoint='foo')
        map = r.Map([rule])
        adapter = map.bind('localhost', '/')
        assert adapter.match('/foo/', return_rule=True) == (rule, {})

    def test_server_name_interpolation(self):
        server_name = 'example.invalid'
        map = r.Map([r.Rule('/', endpoint='index'),
                     r.Rule('/', endpoint='alt', subdomain='alt')])

        env = create_environ('/', 'http://%s/' % server_name)
        adapter = map.bind_to_environ(env, server_name=server_name)
        assert adapter.match() == ('index', {})

        env = create_environ('/', 'http://alt.%s/' % server_name)
        adapter = map.bind_to_environ(env, server_name=server_name)
        assert adapter.match() == ('alt', {})

        env = create_environ('/', 'http://%s/' % server_name)
        adapter = map.bind_to_environ(env, server_name='foo')
        assert adapter.subdomain == '<invalid>'

    def test_rule_emptying(self):
        rule = r.Rule('/foo', {'meh': 'muh'}, 'x', ['POST'],
                      False, 'x', True, None)
        rule2 = rule.empty()
        assert rule.__dict__ == rule2.__dict__
        rule.methods.add('GET')
        assert rule.__dict__ != rule2.__dict__
        rule.methods.discard('GET')
        rule.defaults['meh'] = 'aha'
        assert rule.__dict__ != rule2.__dict__

    def test_rule_templates(self):
        testcase = r.RuleTemplate(
            [ r.Submount('/test/$app',
              [ r.Rule('/foo/', endpoint='handle_foo')
              , r.Rule('/bar/', endpoint='handle_bar')
              , r.Rule('/baz/', endpoint='handle_baz')
              ]),
              r.EndpointPrefix('${app}',
              [ r.Rule('/${app}-blah', endpoint='bar')
              , r.Rule('/${app}-meh', endpoint='baz')
              ]),
              r.Subdomain('$app',
              [ r.Rule('/blah', endpoint='x_bar')
              , r.Rule('/meh', endpoint='x_baz')
              ])
            ])

        url_map = r.Map(
            [ testcase(app='test1')
            , testcase(app='test2')
            , testcase(app='test3')
            , testcase(app='test4')
            ])

        out = sorted([(x.rule, x.subdomain, x.endpoint)
                      for x in url_map.iter_rules()])

        assert out == ([
            ('/blah', 'test1', 'x_bar'),
            ('/blah', 'test2', 'x_bar'),
            ('/blah', 'test3', 'x_bar'),
            ('/blah', 'test4', 'x_bar'),
            ('/meh', 'test1', 'x_baz'),
            ('/meh', 'test2', 'x_baz'),
            ('/meh', 'test3', 'x_baz'),
            ('/meh', 'test4', 'x_baz'),
            ('/test/test1/bar/', '', 'handle_bar'),
            ('/test/test1/baz/', '', 'handle_baz'),
            ('/test/test1/foo/', '', 'handle_foo'),
            ('/test/test2/bar/', '', 'handle_bar'),
            ('/test/test2/baz/', '', 'handle_baz'),
            ('/test/test2/foo/', '', 'handle_foo'),
            ('/test/test3/bar/', '', 'handle_bar'),
            ('/test/test3/baz/', '', 'handle_baz'),
            ('/test/test3/foo/', '', 'handle_foo'),
            ('/test/test4/bar/', '', 'handle_bar'),
            ('/test/test4/baz/', '', 'handle_baz'),
            ('/test/test4/foo/', '', 'handle_foo'),
            ('/test1-blah', '', 'test1bar'),
            ('/test1-meh', '', 'test1baz'),
            ('/test2-blah', '', 'test2bar'),
            ('/test2-meh', '', 'test2baz'),
            ('/test3-blah', '', 'test3bar'),
            ('/test3-meh', '', 'test3baz'),
            ('/test4-blah', '', 'test4bar'),
            ('/test4-meh', '', 'test4baz')
        ])

    def test_non_string_parts(self):
        m = r.Map([
            r.Rule('/<foo>', endpoint='foo')
        ])
        a = m.bind('example.com')
        self.assert_equal(a.build('foo', {'foo': 42}), '/42')

    def test_complex_routing_rules(self):
        m = r.Map([
            r.Rule('/', endpoint='index'),
            r.Rule('/<int:blub>', endpoint='an_int'),
            r.Rule('/<blub>', endpoint='a_string'),
            r.Rule('/foo/', endpoint='nested'),
            r.Rule('/foobar/', endpoint='nestedbar'),
            r.Rule('/foo/<path:testing>/', endpoint='nested_show'),
            r.Rule('/foo/<path:testing>/edit', endpoint='nested_edit'),
            r.Rule('/users/', endpoint='users', defaults={'page': 1}),
            r.Rule('/users/page/<int:page>', endpoint='users'),
            r.Rule('/foox', endpoint='foox'),
            r.Rule('/<path:bar>/<path:blub>', endpoint='barx_path_path')
        ])
        a = m.bind('example.com')

        assert a.match('/') == ('index', {})
        assert a.match('/42') == ('an_int', {'blub': 42})
        assert a.match('/blub') == ('a_string', {'blub': 'blub'})
        assert a.match('/foo/') == ('nested', {})
        assert a.match('/foobar/') == ('nestedbar', {})
        assert a.match('/foo/1/2/3/') == ('nested_show', {'testing': '1/2/3'})
        assert a.match('/foo/1/2/3/edit') == ('nested_edit', {'testing': '1/2/3'})
        assert a.match('/users/') == ('users', {'page': 1})
        assert a.match('/users/page/2') == ('users', {'page': 2})
        assert a.match('/foox') == ('foox', {})
        assert a.match('/1/2/3') == ('barx_path_path', {'bar': '1', 'blub': '2/3'})

        assert a.build('index') == '/'
        assert a.build('an_int', {'blub': 42}) == '/42'
        assert a.build('a_string', {'blub': 'test'}) == '/test'
        assert a.build('nested') == '/foo/'
        assert a.build('nestedbar') == '/foobar/'
        assert a.build('nested_show', {'testing': '1/2/3'}) == '/foo/1/2/3/'
        assert a.build('nested_edit', {'testing': '1/2/3'}) == '/foo/1/2/3/edit'
        assert a.build('users', {'page': 1}) == '/users/'
        assert a.build('users', {'page': 2}) == '/users/page/2'
        assert a.build('foox') == '/foox'
        assert a.build('barx_path_path', {'bar': '1', 'blub': '2/3'}) == '/1/2/3'

    def test_default_converters(self):
        class MyMap(r.Map):
            default_converters = r.Map.default_converters.copy()
            default_converters['foo'] = r.UnicodeConverter
        assert isinstance(r.Map.default_converters, ImmutableDict)
        m = MyMap([
            r.Rule('/a/<foo:a>', endpoint='a'),
            r.Rule('/b/<foo:b>', endpoint='b'),
            r.Rule('/c/<c>', endpoint='c')
        ], converters={'bar': r.UnicodeConverter})
        a = m.bind('example.org', '/')
        assert a.match('/a/1') == ('a', {'a': '1'})
        assert a.match('/b/2') == ('b', {'b': '2'})
        assert a.match('/c/3') == ('c', {'c': '3'})
        assert 'foo' not in r.Map.default_converters

    def test_build_append_unknown(self):
        map = r.Map([
            r.Rule('/bar/<float:bazf>', endpoint='barf')
        ])
        adapter = map.bind('example.org', '/', subdomain='blah')
        assert adapter.build('barf', {'bazf': 0.815, 'bif': 1.0}) == \
            'http://example.org/bar/0.815?bif=1.0'
        assert adapter.build('barf', {'bazf': 0.815, 'bif': 1.0},
                             append_unknown=False) == 'http://example.org/bar/0.815'

    def test_method_fallback(self):
        map = r.Map([
            r.Rule('/', endpoint='index', methods=['GET']),
            r.Rule('/<name>', endpoint='hello_name', methods=['GET']),
            r.Rule('/select', endpoint='hello_select', methods=['POST']),
            r.Rule('/search_get', endpoint='search', methods=['GET']),
            r.Rule('/search_post', endpoint='search', methods=['POST'])
        ])
        adapter = map.bind('example.com')
        assert adapter.build('index') == '/'
        assert adapter.build('index', method='GET') == '/'
        assert adapter.build('hello_name', {'name': 'foo'}) == '/foo'
        assert adapter.build('hello_select') == '/select'
        assert adapter.build('hello_select', method='POST') == '/select'
        assert adapter.build('search') == '/search_get'
        assert adapter.build('search', method='GET') == '/search_get'
        assert adapter.build('search', method='POST') == '/search_post'

    def test_implicit_head(self):
        url_map = r.Map([
            r.Rule('/get', methods=['GET'], endpoint='a'),
            r.Rule('/post', methods=['POST'], endpoint='b')
        ])
        adapter = url_map.bind('example.org')
        assert adapter.match('/get', method='HEAD') == ('a', {})
        self.assert_raises(r.MethodNotAllowed, adapter.match,
                           '/post', method='HEAD')

    def test_protocol_joining_bug(self):
        m = r.Map([r.Rule('/<foo>', endpoint='x')])
        a = m.bind('example.org')
        assert a.build('x', {'foo': 'x:y'}) == '/x:y'
        assert a.build('x', {'foo': 'x:y'}, force_external=True) == \
            'http://example.org/x:y'

    def test_allowed_methods_querying(self):
        m = r.Map([r.Rule('/<foo>', methods=['GET', 'HEAD']),
                   r.Rule('/foo', methods=['POST'])])
        a = m.bind('example.org')
        assert sorted(a.allowed_methods('/foo')) == ['GET', 'HEAD', 'POST']

    def test_external_building_with_port(self):
        map = r.Map([
            r.Rule('/', endpoint='index'),
        ])
        adapter = map.bind('example.org:5000', '/')
        built_url = adapter.build('index', {}, force_external=True)
        assert built_url == 'http://example.org:5000/', built_url

    def test_external_building_with_port_bind_to_environ(self):
        map = r.Map([
            r.Rule('/', endpoint='index'),
        ])
        adapter = map.bind_to_environ(
            create_environ('/', 'http://example.org:5000/'),
            server_name="example.org:5000"
        )
        built_url = adapter.build('index', {}, force_external=True)
        assert built_url == 'http://example.org:5000/', built_url

    def test_external_building_with_port_bind_to_environ_wrong_servername(self):
        map = r.Map([
            r.Rule('/', endpoint='index'),
        ])
        environ = create_environ('/', 'http://example.org:5000/')
        adapter = map.bind_to_environ(environ, server_name="example.org")
        assert adapter.subdomain == '<invalid>'

    def test_converter_parser(self):
        args, kwargs = r.parse_converter_args(u'test, a=1, b=3.0')

        assert args == ('test',)
        assert kwargs == {'a': 1, 'b': 3.0}

        args, kwargs = r.parse_converter_args('')
        assert not args and not kwargs

        args, kwargs = r.parse_converter_args('a, b, c,')
        assert args == ('a', 'b', 'c')
        assert not kwargs

        args, kwargs = r.parse_converter_args('True, False, None')
        assert args == (True, False, None)

        args, kwargs = r.parse_converter_args('"foo", u"bar"')
        assert args == ('foo', 'bar')

    def test_alias_redirects(self):
        m = r.Map([
            r.Rule('/', endpoint='index'),
            r.Rule('/index.html', endpoint='index', alias=True),
            r.Rule('/users/', defaults={'page': 1}, endpoint='users'),
            r.Rule('/users/index.html', defaults={'page': 1}, alias=True,
                   endpoint='users'),
            r.Rule('/users/page/<int:page>', endpoint='users'),
            r.Rule('/users/page-<int:page>.html', alias=True, endpoint='users'),
        ])
        a = m.bind('example.com')

        def ensure_redirect(path, new_url, args=None):
            try:
                a.match(path, query_args=args)
            except r.RequestRedirect as e:
                assert e.new_url == 'http://example.com' + new_url
            else:
                assert False, 'expected redirect'

        ensure_redirect('/index.html', '/')
        ensure_redirect('/users/index.html', '/users/')
        ensure_redirect('/users/page-2.html', '/users/page/2')
        ensure_redirect('/users/page-1.html', '/users/')
        ensure_redirect('/users/page-1.html', '/users/?foo=bar', {'foo': 'bar'})

        assert a.build('index') == '/'
        assert a.build('users', {'page': 1}) == '/users/'
        assert a.build('users', {'page': 2}) == '/users/page/2'

    def test_double_defaults(self):
        for prefix in '', '/aaa':
            m = r.Map([
                r.Rule(prefix + '/', defaults={'foo': 1, 'bar': False}, endpoint='x'),
                r.Rule(prefix + '/<int:foo>', defaults={'bar': False}, endpoint='x'),
                r.Rule(prefix + '/bar/', defaults={'foo': 1, 'bar': True}, endpoint='x'),
                r.Rule(prefix + '/bar/<int:foo>', defaults={'bar': True}, endpoint='x')
            ])
            a = m.bind('example.com')

            assert a.match(prefix + '/') == ('x', {'foo': 1, 'bar': False})
            assert a.match(prefix + '/2') == ('x', {'foo': 2, 'bar': False})
            assert a.match(prefix + '/bar/') == ('x', {'foo': 1, 'bar': True})
            assert a.match(prefix + '/bar/2') == ('x', {'foo': 2, 'bar': True})

            assert a.build('x', {'foo': 1, 'bar': False}) == prefix + '/'
            assert a.build('x', {'foo': 2, 'bar': False}) == prefix + '/2'
            assert a.build('x', {'bar': False}) == prefix + '/'
            assert a.build('x', {'foo': 1, 'bar': True}) == prefix + '/bar/'
            assert a.build('x', {'foo': 2, 'bar': True}) == prefix + '/bar/2'
            assert a.build('x', {'bar': True}) == prefix + '/bar/'

    def test_host_matching(self):
        m = r.Map([
            r.Rule('/', endpoint='index', host='www.<domain>'),
            r.Rule('/', endpoint='files', host='files.<domain>'),
            r.Rule('/foo/', defaults={'page': 1}, host='www.<domain>', endpoint='x'),
            r.Rule('/<int:page>', host='files.<domain>', endpoint='x')
        ], host_matching=True)

        a = m.bind('www.example.com')
        assert a.match('/') == ('index', {'domain': 'example.com'})
        assert a.match('/foo/') == ('x', {'domain': 'example.com', 'page': 1})
        try:
            a.match('/foo')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://www.example.com/foo/'
        else:
            assert False, 'expected redirect'

        a = m.bind('files.example.com')
        assert a.match('/') == ('files', {'domain': 'example.com'})
        assert a.match('/2') == ('x', {'domain': 'example.com', 'page': 2})
        try:
            a.match('/1')
        except r.RequestRedirect as e:
            assert e.new_url == 'http://www.example.com/foo/'
        else:
            assert False, 'expected redirect'

    def test_server_name_casing(self):
        m = r.Map([
            r.Rule('/', endpoint='index', subdomain='foo')
        ])

        env = create_environ()
        env['SERVER_NAME'] = env['HTTP_HOST'] = 'FOO.EXAMPLE.COM'
        a = m.bind_to_environ(env, server_name='example.com')
        assert a.match('/') == ('index', {})

        env = create_environ()
        env['SERVER_NAME'] = '127.0.0.1'
        env['SERVER_PORT'] = '5000'
        del env['HTTP_HOST']
        a = m.bind_to_environ(env, server_name='example.com')
        try:
            a.match()
        except r.NotFound:
            pass
        else:
            assert False, 'Expected not found exception'

    def test_redirect_request_exception_code(self):
        exc = r.RequestRedirect('http://www.google.com/')
        exc.code = 307
        env = create_environ()
        self.assert_strict_equal(exc.get_response(env).status_code, exc.code)

    def test_unicode_rules(self):
        m = r.Map([
            r.Rule(u'/войти/', endpoint='enter'),
            r.Rule(u'/foo+bar/', endpoint='foobar')
        ])
        a = m.bind(u'☃.example.com')
        try:
            a.match(u'/войти')
        except r.RequestRedirect as e:
            self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
                                     '%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')
        endpoint, values = a.match(u'/войти/')
        self.assert_strict_equal(endpoint, 'enter')
        self.assert_strict_equal(values, {})

        try:
            a.match(u'/foo+bar')
        except r.RequestRedirect as e:
            self.assert_strict_equal(e.new_url, 'http://xn--n3h.example.com/'
                                     'foo+bar/')
        endpoint, values = a.match(u'/foo+bar/')
        self.assert_strict_equal(endpoint, 'foobar')
        self.assert_strict_equal(values, {})

        url = a.build('enter', {}, force_external=True)
        self.assert_strict_equal(url, 'http://xn--n3h.example.com/%D0%B2%D0%BE%D0%B9%D1%82%D0%B8/')

        url = a.build('foobar', {}, force_external=True)
        self.assert_strict_equal(url, 'http://xn--n3h.example.com/foo+bar/')

    def test_map_repr(self):
        m = r.Map([
            r.Rule(u'/wat', endpoint='enter'),
            r.Rule(u'/woop', endpoint='foobar')
        ])
        rv = repr(m)
        self.assert_strict_equal(rv,
            "Map([<Rule '/woop' -> foobar>, <Rule '/wat' -> enter>])")


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(RoutingTestCase))
    return suite
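[Editor's note] The routing tests above all follow the same bind-then-match lifecycle; a minimal dispatch sketch under that assumption (the endpoint table and `match_endpoint` are illustrative, not part of this diff):

    from werkzeug.exceptions import HTTPException
    from werkzeug.routing import Map, Rule

    url_map = Map([Rule('/', endpoint='index'),
                   Rule('/user/<name>', endpoint='user')])

    def match_endpoint(environ):
        # Bind once per request, then match; NotFound and RequestRedirect
        # are HTTPExceptions, so they can double as WSGI responses.
        adapter = url_map.bind_to_environ(environ)
        try:
            return adapter.match()   # -> (endpoint, values)
        except HTTPException as e:
            return e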
@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.security
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests the security helpers.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import os
import unittest

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug.security import check_password_hash, generate_password_hash, \
     safe_join, pbkdf2_hex


class SecurityTestCase(WerkzeugTestCase):

    def test_password_hashing(self):
        hash0 = generate_password_hash('default')
        assert check_password_hash(hash0, 'default')
        assert hash0.startswith('pbkdf2:sha1:1000$')

        hash1 = generate_password_hash('default', 'sha1')
        hash2 = generate_password_hash(u'default', method='sha1')
        assert hash1 != hash2
        assert check_password_hash(hash1, 'default')
        assert check_password_hash(hash2, 'default')
        assert hash1.startswith('sha1$')
        assert hash2.startswith('sha1$')

        fakehash = generate_password_hash('default', method='plain')
        assert fakehash == 'plain$$default'
        assert check_password_hash(fakehash, 'default')

        mhash = generate_password_hash(u'default', method='md5')
        assert mhash.startswith('md5$')
        assert check_password_hash(mhash, 'default')

        legacy = 'md5$$c21f969b5f03d33d43e04f8f136e7682'
        assert check_password_hash(legacy, 'default')

        legacy = u'md5$$c21f969b5f03d33d43e04f8f136e7682'
        assert check_password_hash(legacy, 'default')

    def test_safe_join(self):
        assert safe_join('foo', 'bar/baz') == os.path.join('foo', 'bar/baz')
        assert safe_join('foo', '../bar/baz') is None
        if os.name == 'nt':
            assert safe_join('foo', 'foo\\bar') is None

    def test_pbkdf2(self):
        def check(data, salt, iterations, keylen, expected):
            rv = pbkdf2_hex(data, salt, iterations, keylen)
            self.assert_equal(rv, expected)

        # From RFC 6070
        check('password', 'salt', 1, None,
              '0c60c80f961f0e71f3a9b524af6012062fe037a6')
        check('password', 'salt', 1, 20,
              '0c60c80f961f0e71f3a9b524af6012062fe037a6')
        check('password', 'salt', 2, 20,
              'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957')
        check('password', 'salt', 4096, 20,
              '4b007901b765489abead49d926f721d065a429c1')
        check('passwordPASSWORDpassword', 'saltSALTsaltSALTsaltSALTsaltSALTsalt',
              4096, 25, '3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038')
        check('pass\x00word', 'sa\x00lt', 4096, 16,
              '56fa6aa75548099dcc37d7f03425e0c3')
        # This one is from the RFC but it just takes ages
        ##check('password', 'salt', 16777216, 20,
        ##      'eefe3d61cd4da4e4e9945b3d6ba2158c2634e984')

        # From Crypt-PBKDF2
        check('password', 'ATHENA.MIT.EDUraeburn', 1, 16,
              'cdedb5281bb2f801565a1122b2563515')
        check('password', 'ATHENA.MIT.EDUraeburn', 1, 32,
              'cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837')
        check('password', 'ATHENA.MIT.EDUraeburn', 2, 16,
              '01dbee7f4a9e243e988b62c73cda935d')
        check('password', 'ATHENA.MIT.EDUraeburn', 2, 32,
              '01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86')
        check('password', 'ATHENA.MIT.EDUraeburn', 1200, 32,
              '5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13')
        check('X' * 64, 'pass phrase equals block size', 1200, 32,
              '139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1')
        check('X' * 65, 'pass phrase exceeds block size', 1200, 32,
              '9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(SecurityTestCase))
    return suite
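The suite above pins down the public password helpers of werkzeug.security. A minimal usage sketch (not part of the diff), using only calls present in this 0.9.x tree; the password values are illustrative:

    # Illustrative only: storing and checking a salted password hash.
    from werkzeug.security import generate_password_hash, check_password_hash

    stored = generate_password_hash('s3cret')   # default: 'pbkdf2:sha1:1000$...'

    def login_ok(candidate):
        # Re-derives the hash using the salt and method embedded in `stored`.
        return check_password_hash(stored, candidate)

    assert login_ok('s3cret')
    assert not login_ok('wrong')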
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.serving
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Added serving tests.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import sys
import time
try:
    import httplib
except ImportError:
    from http import client as httplib
try:
    from urllib2 import urlopen, HTTPError
except ImportError:  # pragma: no cover
    from urllib.request import urlopen
    from urllib.error import HTTPError

import unittest
from functools import update_wrapper

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug import __version__ as version, serving
from werkzeug.testapp import test_app
from werkzeug._compat import StringIO
from threading import Thread


real_make_server = serving.make_server


def silencestderr(f):
    def new_func(*args, **kwargs):
        old_stderr = sys.stderr
        sys.stderr = StringIO()
        try:
            return f(*args, **kwargs)
        finally:
            sys.stderr = old_stderr
    return update_wrapper(new_func, f)


def run_dev_server(application):
    servers = []

    def tracking_make_server(*args, **kwargs):
        srv = real_make_server(*args, **kwargs)
        servers.append(srv)
        return srv
    serving.make_server = tracking_make_server
    try:
        t = Thread(target=serving.run_simple,
                   args=('localhost', 0, application))
        t.setDaemon(True)
        t.start()
        time.sleep(0.25)
    finally:
        serving.make_server = real_make_server
    if not servers:
        return None, None
    server, = servers
    ip, port = server.socket.getsockname()[:2]
    if ':' in ip:
        ip = '[%s]' % ip
    return server, '%s:%d' % (ip, port)


class ServingTestCase(WerkzeugTestCase):

    @silencestderr
    def test_serving(self):
        server, addr = run_dev_server(test_app)
        rv = urlopen('http://%s/?foo=bar&baz=blah' % addr).read()
        self.assert_in(b'WSGI Information', rv)
        self.assert_in(b'foo=bar&baz=blah', rv)
        self.assert_in(b'Werkzeug/' + version.encode('ascii'), rv)

    @silencestderr
    def test_broken_app(self):
        def broken_app(environ, start_response):
            1 // 0
        server, addr = run_dev_server(broken_app)
        try:
            urlopen('http://%s/?foo=bar&baz=blah' % addr).read()
        except HTTPError as e:
            # In Python3 a 500 response causes an exception
            rv = e.read()
            assert b'Internal Server Error' in rv
        else:
            assert False, 'expected internal server error'

    @silencestderr
    def test_absolute_requests(self):
        def asserting_app(environ, start_response):
            assert environ['HTTP_HOST'] == 'surelynotexisting.example.com:1337'
            assert environ['PATH_INFO'] == '/index.htm'
            assert environ['SERVER_PORT'] == addr.split(':')[1]
            start_response('200 OK', [('Content-Type', 'text/html')])
            return [b'YES']

        server, addr = run_dev_server(asserting_app)
        conn = httplib.HTTPConnection(addr)
        conn.request('GET', 'http://surelynotexisting.example.com:1337/index.htm')
        res = conn.getresponse()
        assert res.read() == b'YES'


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ServingTestCase))
    return suite
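run_dev_server above drives werkzeug.serving.run_simple with port 0 so the operating system picks a free port. For reference, a minimal sketch (not from the diff) of the same entry point used directly, with a hypothetical hello-world app:

    # Illustrative only: serving a trivial WSGI app on a fixed port.
    from werkzeug.serving import run_simple

    def hello(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello from the dev server!\n']

    if __name__ == '__main__':
        run_simple('localhost', 5000, hello)   # port 5000 is an arbitrary choice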
@@ -0,0 +1,410 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.test
    ~~~~~~~~~~~~~~~~~~~~~~~

    Tests the testing tools.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import with_statement

import sys
import unittest
from io import BytesIO
from werkzeug._compat import iteritems, to_bytes

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug.wrappers import Request, Response, BaseResponse
from werkzeug.test import Client, EnvironBuilder, create_environ, \
     ClientRedirectError, stream_encode_multipart, run_wsgi_app
from werkzeug.utils import redirect
from werkzeug.formparser import parse_form_data
from werkzeug.datastructures import MultiDict


def cookie_app(environ, start_response):
    """A WSGI application which sets a cookie, and returns as a response any
    cookie which exists.
    """
    response = Response(environ.get('HTTP_COOKIE', 'No Cookie'),
                        mimetype='text/plain')
    response.set_cookie('test', 'test')
    return response(environ, start_response)


def redirect_loop_app(environ, start_response):
    response = redirect('http://localhost/some/redirect/')
    return response(environ, start_response)


def redirect_with_get_app(environ, start_response):
    req = Request(environ)
    if req.url not in ('http://localhost/',
                       'http://localhost/first/request',
                       'http://localhost/some/redirect/'):
        assert False, 'redirect_demo_app() did not expect URL "%s"' % req.url
    if '/some/redirect' not in req.url:
        response = redirect('http://localhost/some/redirect/')
    else:
        response = Response('current url: %s' % req.url)
    return response(environ, start_response)


def redirect_with_post_app(environ, start_response):
    req = Request(environ)
    if req.url == 'http://localhost/some/redirect/':
        assert req.method == 'GET', 'request should be GET'
        assert not req.form, 'request should not have data'
        response = Response('current url: %s' % req.url)
    else:
        response = redirect('http://localhost/some/redirect/')
    return response(environ, start_response)


def external_redirect_demo_app(environ, start_response):
    response = redirect('http://example.com/')
    return response(environ, start_response)


def external_subdomain_redirect_demo_app(environ, start_response):
    if 'test.example.com' in environ['HTTP_HOST']:
        response = Response('redirected successfully to subdomain')
    else:
        response = redirect('http://test.example.com/login')
    return response(environ, start_response)


def multi_value_post_app(environ, start_response):
    req = Request(environ)
    assert req.form['field'] == 'val1', req.form['field']
    assert req.form.getlist('field') == ['val1', 'val2'], req.form.getlist('field')
    response = Response('ok')
    return response(environ, start_response)


class TestTestCase(WerkzeugTestCase):

    def test_cookie_forging(self):
        c = Client(cookie_app)
        c.set_cookie('localhost', 'foo', 'bar')
        appiter, code, headers = c.open()
        self.assert_strict_equal(list(appiter), [b'foo=bar'])

    def test_set_cookie_app(self):
        c = Client(cookie_app)
        appiter, code, headers = c.open()
        self.assert_in('Set-Cookie', dict(headers))

    def test_cookiejar_stores_cookie(self):
        c = Client(cookie_app)
        appiter, code, headers = c.open()
        self.assert_in('test', c.cookie_jar._cookies['localhost.local']['/'])

    def test_no_initial_cookie(self):
        c = Client(cookie_app)
        appiter, code, headers = c.open()
        self.assert_strict_equal(b''.join(appiter), b'No Cookie')

    def test_resent_cookie(self):
        c = Client(cookie_app)
        c.open()
        appiter, code, headers = c.open()
        self.assert_strict_equal(b''.join(appiter), b'test=test')

    def test_disable_cookies(self):
        c = Client(cookie_app, use_cookies=False)
        c.open()
        appiter, code, headers = c.open()
        self.assert_strict_equal(b''.join(appiter), b'No Cookie')

    def test_cookie_for_different_path(self):
        c = Client(cookie_app)
        c.open('/path1')
        appiter, code, headers = c.open('/path2')
        self.assert_strict_equal(b''.join(appiter), b'test=test')

    def test_environ_builder_basics(self):
        b = EnvironBuilder()
        self.assert_is_none(b.content_type)
        b.method = 'POST'
        self.assert_equal(b.content_type, 'application/x-www-form-urlencoded')
        b.files.add_file('test', BytesIO(b'test contents'), 'test.txt')
        self.assert_equal(b.files['test'].content_type, 'text/plain')
        self.assert_equal(b.content_type, 'multipart/form-data')
        b.form['test'] = 'normal value'

        req = b.get_request()
        b.close()

        self.assert_strict_equal(req.url, u'http://localhost/')
        self.assert_strict_equal(req.method, 'POST')
        self.assert_strict_equal(req.form['test'], u'normal value')
        self.assert_equal(req.files['test'].content_type, 'text/plain')
        self.assert_strict_equal(req.files['test'].filename, u'test.txt')
        self.assert_strict_equal(req.files['test'].read(), b'test contents')

    def test_environ_builder_headers(self):
        b = EnvironBuilder(environ_base={'HTTP_USER_AGENT': 'Foo/0.1'},
                           environ_overrides={'wsgi.version': (1, 1)})
        b.headers['X-Suck-My-Dick'] = 'very well sir'
        env = b.get_environ()
        self.assert_strict_equal(env['HTTP_USER_AGENT'], 'Foo/0.1')
        self.assert_strict_equal(env['HTTP_X_SUCK_MY_DICK'], 'very well sir')
        self.assert_strict_equal(env['wsgi.version'], (1, 1))

        b.headers['User-Agent'] = 'Bar/1.0'
        env = b.get_environ()
        self.assert_strict_equal(env['HTTP_USER_AGENT'], 'Bar/1.0')

    def test_environ_builder_headers_content_type(self):
        b = EnvironBuilder(headers={'Content-Type': 'text/plain'})
        env = b.get_environ()
        self.assert_equal(env['CONTENT_TYPE'], 'text/plain')
        b = EnvironBuilder(content_type='text/html',
                           headers={'Content-Type': 'text/plain'})
        env = b.get_environ()
        self.assert_equal(env['CONTENT_TYPE'], 'text/html')

    def test_environ_builder_paths(self):
        b = EnvironBuilder(path='/foo', base_url='http://example.com/')
        self.assert_strict_equal(b.base_url, 'http://example.com/')
        self.assert_strict_equal(b.path, '/foo')
        self.assert_strict_equal(b.script_root, '')
        self.assert_strict_equal(b.host, 'example.com')

        b = EnvironBuilder(path='/foo', base_url='http://example.com/bar')
        self.assert_strict_equal(b.base_url, 'http://example.com/bar/')
        self.assert_strict_equal(b.path, '/foo')
        self.assert_strict_equal(b.script_root, '/bar')
        self.assert_strict_equal(b.host, 'example.com')

        b.host = 'localhost'
        self.assert_strict_equal(b.base_url, 'http://localhost/bar/')
        b.base_url = 'http://localhost:8080/'
        self.assert_strict_equal(b.host, 'localhost:8080')
        self.assert_strict_equal(b.server_name, 'localhost')
        self.assert_strict_equal(b.server_port, 8080)

        b.host = 'foo.invalid'
        b.url_scheme = 'https'
        b.script_root = '/test'
        env = b.get_environ()
        self.assert_strict_equal(env['SERVER_NAME'], 'foo.invalid')
        self.assert_strict_equal(env['SERVER_PORT'], '443')
        self.assert_strict_equal(env['SCRIPT_NAME'], '/test')
        self.assert_strict_equal(env['PATH_INFO'], '/foo')
        self.assert_strict_equal(env['HTTP_HOST'], 'foo.invalid')
        self.assert_strict_equal(env['wsgi.url_scheme'], 'https')
        self.assert_strict_equal(b.base_url, 'https://foo.invalid/test/')

    def test_environ_builder_content_type(self):
        builder = EnvironBuilder()
        self.assert_is_none(builder.content_type)
        builder.method = 'POST'
        self.assert_equal(builder.content_type, 'application/x-www-form-urlencoded')
        builder.form['foo'] = 'bar'
        self.assert_equal(builder.content_type, 'application/x-www-form-urlencoded')
        builder.files.add_file('blafasel', BytesIO(b'foo'), 'test.txt')
        self.assert_equal(builder.content_type, 'multipart/form-data')
        req = builder.get_request()
        self.assert_strict_equal(req.form['foo'], u'bar')
        self.assert_strict_equal(req.files['blafasel'].read(), b'foo')

    def test_environ_builder_stream_switch(self):
        d = MultiDict(dict(foo=u'bar', blub=u'blah', hu=u'hum'))
        for use_tempfile in False, True:
            stream, length, boundary = stream_encode_multipart(
                d, use_tempfile, threshold=150)
            self.assert_true(isinstance(stream, BytesIO) != use_tempfile)

            form = parse_form_data({'wsgi.input': stream, 'CONTENT_LENGTH': str(length),
                                    'CONTENT_TYPE': 'multipart/form-data; boundary="%s"' %
                                    boundary})[1]
            self.assert_strict_equal(form, d)
            stream.close()

    def test_create_environ(self):
        env = create_environ('/foo?bar=baz', 'http://example.org/')
        expected = {
            'wsgi.multiprocess': False,
            'wsgi.version': (1, 0),
            'wsgi.run_once': False,
            'wsgi.errors': sys.stderr,
            'wsgi.multithread': False,
            'wsgi.url_scheme': 'http',
            'SCRIPT_NAME': '',
            'CONTENT_TYPE': '',
            'CONTENT_LENGTH': '0',
            'SERVER_NAME': 'example.org',
            'REQUEST_METHOD': 'GET',
            'HTTP_HOST': 'example.org',
            'PATH_INFO': '/foo',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'QUERY_STRING': 'bar=baz'
        }
        for key, value in iteritems(expected):
            self.assert_equal(env[key], value)
        self.assert_strict_equal(env['wsgi.input'].read(0), b'')
        self.assert_strict_equal(create_environ('/foo', 'http://example.com/')['SCRIPT_NAME'], '')

    def test_file_closing(self):
        closed = []
        class SpecialInput(object):
            def read(self):
                return ''
            def close(self):
                closed.append(self)

        env = create_environ(data={'foo': SpecialInput()})
        self.assert_strict_equal(len(closed), 1)
        builder = EnvironBuilder()
        builder.files.add_file('blah', SpecialInput())
        builder.close()
        self.assert_strict_equal(len(closed), 2)

    def test_follow_redirect(self):
        env = create_environ('/', base_url='http://localhost')
        c = Client(redirect_with_get_app)
        appiter, code, headers = c.open(environ_overrides=env, follow_redirects=True)
        self.assert_strict_equal(code, '200 OK')
        self.assert_strict_equal(b''.join(appiter), b'current url: http://localhost/some/redirect/')

        # Test that the :cls:`Client` is aware of user-defined response wrappers
        c = Client(redirect_with_get_app, response_wrapper=BaseResponse)
        resp = c.get('/', follow_redirects=True)
        self.assert_strict_equal(resp.status_code, 200)
        self.assert_strict_equal(resp.data, b'current url: http://localhost/some/redirect/')

        # test with URL other than '/' to make sure redirected URLs are correct
        c = Client(redirect_with_get_app, response_wrapper=BaseResponse)
        resp = c.get('/first/request', follow_redirects=True)
        self.assert_strict_equal(resp.status_code, 200)
        self.assert_strict_equal(resp.data, b'current url: http://localhost/some/redirect/')

    def test_follow_external_redirect(self):
        env = create_environ('/', base_url='http://localhost')
        c = Client(external_redirect_demo_app)
        self.assert_raises(RuntimeError, lambda:
            c.get(environ_overrides=env, follow_redirects=True))

    def test_follow_external_redirect_on_same_subdomain(self):
        env = create_environ('/', base_url='http://example.com')
        c = Client(external_subdomain_redirect_demo_app, allow_subdomain_redirects=True)
        c.get(environ_overrides=env, follow_redirects=True)

        # check that this does not work for real external domains
        env = create_environ('/', base_url='http://localhost')
        self.assert_raises(RuntimeError, lambda:
            c.get(environ_overrides=env, follow_redirects=True))

        # check that subdomain redirects fail if no `allow_subdomain_redirects` is applied
        c = Client(external_subdomain_redirect_demo_app)
        self.assert_raises(RuntimeError, lambda:
            c.get(environ_overrides=env, follow_redirects=True))

    def test_follow_redirect_loop(self):
        c = Client(redirect_loop_app, response_wrapper=BaseResponse)
        with self.assert_raises(ClientRedirectError):
            resp = c.get('/', follow_redirects=True)

    def test_follow_redirect_with_post(self):
        c = Client(redirect_with_post_app, response_wrapper=BaseResponse)
        resp = c.post('/', follow_redirects=True, data='foo=blub+hehe&blah=42')
        self.assert_strict_equal(resp.status_code, 200)
        self.assert_strict_equal(resp.data, b'current url: http://localhost/some/redirect/')

    def test_path_info_script_name_unquoting(self):
        def test_app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [environ['PATH_INFO'] + '\n' + environ['SCRIPT_NAME']]
        c = Client(test_app, response_wrapper=BaseResponse)
        resp = c.get('/foo%40bar')
        self.assert_strict_equal(resp.data, b'/foo@bar\n')
        c = Client(test_app, response_wrapper=BaseResponse)
        resp = c.get('/foo%40bar', 'http://localhost/bar%40baz')
        self.assert_strict_equal(resp.data, b'/foo@bar\n/bar@baz')

    def test_multi_value_submit(self):
        c = Client(multi_value_post_app, response_wrapper=BaseResponse)
        data = {
            'field': ['val1', 'val2']
        }
        resp = c.post('/', data=data)
        self.assert_strict_equal(resp.status_code, 200)
        c = Client(multi_value_post_app, response_wrapper=BaseResponse)
        data = MultiDict({
            'field': ['val1', 'val2']
        })
        resp = c.post('/', data=data)
        self.assert_strict_equal(resp.status_code, 200)

    def test_iri_support(self):
        b = EnvironBuilder(u'/föö-bar', base_url=u'http://☃.net/')
        self.assert_strict_equal(b.path, '/f%C3%B6%C3%B6-bar')
        self.assert_strict_equal(b.base_url, 'http://xn--n3h.net/')

    def test_run_wsgi_apps(self):
        def simple_app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/html')])
            return ['Hello World!']
        app_iter, status, headers = run_wsgi_app(simple_app, {})
        self.assert_strict_equal(status, '200 OK')
        self.assert_strict_equal(list(headers), [('Content-Type', 'text/html')])
        self.assert_strict_equal(app_iter, ['Hello World!'])

        def yielding_app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/html')])
            yield 'Hello '
            yield 'World!'
        app_iter, status, headers = run_wsgi_app(yielding_app, {})
        self.assert_strict_equal(status, '200 OK')
        self.assert_strict_equal(list(headers), [('Content-Type', 'text/html')])
        self.assert_strict_equal(list(app_iter), ['Hello ', 'World!'])

    def test_multiple_cookies(self):
        @Request.application
        def test_app(request):
            response = Response(repr(sorted(request.cookies.items())))
            response.set_cookie(u'test1', b'foo')
            response.set_cookie(u'test2', b'bar')
            return response
        client = Client(test_app, Response)
        resp = client.get('/')
        self.assert_strict_equal(resp.data, b'[]')
        resp = client.get('/')
        self.assert_strict_equal(resp.data,
            to_bytes(repr([('test1', u'foo'), ('test2', u'bar')]), 'ascii'))

    def test_correct_open_invocation_on_redirect(self):
        class MyClient(Client):
            counter = 0
            def open(self, *args, **kwargs):
                self.counter += 1
                env = kwargs.setdefault('environ_overrides', {})
                env['werkzeug._foo'] = self.counter
                return Client.open(self, *args, **kwargs)

        @Request.application
        def test_app(request):
            return Response(str(request.environ['werkzeug._foo']))

        c = MyClient(test_app, response_wrapper=Response)
        self.assert_strict_equal(c.get('/').data, b'1')
        self.assert_strict_equal(c.get('/').data, b'2')
        self.assert_strict_equal(c.get('/').data, b'3')

    def test_correct_encoding(self):
        req = Request.from_values(u'/\N{SNOWMAN}', u'http://example.com/foo')
        self.assert_strict_equal(req.script_root, u'/foo')
        self.assert_strict_equal(req.path, u'/\N{SNOWMAN}')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(TestTestCase))
    return suite
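As a quick reference for the werkzeug.test.Client API this suite exercises, a minimal sketch (not part of the diff); the app and expected values are illustrative:

    # Illustrative only: driving a WSGI app through the test Client.
    from werkzeug.test import Client
    from werkzeug.wrappers import BaseResponse

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    c = Client(app, response_wrapper=BaseResponse)
    resp = c.get('/?x=1')          # without a wrapper, open() returns a tuple
    assert resp.status_code == 200
    assert resp.data == b'hello'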
@@ -0,0 +1,308 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.urls
    ~~~~~~~~~~~~~~~~~~~~~~~

    URL helper tests.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug.datastructures import OrderedMultiDict
from werkzeug import urls
from werkzeug._compat import text_type, NativeStringIO, BytesIO


class URLsTestCase(WerkzeugTestCase):

    def test_replace(self):
        url = urls.url_parse('http://de.wikipedia.org/wiki/Troll')
        self.assert_strict_equal(url.replace(query='foo=bar'),
            urls.url_parse('http://de.wikipedia.org/wiki/Troll?foo=bar'))
        self.assert_strict_equal(url.replace(scheme='https'),
            urls.url_parse('https://de.wikipedia.org/wiki/Troll'))

    def test_quoting(self):
        self.assert_strict_equal(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
        self.assert_strict_equal(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
        self.assert_strict_equal(urls.url_quote_plus('foo bar'), 'foo+bar')
        self.assert_strict_equal(urls.url_unquote_plus('foo+bar'), u'foo bar')
        self.assert_strict_equal(urls.url_quote_plus('foo+bar'), 'foo%2Bbar')
        self.assert_strict_equal(urls.url_unquote_plus('foo%2Bbar'), u'foo+bar')
        self.assert_strict_equal(urls.url_encode({b'a': None, b'b': b'foo bar'}), 'b=foo+bar')
        self.assert_strict_equal(urls.url_encode({u'a': None, u'b': u'foo bar'}), 'b=foo+bar')
        self.assert_strict_equal(urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
                                 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
        self.assert_strict_equal(urls.url_quote_plus(42), '42')
        self.assert_strict_equal(urls.url_quote(b'\xff'), '%FF')

    def test_bytes_unquoting(self):
        self.assert_strict_equal(urls.url_unquote(urls.url_quote(
            u'#%="\xf6', charset='latin1'), charset=None), b'#%="\xf6')

    def test_url_decoding(self):
        x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel')
        self.assert_strict_equal(x['foo'], u'42')
        self.assert_strict_equal(x['bar'], u'23')
        self.assert_strict_equal(x['uni'], u'Hänsel')

        x = urls.url_decode(b'foo=42;bar=23;uni=H%C3%A4nsel', separator=b';')
        self.assert_strict_equal(x['foo'], u'42')
        self.assert_strict_equal(x['bar'], u'23')
        self.assert_strict_equal(x['uni'], u'Hänsel')

        x = urls.url_decode(b'%C3%9Ch=H%C3%A4nsel', decode_keys=True)
        self.assert_strict_equal(x[u'Üh'], u'Hänsel')

    def test_url_bytes_decoding(self):
        x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel', charset=None)
        self.assert_strict_equal(x[b'foo'], b'42')
        self.assert_strict_equal(x[b'bar'], b'23')
        self.assert_strict_equal(x[b'uni'], u'Hänsel'.encode('utf-8'))

    def test_streamed_url_decoding(self):
        item1 = u'a' * 100000
        item2 = u'b' * 400
        string = ('a=%s&b=%s&c=%s' % (item1, item2, item2)).encode('ascii')
        gen = urls.url_decode_stream(BytesIO(string), limit=len(string),
                                     return_iterator=True)
        self.assert_strict_equal(next(gen), ('a', item1))
        self.assert_strict_equal(next(gen), ('b', item2))
        self.assert_strict_equal(next(gen), ('c', item2))
        self.assert_raises(StopIteration, lambda: next(gen))

    def test_stream_decoding_string_fails(self):
        self.assert_raises(TypeError, urls.url_decode_stream, 'testing')

    def test_url_encoding(self):
        self.assert_strict_equal(urls.url_encode({'foo': 'bar 45'}), 'foo=bar+45')
        d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
        self.assert_strict_equal(urls.url_encode(d, sort=True), 'bar=23&blah=H%C3%A4nsel&foo=1')
        self.assert_strict_equal(urls.url_encode(d, sort=True, separator=u';'), 'bar=23;blah=H%C3%A4nsel;foo=1')

    def test_sorted_url_encode(self):
        self.assert_strict_equal(urls.url_encode({u"a": 42, u"b": 23, 1: 1, 2: 2},
            sort=True, key=lambda i: text_type(i[0])), '1=1&2=2&a=42&b=23')
        self.assert_strict_equal(urls.url_encode({u'A': 1, u'a': 2, u'B': 3, 'b': 4}, sort=True,
            key=lambda x: x[0].lower() + x[0]), 'A=1&a=2&B=3&b=4')

    def test_streamed_url_encoding(self):
        out = NativeStringIO()
        urls.url_encode_stream({'foo': 'bar 45'}, out)
        self.assert_strict_equal(out.getvalue(), 'foo=bar+45')

        d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
        out = NativeStringIO()
        urls.url_encode_stream(d, out, sort=True)
        self.assert_strict_equal(out.getvalue(), 'bar=23&blah=H%C3%A4nsel&foo=1')
        out = NativeStringIO()
        urls.url_encode_stream(d, out, sort=True, separator=u';')
        self.assert_strict_equal(out.getvalue(), 'bar=23;blah=H%C3%A4nsel;foo=1')

        gen = urls.url_encode_stream(d, sort=True)
        self.assert_strict_equal(next(gen), 'bar=23')
        self.assert_strict_equal(next(gen), 'blah=H%C3%A4nsel')
        self.assert_strict_equal(next(gen), 'foo=1')
        self.assert_raises(StopIteration, lambda: next(gen))

    def test_url_fixing(self):
        x = urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
        self.assert_line_equal(x, 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')

        x = urls.url_fix("http://just.a.test/$-_.+!*'(),")
        self.assert_equal(x, "http://just.a.test/$-_.+!*'(),")

    def test_url_fixing_qs(self):
        x = urls.url_fix(b'http://example.com/?foo=%2f%2f')
        self.assert_line_equal(x, 'http://example.com/?foo=%2f%2f')

        x = urls.url_fix('http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
        self.assert_equal(x, 'http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')

    def test_iri_support(self):
        self.assert_strict_equal(urls.uri_to_iri('http://xn--n3h.net/'),
                                 u'http://\u2603.net/')
        self.assert_strict_equal(
            urls.uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'),
            u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th')
        self.assert_strict_equal(urls.iri_to_uri(u'http://☃.net/'), 'http://xn--n3h.net/')
        self.assert_strict_equal(
            urls.iri_to_uri(u'http://üser:pässword@☃.net/påth'),
            'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')

        self.assert_strict_equal(urls.uri_to_iri('http://test.com/%3Fmeh?foo=%26%2F'),
                                 u'http://test.com/%3Fmeh?foo=%26%2F')

        # this should work as well, might break on 2.4 because of a broken
        # idna codec
        self.assert_strict_equal(urls.uri_to_iri(b'/foo'), u'/foo')
        self.assert_strict_equal(urls.iri_to_uri(u'/foo'), '/foo')

        self.assert_strict_equal(urls.iri_to_uri(u'http://föö.com:8080/bam/baz'),
                                 'http://xn--f-1gaa.com:8080/bam/baz')

    def test_iri_safe_quoting(self):
        uri = b'http://xn--f-1gaa.com/%2F%25?q=%C3%B6&x=%3D%25#%25'
        iri = u'http://föö.com/%2F%25?q=ö&x=%3D%25#%25'
        self.assert_strict_equal(urls.uri_to_iri(uri), iri)
        self.assert_strict_equal(urls.iri_to_uri(urls.uri_to_iri(uri)), uri)

    def test_ordered_multidict_encoding(self):
        d = OrderedMultiDict()
        d.add('foo', 1)
        d.add('foo', 2)
        d.add('foo', 3)
        d.add('bar', 0)
        d.add('foo', 4)
        self.assert_equal(urls.url_encode(d), 'foo=1&foo=2&foo=3&bar=0&foo=4')

    def test_href(self):
        x = urls.Href('http://www.example.com/')
        self.assert_strict_equal(x(u'foo'), 'http://www.example.com/foo')
        self.assert_strict_equal(x.foo(u'bar'), 'http://www.example.com/foo/bar')
        self.assert_strict_equal(x.foo(u'bar', x=42), 'http://www.example.com/foo/bar?x=42')
        self.assert_strict_equal(x.foo(u'bar', class_=42), 'http://www.example.com/foo/bar?class=42')
        self.assert_strict_equal(x.foo(u'bar', {u'class': 42}), 'http://www.example.com/foo/bar?class=42')
        self.assert_raises(AttributeError, lambda: x.__blah__)

        x = urls.Href('blah')
        self.assert_strict_equal(x.foo(u'bar'), 'blah/foo/bar')

        self.assert_raises(TypeError, x.foo, {u"foo": 23}, x=42)

        x = urls.Href('')
        self.assert_strict_equal(x('foo'), 'foo')

    def test_href_url_join(self):
        x = urls.Href(u'test')
        self.assert_line_equal(x(u'foo:bar'), u'test/foo:bar')
        self.assert_line_equal(x(u'http://example.com/'), u'test/http://example.com/')
        self.assert_line_equal(x.a(), u'test/a')

    def test_href_past_root(self):
        base_href = urls.Href('http://www.blagga.com/1/2/3')
        self.assert_strict_equal(base_href('../foo'), 'http://www.blagga.com/1/2/foo')
        self.assert_strict_equal(base_href('../../foo'), 'http://www.blagga.com/1/foo')
        self.assert_strict_equal(base_href('../../../foo'), 'http://www.blagga.com/foo')
        self.assert_strict_equal(base_href('../../../../foo'), 'http://www.blagga.com/foo')
        self.assert_strict_equal(base_href('../../../../../foo'), 'http://www.blagga.com/foo')
        self.assert_strict_equal(base_href('../../../../../../foo'), 'http://www.blagga.com/foo')

    def test_url_unquote_plus_unicode(self):
        # was broken in 0.6
        self.assert_strict_equal(urls.url_unquote_plus(u'\x6d'), u'\x6d')
        self.assert_is(type(urls.url_unquote_plus(u'\x6d')), text_type)

    def test_quoting_of_local_urls(self):
        rv = urls.iri_to_uri(u'/foo\x8f')
        self.assert_strict_equal(rv, '/foo%C2%8F')
        self.assert_is(type(rv), str)

    def test_url_attributes(self):
        rv = urls.url_parse('http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
        self.assert_strict_equal(rv.scheme, 'http')
        self.assert_strict_equal(rv.auth, 'foo%3a:bar%3a')
        self.assert_strict_equal(rv.username, u'foo:')
        self.assert_strict_equal(rv.password, u'bar:')
        self.assert_strict_equal(rv.raw_username, 'foo%3a')
        self.assert_strict_equal(rv.raw_password, 'bar%3a')
        self.assert_strict_equal(rv.host, '::1')
        self.assert_equal(rv.port, 80)
        self.assert_strict_equal(rv.path, '/123')
        self.assert_strict_equal(rv.query, 'x=y')
        self.assert_strict_equal(rv.fragment, 'frag')

        rv = urls.url_parse(u'http://\N{SNOWMAN}.com/')
        self.assert_strict_equal(rv.host, u'\N{SNOWMAN}.com')
        self.assert_strict_equal(rv.ascii_host, 'xn--n3h.com')

    def test_url_attributes_bytes(self):
        rv = urls.url_parse(b'http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
        self.assert_strict_equal(rv.scheme, b'http')
        self.assert_strict_equal(rv.auth, b'foo%3a:bar%3a')
        self.assert_strict_equal(rv.username, u'foo:')
        self.assert_strict_equal(rv.password, u'bar:')
        self.assert_strict_equal(rv.raw_username, b'foo%3a')
        self.assert_strict_equal(rv.raw_password, b'bar%3a')
        self.assert_strict_equal(rv.host, b'::1')
        self.assert_equal(rv.port, 80)
        self.assert_strict_equal(rv.path, b'/123')
        self.assert_strict_equal(rv.query, b'x=y')
        self.assert_strict_equal(rv.fragment, b'frag')

    def test_url_joining(self):
        self.assert_strict_equal(urls.url_join('/foo', '/bar'), '/bar')
        self.assert_strict_equal(urls.url_join('http://example.com/foo', '/bar'),
                                 'http://example.com/bar')
        self.assert_strict_equal(urls.url_join('file:///tmp/', 'test.html'),
                                 'file:///tmp/test.html')
        self.assert_strict_equal(urls.url_join('file:///tmp/x', 'test.html'),
                                 'file:///tmp/test.html')
        self.assert_strict_equal(urls.url_join('file:///tmp/x', '../../../x.html'),
                                 'file:///x.html')

    def test_partial_unencoded_decode(self):
        ref = u'foo=정상처리'.encode('euc-kr')
        x = urls.url_decode(ref, charset='euc-kr')
        self.assert_strict_equal(x['foo'], u'정상처리')

    def test_iri_to_uri_idempotence_ascii_only(self):
        uri = u'http://www.idempoten.ce'
        uri = urls.iri_to_uri(uri)
        self.assert_equal(urls.iri_to_uri(uri), uri)

    def test_iri_to_uri_idempotence_non_ascii(self):
        uri = u'http://\N{SNOWMAN}/\N{SNOWMAN}'
        uri = urls.iri_to_uri(uri)
        self.assert_equal(urls.iri_to_uri(uri), uri)

    def test_uri_to_iri_idempotence_ascii_only(self):
        uri = 'http://www.idempoten.ce'
        uri = urls.uri_to_iri(uri)
        self.assert_equal(urls.uri_to_iri(uri), uri)

    def test_uri_to_iri_idempotence_non_ascii(self):
        uri = 'http://xn--n3h/%E2%98%83'
        uri = urls.uri_to_iri(uri)
        self.assert_equal(urls.uri_to_iri(uri), uri)

    def test_iri_to_uri_to_iri(self):
        iri = u'http://föö.com/'
        uri = urls.iri_to_uri(iri)
        self.assert_equal(urls.uri_to_iri(uri), iri)

    def test_uri_to_iri_to_uri(self):
        uri = 'http://xn--f-rgao.com/%C3%9E'
        iri = urls.uri_to_iri(uri)
        self.assert_equal(urls.iri_to_uri(iri), uri)

    def test_uri_iri_normalization(self):
        uri = 'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93'
        iri = u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713'

        tests = [
            u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713',
            u'http://xn--f-rgao.com/\u2610/fred?utf8=\N{CHECK MARK}',
            b'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
            u'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
            u'http://föñ.com/\u2610/fred?utf8=%E2%9C%93',
            b'http://xn--f-rgao.com/\xe2\x98\x90/fred?utf8=\xe2\x9c\x93',
        ]

        for test in tests:
            self.assert_equal(urls.uri_to_iri(test), iri)
            self.assert_equal(urls.iri_to_uri(test), uri)
            self.assert_equal(urls.uri_to_iri(urls.iri_to_uri(test)), iri)
            self.assert_equal(urls.iri_to_uri(urls.uri_to_iri(test)), uri)
            self.assert_equal(urls.uri_to_iri(urls.uri_to_iri(test)), iri)
            self.assert_equal(urls.iri_to_uri(urls.iri_to_uri(test)), uri)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(URLsTestCase))
    return suite
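The round-trip tests above fix the behaviour of the IRI/URI converters; a minimal sketch (not from the diff) restating the conversion they pin down:

    # Illustrative only: a non-ASCII host becomes punycode and round-trips.
    from werkzeug.urls import iri_to_uri, uri_to_iri

    assert iri_to_uri(u'http://\u2603.net/') == 'http://xn--n3h.net/'
    assert uri_to_iri('http://xn--n3h.net/') == u'http://\u2603.net/'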
@@ -0,0 +1,284 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.utils
    ~~~~~~~~~~~~~~~~~~~~~~~~

    General utilities.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from __future__ import with_statement

import unittest
from datetime import datetime
from functools import partial

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug import utils
from werkzeug.datastructures import Headers
from werkzeug.http import parse_date, http_date
from werkzeug.wrappers import BaseResponse
from werkzeug.test import Client, run_wsgi_app
from werkzeug._compat import text_type, implements_iterator


class GeneralUtilityTestCase(WerkzeugTestCase):

    def test_redirect(self):
        resp = utils.redirect(u'/füübär')
        self.assert_in(b'/f%C3%BC%C3%BCb%C3%A4r', resp.get_data())
        self.assert_equal(resp.headers['Location'], '/f%C3%BC%C3%BCb%C3%A4r')
        self.assert_equal(resp.status_code, 302)

        resp = utils.redirect(u'http://☃.net/', 307)
        self.assert_in(b'http://xn--n3h.net/', resp.get_data())
        self.assert_equal(resp.headers['Location'], 'http://xn--n3h.net/')
        self.assert_equal(resp.status_code, 307)

        resp = utils.redirect('http://example.com/', 305)
        self.assert_equal(resp.headers['Location'], 'http://example.com/')
        self.assert_equal(resp.status_code, 305)

    def test_redirect_no_unicode_header_keys(self):
        # Make sure all headers are native keys. This was a bug at one point
        # due to an incorrect conversion.
        resp = utils.redirect('http://example.com/', 305)
        for key, value in resp.headers.items():
            self.assert_equal(type(key), str)
            self.assert_equal(type(value), text_type)
        self.assert_equal(resp.headers['Location'], 'http://example.com/')
        self.assert_equal(resp.status_code, 305)

    def test_redirect_xss(self):
        location = 'http://example.com/?xss="><script>alert(1)</script>'
        resp = utils.redirect(location)
        self.assert_not_in(b'<script>alert(1)</script>', resp.get_data())

        location = 'http://example.com/?xss="onmouseover="alert(1)'
        resp = utils.redirect(location)
        self.assert_not_in(b'href="http://example.com/?xss="onmouseover="alert(1)"', resp.get_data())

    def test_cached_property(self):
        foo = []
        class A(object):
            def prop(self):
                foo.append(42)
                return 42
            prop = utils.cached_property(prop)

        a = A()
        p = a.prop
        q = a.prop
        self.assert_true(p == q == 42)
        self.assert_equal(foo, [42])

        foo = []
        class A(object):
            def _prop(self):
                foo.append(42)
                return 42
            prop = utils.cached_property(_prop, name='prop')
            del _prop

        a = A()
        p = a.prop
        q = a.prop
        self.assert_true(p == q == 42)
        self.assert_equal(foo, [42])

    def test_environ_property(self):
        class A(object):
            environ = {'string': 'abc', 'number': '42'}

            string = utils.environ_property('string')
            missing = utils.environ_property('missing', 'spam')
            read_only = utils.environ_property('number')
            number = utils.environ_property('number', load_func=int)
            broken_number = utils.environ_property('broken_number', load_func=int)
            date = utils.environ_property('date', None, parse_date, http_date,
                                          read_only=False)
            foo = utils.environ_property('foo')

        a = A()
        self.assert_equal(a.string, 'abc')
        self.assert_equal(a.missing, 'spam')
        def test_assign():
            a.read_only = 'something'
        self.assert_raises(AttributeError, test_assign)
        self.assert_equal(a.number, 42)
        self.assert_equal(a.broken_number, None)
        self.assert_is_none(a.date)
        a.date = datetime(2008, 1, 22, 10, 0, 0, 0)
        self.assert_equal(a.environ['date'], 'Tue, 22 Jan 2008 10:00:00 GMT')

    def test_escape(self):
        class Foo(str):
            def __html__(self):
                return text_type(self)
        self.assert_equal(utils.escape(None), '')
        self.assert_equal(utils.escape(42), '42')
        self.assert_equal(utils.escape('<>'), '&lt;&gt;')
        self.assert_equal(utils.escape('"foo"'), '&quot;foo&quot;')
        self.assert_equal(utils.escape(Foo('<foo>')), '<foo>')

    def test_unescape(self):
        self.assert_equal(utils.unescape('&lt;&auml;&gt;'), u'<ä>')

    def test_run_wsgi_app(self):
        def foo(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            yield '1'
            yield '2'
            yield '3'

        app_iter, status, headers = run_wsgi_app(foo, {})
        self.assert_equal(status, '200 OK')
        self.assert_equal(list(headers), [('Content-Type', 'text/plain')])
        self.assert_equal(next(app_iter), '1')
        self.assert_equal(next(app_iter), '2')
        self.assert_equal(next(app_iter), '3')
        self.assert_raises(StopIteration, partial(next, app_iter))

        got_close = []
        @implements_iterator
        class CloseIter(object):
            def __init__(self):
                self.iterated = False
            def __iter__(self):
                return self
            def close(self):
                got_close.append(None)
            def __next__(self):
                if self.iterated:
                    raise StopIteration()
                self.iterated = True
                return 'bar'

        def bar(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return CloseIter()

        app_iter, status, headers = run_wsgi_app(bar, {})
        self.assert_equal(status, '200 OK')
        self.assert_equal(list(headers), [('Content-Type', 'text/plain')])
        self.assert_equal(next(app_iter), 'bar')
        self.assert_raises(StopIteration, partial(next, app_iter))
        app_iter.close()

        self.assert_equal(run_wsgi_app(bar, {}, True)[0], ['bar'])

        self.assert_equal(len(got_close), 2)

    def test_import_string(self):
        import cgi
        from werkzeug.debug import DebuggedApplication
        self.assert_is(utils.import_string('cgi.escape'), cgi.escape)
        self.assert_is(utils.import_string(u'cgi.escape'), cgi.escape)
        self.assert_is(utils.import_string('cgi:escape'), cgi.escape)
        self.assert_is_none(utils.import_string('XXXXXXXXXXXX', True))
        self.assert_is_none(utils.import_string('cgi.XXXXXXXXXXXX', True))
        self.assert_is(utils.import_string(u'cgi.escape'), cgi.escape)
        self.assert_is(utils.import_string(u'werkzeug.debug.DebuggedApplication'), DebuggedApplication)
        self.assert_raises(ImportError, utils.import_string, 'XXXXXXXXXXXXXXXX')
        self.assert_raises(ImportError, utils.import_string, 'cgi.XXXXXXXXXX')

    def test_find_modules(self):
        self.assert_equal(list(utils.find_modules('werkzeug.debug')), \
            ['werkzeug.debug.console', 'werkzeug.debug.repr',
             'werkzeug.debug.tbtools'])

    def test_html_builder(self):
        html = utils.html
        xhtml = utils.xhtml
        self.assert_equal(html.p('Hello World'), '<p>Hello World</p>')
        self.assert_equal(html.a('Test', href='#'), '<a href="#">Test</a>')
        self.assert_equal(html.br(), '<br>')
        self.assert_equal(xhtml.br(), '<br />')
        self.assert_equal(html.img(src='foo'), '<img src="foo">')
        self.assert_equal(xhtml.img(src='foo'), '<img src="foo" />')
        self.assert_equal(html.html(
            html.head(
                html.title('foo'),
                html.script(type='text/javascript')
            )
        ), '<html><head><title>foo</title><script type="text/javascript">'
           '</script></head></html>')
        self.assert_equal(html('<foo>'), '&lt;foo&gt;')
        self.assert_equal(html.input(disabled=True), '<input disabled>')
        self.assert_equal(xhtml.input(disabled=True), '<input disabled="disabled" />')
        self.assert_equal(html.input(disabled=''), '<input>')
        self.assert_equal(xhtml.input(disabled=''), '<input />')
        self.assert_equal(html.input(disabled=None), '<input>')
        self.assert_equal(xhtml.input(disabled=None), '<input />')
        self.assert_equal(html.script('alert("Hello World");'), '<script>' \
            'alert("Hello World");</script>')
        self.assert_equal(xhtml.script('alert("Hello World");'), '<script>' \
            '/*<![CDATA[*/alert("Hello World");/*]]>*/</script>')

    def test_validate_arguments(self):
        take_none = lambda: None
        take_two = lambda a, b: None
        take_two_one_default = lambda a, b=0: None

        self.assert_equal(utils.validate_arguments(take_two, (1, 2,), {}), ((1, 2), {}))
        self.assert_equal(utils.validate_arguments(take_two, (1,), {'b': 2}), ((1, 2), {}))
        self.assert_equal(utils.validate_arguments(take_two_one_default, (1,), {}), ((1, 0), {}))
        self.assert_equal(utils.validate_arguments(take_two_one_default, (1, 2), {}), ((1, 2), {}))

        self.assert_raises(utils.ArgumentValidationError,
            utils.validate_arguments, take_two, (), {})

        self.assert_equal(utils.validate_arguments(take_none, (1, 2,), {'c': 3}), ((), {}))
        self.assert_raises(utils.ArgumentValidationError,
            utils.validate_arguments, take_none, (1,), {}, drop_extra=False)
        self.assert_raises(utils.ArgumentValidationError,
            utils.validate_arguments, take_none, (), {'a': 1}, drop_extra=False)

    def test_header_set_duplication_bug(self):
        headers = Headers([
            ('Content-Type', 'text/html'),
            ('Foo', 'bar'),
            ('Blub', 'blah')
        ])
        headers['blub'] = 'hehe'
        headers['blafasel'] = 'humm'
        self.assert_equal(headers, Headers([
            ('Content-Type', 'text/html'),
            ('Foo', 'bar'),
            ('blub', 'hehe'),
            ('blafasel', 'humm')
        ]))

    def test_append_slash_redirect(self):
        def app(env, sr):
            return utils.append_slash_redirect(env)(env, sr)
        client = Client(app, BaseResponse)
        response = client.get('foo', base_url='http://example.org/app')
        self.assert_equal(response.status_code, 301)
        self.assert_equal(response.headers['Location'], 'http://example.org/app/foo/')

    def test_cached_property_doc(self):
        @utils.cached_property
        def foo():
            """testing"""
            return 42
        self.assert_equal(foo.__doc__, 'testing')
        self.assert_equal(foo.__name__, 'foo')
        self.assert_equal(foo.__module__, __name__)

    def test_secure_filename(self):
        self.assert_equal(utils.secure_filename('My cool movie.mov'),
                          'My_cool_movie.mov')
        self.assert_equal(utils.secure_filename('../../../etc/passwd'),
                          'etc_passwd')
        self.assert_equal(utils.secure_filename(u'i contain cool \xfcml\xe4uts.txt'),
                          'i_contain_cool_umlauts.txt')


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(GeneralUtilityTestCase))
    return suite
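A minimal sketch (not from the diff) of utils.cached_property, whose once-per-instance caching the test above asserts; the class and attribute names are illustrative:

    # Illustrative only: the decorated method body runs once per instance.
    from werkzeug.utils import cached_property

    class Config(object):
        @cached_property
        def expensive(self):
            print('computed once')
            return 42

    c = Config()
    assert c.expensive == 42   # computes and caches in the instance dict
    assert c.expensive == 42   # second access is served from the cache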
@@ -0,0 +1,840 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.wrappers
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Tests for the response and request objects.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
import pickle
from io import BytesIO
from datetime import datetime
from werkzeug._compat import iteritems

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug import wrappers
from werkzeug.exceptions import SecurityError
from werkzeug.wsgi import LimitedStream
from werkzeug.datastructures import MultiDict, ImmutableOrderedMultiDict, \
     ImmutableList, ImmutableTypeConversionDict, CharsetAccept, \
     MIMEAccept, LanguageAccept, Accept, CombinedMultiDict
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug._compat import implements_iterator, text_type


class RequestTestResponse(wrappers.BaseResponse):
    """Subclass of the normal response class we use to test response
    and base classes.  Has some methods to test if things in the
    response match.
    """

    def __init__(self, response, status, headers):
        wrappers.BaseResponse.__init__(self, response, status, headers)
        self.body_data = pickle.loads(self.get_data())

    def __getitem__(self, key):
        return self.body_data[key]


def request_demo_app(environ, start_response):
    request = wrappers.BaseRequest(environ)
    assert 'werkzeug.request' in environ
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [pickle.dumps({
        'args': request.args,
        'args_as_list': list(request.args.lists()),
        'form': request.form,
        'form_as_list': list(request.form.lists()),
        'environ': prepare_environ_pickle(request.environ),
        'data': request.get_data()
    })]


def prepare_environ_pickle(environ):
    result = {}
    for key, value in iteritems(environ):
        try:
            pickle.dumps((key, value))
        except Exception:
            continue
        result[key] = value
    return result


class WrappersTestCase(WerkzeugTestCase):

    def assert_environ(self, environ, method):
        self.assert_strict_equal(environ['REQUEST_METHOD'], method)
        self.assert_strict_equal(environ['PATH_INFO'], '/')
        self.assert_strict_equal(environ['SCRIPT_NAME'], '')
        self.assert_strict_equal(environ['SERVER_NAME'], 'localhost')
        self.assert_strict_equal(environ['wsgi.version'], (1, 0))
        self.assert_strict_equal(environ['wsgi.url_scheme'], 'http')

    def test_base_request(self):
        client = Client(request_demo_app, RequestTestResponse)

        # get requests
        response = client.get('/?foo=bar&foo=hehe')
        self.assert_strict_equal(response['args'], MultiDict([('foo', u'bar'), ('foo', u'hehe')]))
        self.assert_strict_equal(response['args_as_list'], [('foo', [u'bar', u'hehe'])])
        self.assert_strict_equal(response['form'], MultiDict())
        self.assert_strict_equal(response['form_as_list'], [])
        self.assert_strict_equal(response['data'], b'')
        self.assert_environ(response['environ'], 'GET')

        # post requests with form data
        response = client.post('/?blub=blah', data='foo=blub+hehe&blah=42',
                               content_type='application/x-www-form-urlencoded')
        self.assert_strict_equal(response['args'], MultiDict([('blub', u'blah')]))
        self.assert_strict_equal(response['args_as_list'], [('blub', [u'blah'])])
        self.assert_strict_equal(response['form'], MultiDict([('foo', u'blub hehe'), ('blah', u'42')]))
        self.assert_strict_equal(response['data'], b'')
        # currently we do not guarantee that the values are ordered correctly
        # for post data.
        ## self.assert_strict_equal(response['form_as_list'], [('foo', ['blub hehe']), ('blah', ['42'])])
        self.assert_environ(response['environ'], 'POST')

        # patch requests with form data
        response = client.patch('/?blub=blah', data='foo=blub+hehe&blah=42',
                                content_type='application/x-www-form-urlencoded')
        self.assert_strict_equal(response['args'], MultiDict([('blub', u'blah')]))
        self.assert_strict_equal(response['args_as_list'], [('blub', [u'blah'])])
        self.assert_strict_equal(response['form'],
                                 MultiDict([('foo', u'blub hehe'), ('blah', u'42')]))
        self.assert_strict_equal(response['data'], b'')
        self.assert_environ(response['environ'], 'PATCH')

        # post requests with json data
        json = b'{"foo": "bar", "blub": "blah"}'
        response = client.post('/?a=b', data=json, content_type='application/json')
        self.assert_strict_equal(response['data'], json)
        self.assert_strict_equal(response['args'], MultiDict([('a', u'b')]))
        self.assert_strict_equal(response['form'], MultiDict())

    def test_query_string_is_bytes(self):
        req = wrappers.Request.from_values(u'/?foo=%2f')
        self.assert_strict_equal(req.query_string, b'foo=%2f')

    def test_access_route(self):
        req = wrappers.Request.from_values(headers={
            'X-Forwarded-For': '192.168.1.2, 192.168.1.1'
        })
        req.environ['REMOTE_ADDR'] = '192.168.1.3'
        self.assert_equal(req.access_route, ['192.168.1.2', '192.168.1.1'])
        self.assert_strict_equal(req.remote_addr, '192.168.1.3')

        req = wrappers.Request.from_values()
        req.environ['REMOTE_ADDR'] = '192.168.1.3'
        self.assert_strict_equal(list(req.access_route), ['192.168.1.3'])

    def test_url_request_descriptors(self):
        req = wrappers.Request.from_values('/bar?foo=baz', 'http://example.com/test')
        self.assert_strict_equal(req.path, u'/bar')
        self.assert_strict_equal(req.full_path, u'/bar?foo=baz')
        self.assert_strict_equal(req.script_root, u'/test')
        self.assert_strict_equal(req.url, u'http://example.com/test/bar?foo=baz')
        self.assert_strict_equal(req.base_url, u'http://example.com/test/bar')
        self.assert_strict_equal(req.url_root, u'http://example.com/test/')
        self.assert_strict_equal(req.host_url, u'http://example.com/')
        self.assert_strict_equal(req.host, 'example.com')
        self.assert_strict_equal(req.scheme, 'http')

        req = wrappers.Request.from_values('/bar?foo=baz', 'https://example.com/test')
        self.assert_strict_equal(req.scheme, 'https')

    def test_url_request_descriptors_query_quoting(self):
        next = 'http%3A%2F%2Fwww.example.com%2F%3Fnext%3D%2F'
        req = wrappers.Request.from_values('/bar?next=' + next, 'http://example.com/')
        self.assert_equal(req.path, u'/bar')
        self.assert_strict_equal(req.full_path, u'/bar?next=' + next)
        self.assert_strict_equal(req.url, u'http://example.com/bar?next=' + next)

    def test_url_request_descriptors_hosts(self):
        req = wrappers.Request.from_values('/bar?foo=baz', 'http://example.com/test')
        req.trusted_hosts = ['example.com']
        self.assert_strict_equal(req.path, u'/bar')
        self.assert_strict_equal(req.full_path, u'/bar?foo=baz')
        self.assert_strict_equal(req.script_root, u'/test')
        self.assert_strict_equal(req.url, u'http://example.com/test/bar?foo=baz')
        self.assert_strict_equal(req.base_url, u'http://example.com/test/bar')
        self.assert_strict_equal(req.url_root, u'http://example.com/test/')
        self.assert_strict_equal(req.host_url, u'http://example.com/')
        self.assert_strict_equal(req.host, 'example.com')
        self.assert_strict_equal(req.scheme, 'http')

        req = wrappers.Request.from_values('/bar?foo=baz', 'https://example.com/test')
        self.assert_strict_equal(req.scheme, 'https')

        req = wrappers.Request.from_values('/bar?foo=baz', 'http://example.com/test')
        req.trusted_hosts = ['example.org']
        self.assert_raises(SecurityError, lambda: req.url)
        self.assert_raises(SecurityError, lambda: req.base_url)
        self.assert_raises(SecurityError, lambda: req.url_root)
        self.assert_raises(SecurityError, lambda: req.host_url)
        self.assert_raises(SecurityError, lambda: req.host)

    def test_authorization_mixin(self):
        request = wrappers.Request.from_values(headers={
            'Authorization': 'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
        })
        a = request.authorization
        self.assert_strict_equal(a.type, 'basic')
        self.assert_strict_equal(a.username, 'Aladdin')
        self.assert_strict_equal(a.password, 'open sesame')

    def test_stream_only_mixing(self):
        request = wrappers.PlainRequest.from_values(
            data=b'foo=blub+hehe',
            content_type='application/x-www-form-urlencoded'
        )
        self.assert_equal(list(request.files.items()), [])
        self.assert_equal(list(request.form.items()), [])
        self.assert_raises(AttributeError, lambda: request.data)
        self.assert_strict_equal(request.stream.read(), b'foo=blub+hehe')

    def test_base_response(self):
        # unicode
        response = wrappers.BaseResponse(u'öäü')
        self.assert_strict_equal(response.get_data(), u'öäü'.encode('utf-8'))

        # writing
        response = wrappers.Response('foo')
        response.stream.write('bar')
        self.assert_strict_equal(response.get_data(), b'foobar')

        # set cookie
        response = wrappers.BaseResponse()
        response.set_cookie('foo', 'bar', 60, 0, '/blub', 'example.org')
        self.assert_strict_equal(response.headers.to_wsgi_list(), [
            ('Content-Type', 'text/plain; charset=utf-8'),
            ('Set-Cookie', 'foo=bar; Domain=example.org; Expires=Thu, '
             '01-Jan-1970 00:00:00 GMT; Max-Age=60; Path=/blub')
        ])

        # delete cookie
        response = wrappers.BaseResponse()
        response.delete_cookie('foo')
        self.assert_strict_equal(response.headers.to_wsgi_list(), [
            ('Content-Type', 'text/plain; charset=utf-8'),
            ('Set-Cookie', 'foo=; Expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/')
        ])

        # close call forwarding
        closed = []
        @implements_iterator
        class Iterable(object):
            def __next__(self):
                raise StopIteration()
            def __iter__(self):
                return self
            def close(self):
                closed.append(True)
        response = wrappers.BaseResponse(Iterable())
        response.call_on_close(lambda: closed.append(True))
        app_iter, status, headers = run_wsgi_app(response,
                                                 create_environ(),
                                                 buffered=True)
        self.assert_strict_equal(status, '200 OK')
        self.assert_strict_equal(''.join(app_iter), '')
        self.assert_strict_equal(len(closed), 2)

        # with statement
        del closed[:]
        response = wrappers.BaseResponse(Iterable())
        with response:
            pass
        self.assert_equal(len(closed), 1)

    def test_response_status_codes(self):
        response = wrappers.BaseResponse()
        response.status_code = 404
        self.assert_strict_equal(response.status, '404 NOT FOUND')
        response.status = '200 OK'
        self.assert_strict_equal(response.status_code, 200)
        response.status = '999 WTF'
        self.assert_strict_equal(response.status_code, 999)
        response.status_code = 588
        self.assert_strict_equal(response.status_code, 588)
        self.assert_strict_equal(response.status, '588 UNKNOWN')
        response.status = 'wtf'
        self.assert_strict_equal(response.status_code, 0)
        self.assert_strict_equal(response.status, '0 wtf')

    def test_type_forcing(self):
        def wsgi_application(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/html')])
            return ['Hello World!']
        base_response = wrappers.BaseResponse('Hello World!', content_type='text/html')

        class SpecialResponse(wrappers.Response):
            def foo(self):
                return 42

        # good enough for this simple application, but don't ever use that in
        # real world examples!
        fake_env = {}

        for orig_resp in wsgi_application, base_response:
            response = SpecialResponse.force_type(orig_resp, fake_env)
            assert response.__class__ is SpecialResponse
            self.assert_strict_equal(response.foo(), 42)
            self.assert_strict_equal(response.get_data(), b'Hello World!')
            self.assert_equal(response.content_type, 'text/html')

        # without env, no arbitrary conversion
        self.assert_raises(TypeError, SpecialResponse.force_type, wsgi_application)
||||
def test_accept_mixin(self):
|
||||
request = wrappers.Request({
|
||||
'HTTP_ACCEPT': 'text/xml,application/xml,application/xhtml+xml,'
|
||||
'text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
|
||||
'HTTP_ACCEPT_CHARSET': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
|
||||
'HTTP_ACCEPT_ENCODING': 'gzip,deflate',
|
||||
'HTTP_ACCEPT_LANGUAGE': 'en-us,en;q=0.5'
|
||||
})
|
||||
self.assert_equal(request.accept_mimetypes, MIMEAccept([
|
||||
('text/xml', 1), ('image/png', 1), ('application/xml', 1),
|
||||
('application/xhtml+xml', 1), ('text/html', 0.9),
|
||||
('text/plain', 0.8), ('*/*', 0.5)
|
||||
]))
|
||||
self.assert_strict_equal(request.accept_charsets, CharsetAccept([
|
||||
('ISO-8859-1', 1), ('utf-8', 0.7), ('*', 0.7)
|
||||
]))
|
||||
self.assert_strict_equal(request.accept_encodings, Accept([
|
||||
('gzip', 1), ('deflate', 1)]))
|
||||
self.assert_strict_equal(request.accept_languages, LanguageAccept([
|
||||
('en-us', 1), ('en', 0.5)]))
|
||||
|
||||
request = wrappers.Request({'HTTP_ACCEPT': ''})
|
||||
self.assert_strict_equal(request.accept_mimetypes, MIMEAccept())
|
||||
|
||||
def test_etag_request_mixin(self):
|
||||
request = wrappers.Request({
|
||||
'HTTP_CACHE_CONTROL': 'no-store, no-cache',
|
||||
'HTTP_IF_MATCH': 'w/"foo", bar, "baz"',
|
||||
'HTTP_IF_NONE_MATCH': 'w/"foo", bar, "baz"',
|
||||
'HTTP_IF_MODIFIED_SINCE': 'Tue, 22 Jan 2008 11:18:44 GMT',
|
||||
'HTTP_IF_UNMODIFIED_SINCE': 'Tue, 22 Jan 2008 11:18:44 GMT'
|
||||
})
|
||||
assert request.cache_control.no_store
|
||||
assert request.cache_control.no_cache
|
||||
|
||||
for etags in request.if_match, request.if_none_match:
|
||||
assert etags('bar')
|
||||
assert etags.contains_raw('w/"foo"')
|
||||
assert etags.contains_weak('foo')
|
||||
assert not etags.contains('foo')
|
||||
|
||||
self.assert_equal(request.if_modified_since, datetime(2008, 1, 22, 11, 18, 44))
|
||||
self.assert_equal(request.if_unmodified_since, datetime(2008, 1, 22, 11, 18, 44))
|
||||
|
||||
def test_user_agent_mixin(self):
|
||||
user_agents = [
|
||||
('Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US; rv:1.8.1.11) '
|
||||
'Gecko/20071127 Firefox/2.0.0.11', 'firefox', 'macos', '2.0.0.11',
|
||||
'en-US'),
|
||||
('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; de-DE) Opera 8.54',
|
||||
'opera', 'windows', '8.54', 'de-DE'),
|
||||
('Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 '
|
||||
'(KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',
|
||||
'safari', 'iphone', '419.3', 'en'),
|
||||
('Bot Googlebot/2.1 ( http://www.googlebot.com/bot.html)',
|
||||
'google', None, '2.1', None)
|
||||
]
|
||||
for ua, browser, platform, version, lang in user_agents:
|
||||
request = wrappers.Request({'HTTP_USER_AGENT': ua})
|
||||
self.assert_strict_equal(request.user_agent.browser, browser)
|
||||
self.assert_strict_equal(request.user_agent.platform, platform)
|
||||
self.assert_strict_equal(request.user_agent.version, version)
|
||||
self.assert_strict_equal(request.user_agent.language, lang)
|
||||
assert bool(request.user_agent)
|
||||
self.assert_strict_equal(request.user_agent.to_header(), ua)
|
||||
self.assert_strict_equal(str(request.user_agent), ua)
|
||||
|
||||
request = wrappers.Request({'HTTP_USER_AGENT': 'foo'})
|
||||
assert not request.user_agent
|
||||
|
||||
def test_stream_wrapping(self):
|
||||
class LowercasingStream(object):
|
||||
def __init__(self, stream):
|
||||
self._stream = stream
|
||||
def read(self, size=-1):
|
||||
return self._stream.read(size).lower()
|
||||
def readline(self, size=-1):
|
||||
return self._stream.readline(size).lower()
|
||||
|
||||
data = b'foo=Hello+World'
|
||||
req = wrappers.Request.from_values('/', method='POST', data=data,
|
||||
content_type='application/x-www-form-urlencoded')
|
||||
req.stream = LowercasingStream(req.stream)
|
||||
self.assert_equal(req.form['foo'], 'hello world')
|
||||
|
||||
def test_data_descriptor_triggers_parsing(self):
|
||||
data = b'foo=Hello+World'
|
||||
req = wrappers.Request.from_values('/', method='POST', data=data,
|
||||
content_type='application/x-www-form-urlencoded')
|
||||
|
||||
self.assert_equal(req.data, b'')
|
||||
self.assert_equal(req.form['foo'], u'Hello World')
|
||||
|
||||
def test_get_data_method_parsing_caching_behavior(self):
|
||||
data = b'foo=Hello+World'
|
||||
req = wrappers.Request.from_values('/', method='POST', data=data,
|
||||
content_type='application/x-www-form-urlencoded')
|
||||
|
||||
# get_data() caches, so form stays available
|
||||
self.assert_equal(req.get_data(), data)
|
||||
self.assert_equal(req.form['foo'], u'Hello World')
|
||||
self.assert_equal(req.get_data(), data)
|
||||
|
||||
# here we access the form data first, caching is bypassed
|
||||
req = wrappers.Request.from_values('/', method='POST', data=data,
|
||||
content_type='application/x-www-form-urlencoded')
|
||||
self.assert_equal(req.form['foo'], u'Hello World')
|
||||
self.assert_equal(req.get_data(), b'')
|
||||
|
||||
# Another case is uncached get data which trashes everything
|
||||
req = wrappers.Request.from_values('/', method='POST', data=data,
|
||||
content_type='application/x-www-form-urlencoded')
|
||||
self.assert_equal(req.get_data(cache=False), data)
|
||||
self.assert_equal(req.get_data(cache=False), b'')
|
||||
self.assert_equal(req.form, {})
|
||||
|
||||
# Or we can implicitly start the form parser which is similar to
|
||||
# the old .data behavior
|
||||
req = wrappers.Request.from_values('/', method='POST', data=data,
|
||||
content_type='application/x-www-form-urlencoded')
|
||||
self.assert_equal(req.get_data(parse_form_data=True), b'')
|
||||
self.assert_equal(req.form['foo'], u'Hello World')
|
||||
|
||||
def test_etag_response_mixin(self):
|
||||
response = wrappers.Response('Hello World')
|
||||
self.assert_equal(response.get_etag(), (None, None))
|
||||
response.add_etag()
|
||||
self.assert_equal(response.get_etag(), ('b10a8db164e0754105b7a99be72e3fe5', False))
|
||||
assert not response.cache_control
|
||||
response.cache_control.must_revalidate = True
|
||||
response.cache_control.max_age = 60
|
||||
response.headers['Content-Length'] = len(response.get_data())
|
||||
assert response.headers['Cache-Control'] in ('must-revalidate, max-age=60',
|
||||
'max-age=60, must-revalidate')
|
||||
|
||||
assert 'date' not in response.headers
|
||||
env = create_environ()
|
||||
env.update({
|
||||
'REQUEST_METHOD': 'GET',
|
||||
'HTTP_IF_NONE_MATCH': response.get_etag()[0]
|
||||
})
|
||||
response.make_conditional(env)
|
||||
assert 'date' in response.headers
|
||||
|
||||
# after the thing is invoked by the server as wsgi application
|
||||
# (we're emulating this here), there must not be any entity
|
||||
# headers left and the status code would have to be 304
|
||||
resp = wrappers.Response.from_app(response, env)
|
||||
self.assert_equal(resp.status_code, 304)
|
||||
assert not 'content-length' in resp.headers
|
||||
|
||||
# make sure date is not overriden
|
||||
response = wrappers.Response('Hello World')
|
||||
response.date = 1337
|
||||
d = response.date
|
||||
response.make_conditional(env)
|
||||
self.assert_equal(response.date, d)
|
||||
|
||||
# make sure content length is only set if missing
|
||||
response = wrappers.Response('Hello World')
|
||||
response.content_length = 999
|
||||
response.make_conditional(env)
|
||||
self.assert_equal(response.content_length, 999)
|
||||
|
||||
def test_etag_response_mixin_freezing(self):
|
||||
class WithFreeze(wrappers.ETagResponseMixin, wrappers.BaseResponse):
|
||||
pass
|
||||
class WithoutFreeze(wrappers.BaseResponse, wrappers.ETagResponseMixin):
|
||||
pass
|
||||
|
||||
response = WithFreeze('Hello World')
|
||||
response.freeze()
|
||||
self.assert_strict_equal(response.get_etag(),
|
||||
(text_type(wrappers.generate_etag(b'Hello World')), False))
|
||||
response = WithoutFreeze('Hello World')
|
||||
response.freeze()
|
||||
self.assert_equal(response.get_etag(), (None, None))
|
||||
response = wrappers.Response('Hello World')
|
||||
response.freeze()
|
||||
self.assert_equal(response.get_etag(), (None, None))
|
||||
|
||||
def test_authenticate_mixin(self):
|
||||
resp = wrappers.Response()
|
||||
resp.www_authenticate.type = 'basic'
|
||||
resp.www_authenticate.realm = 'Testing'
|
||||
self.assert_strict_equal(resp.headers['WWW-Authenticate'], u'Basic realm="Testing"')
|
||||
resp.www_authenticate.realm = None
|
||||
resp.www_authenticate.type = None
|
||||
assert 'WWW-Authenticate' not in resp.headers
|
||||
|
||||
def test_response_stream_mixin(self):
|
||||
response = wrappers.Response()
|
||||
response.stream.write('Hello ')
|
||||
response.stream.write('World!')
|
||||
self.assert_equal(response.response, ['Hello ', 'World!'])
|
||||
self.assert_equal(response.get_data(), b'Hello World!')
|
||||
|
||||
def test_common_response_descriptors_mixin(self):
|
||||
response = wrappers.Response()
|
||||
response.mimetype = 'text/html'
|
||||
self.assert_equal(response.mimetype, 'text/html')
|
||||
self.assert_equal(response.content_type, 'text/html; charset=utf-8')
|
||||
self.assert_equal(response.mimetype_params, {'charset': 'utf-8'})
|
||||
response.mimetype_params['x-foo'] = 'yep'
|
||||
del response.mimetype_params['charset']
|
||||
self.assert_equal(response.content_type, 'text/html; x-foo=yep')
|
||||
|
||||
now = datetime.utcnow().replace(microsecond=0)
|
||||
|
||||
assert response.content_length is None
|
||||
response.content_length = '42'
|
||||
self.assert_equal(response.content_length, 42)
|
||||
|
||||
for attr in 'date', 'age', 'expires':
|
||||
assert getattr(response, attr) is None
|
||||
setattr(response, attr, now)
|
||||
self.assert_equal(getattr(response, attr), now)
|
||||
|
||||
assert response.retry_after is None
|
||||
response.retry_after = now
|
||||
self.assert_equal(response.retry_after, now)
|
||||
|
||||
assert not response.vary
|
||||
response.vary.add('Cookie')
|
||||
response.vary.add('Content-Language')
|
||||
assert 'cookie' in response.vary
|
||||
self.assert_equal(response.vary.to_header(), 'Cookie, Content-Language')
|
||||
response.headers['Vary'] = 'Content-Encoding'
|
||||
self.assert_equal(response.vary.as_set(), set(['content-encoding']))
|
||||
|
||||
response.allow.update(['GET', 'POST'])
|
||||
self.assert_equal(response.headers['Allow'], 'GET, POST')
|
||||
|
||||
response.content_language.add('en-US')
|
||||
response.content_language.add('fr')
|
||||
self.assert_equal(response.headers['Content-Language'], 'en-US, fr')
|
||||
|
||||
def test_common_request_descriptors_mixin(self):
|
||||
request = wrappers.Request.from_values(content_type='text/html; charset=utf-8',
|
||||
content_length='23',
|
||||
headers={
|
||||
'Referer': 'http://www.example.com/',
|
||||
'Date': 'Sat, 28 Feb 2009 19:04:35 GMT',
|
||||
'Max-Forwards': '10',
|
||||
'Pragma': 'no-cache',
|
||||
'Content-Encoding': 'gzip',
|
||||
'Content-MD5': '9a3bc6dbc47a70db25b84c6e5867a072'
|
||||
})
|
||||
|
||||
self.assert_equal(request.content_type, 'text/html; charset=utf-8')
|
||||
self.assert_equal(request.mimetype, 'text/html')
|
||||
self.assert_equal(request.mimetype_params, {'charset': 'utf-8'})
|
||||
self.assert_equal(request.content_length, 23)
|
||||
self.assert_equal(request.referrer, 'http://www.example.com/')
|
||||
self.assert_equal(request.date, datetime(2009, 2, 28, 19, 4, 35))
|
||||
self.assert_equal(request.max_forwards, 10)
|
||||
self.assert_true('no-cache' in request.pragma)
|
||||
self.assert_equal(request.content_encoding, 'gzip')
|
||||
self.assert_equal(request.content_md5, '9a3bc6dbc47a70db25b84c6e5867a072')
|
||||
|
||||
def test_shallow_mode(self):
|
||||
request = wrappers.Request({'QUERY_STRING': 'foo=bar'}, shallow=True)
|
||||
self.assert_equal(request.args['foo'], 'bar')
|
||||
self.assert_raises(RuntimeError, lambda: request.form['foo'])
|
||||
|
||||
def test_form_parsing_failed(self):
|
||||
data = (
|
||||
b'--blah\r\n'
|
||||
)
|
||||
data = wrappers.Request.from_values(input_stream=BytesIO(data),
|
||||
content_length=len(data),
|
||||
content_type='multipart/form-data; boundary=foo',
|
||||
method='POST')
|
||||
assert not data.files
|
||||
assert not data.form
|
||||
|
||||
def test_file_closing(self):
|
||||
data = (b'--foo\r\n'
|
||||
b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
|
||||
b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
|
||||
b'file contents, just the contents\r\n'
|
||||
b'--foo--')
|
||||
req = wrappers.Request.from_values(
|
||||
input_stream=BytesIO(data),
|
||||
content_length=len(data),
|
||||
content_type='multipart/form-data; boundary=foo',
|
||||
method='POST'
|
||||
)
|
||||
foo = req.files['foo']
|
||||
self.assert_equal(foo.mimetype, 'text/plain')
|
||||
self.assert_equal(foo.filename, 'foo.txt')
|
||||
|
||||
self.assert_equal(foo.closed, False)
|
||||
req.close()
|
||||
self.assert_equal(foo.closed, True)
|
||||
|
||||
def test_file_closing_with(self):
|
||||
data = (b'--foo\r\n'
|
||||
b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
|
||||
b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
|
||||
b'file contents, just the contents\r\n'
|
||||
b'--foo--')
|
||||
req = wrappers.Request.from_values(
|
||||
input_stream=BytesIO(data),
|
||||
content_length=len(data),
|
||||
content_type='multipart/form-data; boundary=foo',
|
||||
method='POST'
|
||||
)
|
||||
with req:
|
||||
foo = req.files['foo']
|
||||
self.assert_equal(foo.mimetype, 'text/plain')
|
||||
self.assert_equal(foo.filename, 'foo.txt')
|
||||
|
||||
self.assert_equal(foo.closed, True)
|
||||
|
||||
def test_url_charset_reflection(self):
|
||||
req = wrappers.Request.from_values()
|
||||
req.charset = 'utf-7'
|
||||
self.assert_equal(req.url_charset, 'utf-7')
|
||||
|
||||
def test_response_streamed(self):
|
||||
r = wrappers.Response()
|
||||
assert not r.is_streamed
|
||||
r = wrappers.Response("Hello World")
|
||||
assert not r.is_streamed
|
||||
r = wrappers.Response(["foo", "bar"])
|
||||
assert not r.is_streamed
|
||||
def gen():
|
||||
if 0:
|
||||
yield None
|
||||
r = wrappers.Response(gen())
|
||||
assert r.is_streamed
|
||||
|
||||
def test_response_iter_wrapping(self):
|
||||
def uppercasing(iterator):
|
||||
for item in iterator:
|
||||
yield item.upper()
|
||||
def generator():
|
||||
yield 'foo'
|
||||
yield 'bar'
|
||||
req = wrappers.Request.from_values()
|
||||
resp = wrappers.Response(generator())
|
||||
del resp.headers['Content-Length']
|
||||
resp.response = uppercasing(resp.iter_encoded())
|
||||
actual_resp = wrappers.Response.from_app(resp, req.environ, buffered=True)
|
||||
self.assertEqual(actual_resp.get_data(), b'FOOBAR')
|
||||
|
||||
def test_response_freeze(self):
|
||||
def generate():
|
||||
yield "foo"
|
||||
yield "bar"
|
||||
resp = wrappers.Response(generate())
|
||||
resp.freeze()
|
||||
self.assert_equal(resp.response, [b'foo', b'bar'])
|
||||
self.assert_equal(resp.headers['content-length'], '6')
|
||||
|
||||
def test_other_method_payload(self):
|
||||
data = b'Hello World'
|
||||
req = wrappers.Request.from_values(input_stream=BytesIO(data),
|
||||
content_length=len(data),
|
||||
content_type='text/plain',
|
||||
method='WHAT_THE_FUCK')
|
||||
self.assert_equal(req.get_data(), data)
|
||||
self.assert_is_instance(req.stream, LimitedStream)
|
||||
|
||||
def test_urlfication(self):
|
||||
resp = wrappers.Response()
|
||||
resp.headers['Location'] = u'http://üser:pässword@☃.net/påth'
|
||||
resp.headers['Content-Location'] = u'http://☃.net/'
|
||||
headers = resp.get_wsgi_headers(create_environ())
|
||||
self.assert_equal(headers['location'], \
|
||||
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
|
||||
self.assert_equal(headers['content-location'], 'http://xn--n3h.net/')
|
||||
|
||||
def test_new_response_iterator_behavior(self):
|
||||
req = wrappers.Request.from_values()
|
||||
resp = wrappers.Response(u'Hello Wörld!')
|
||||
|
||||
def get_content_length(resp):
|
||||
headers = resp.get_wsgi_headers(req.environ)
|
||||
return headers.get('content-length', type=int)
|
||||
|
||||
def generate_items():
|
||||
yield "Hello "
|
||||
yield u"Wörld!"
|
||||
|
||||
# werkzeug encodes when set to `data` now, which happens
|
||||
# if a string is passed to the response object.
|
||||
self.assert_equal(resp.response, [u'Hello Wörld!'.encode('utf-8')])
|
||||
self.assert_equal(resp.get_data(), u'Hello Wörld!'.encode('utf-8'))
|
||||
self.assert_equal(get_content_length(resp), 13)
|
||||
assert not resp.is_streamed
|
||||
assert resp.is_sequence
|
||||
|
||||
# try the same for manual assignment
|
||||
resp.set_data(u'Wörd')
|
||||
self.assert_equal(resp.response, [u'Wörd'.encode('utf-8')])
|
||||
self.assert_equal(resp.get_data(), u'Wörd'.encode('utf-8'))
|
||||
self.assert_equal(get_content_length(resp), 5)
|
||||
assert not resp.is_streamed
|
||||
assert resp.is_sequence
|
||||
|
||||
# automatic generator sequence conversion
|
||||
resp.response = generate_items()
|
||||
assert resp.is_streamed
|
||||
assert not resp.is_sequence
|
||||
self.assert_equal(resp.get_data(), u'Hello Wörld!'.encode('utf-8'))
|
||||
self.assert_equal(resp.response, [b'Hello ', u'Wörld!'.encode('utf-8')])
|
||||
assert not resp.is_streamed
|
||||
assert resp.is_sequence
|
||||
|
||||
# automatic generator sequence conversion
|
||||
resp.response = generate_items()
|
||||
resp.implicit_sequence_conversion = False
|
||||
assert resp.is_streamed
|
||||
assert not resp.is_sequence
|
||||
self.assert_raises(RuntimeError, lambda: resp.get_data())
|
||||
resp.make_sequence()
|
||||
self.assert_equal(resp.get_data(), u'Hello Wörld!'.encode('utf-8'))
|
||||
self.assert_equal(resp.response, [b'Hello ', u'Wörld!'.encode('utf-8')])
|
||||
assert not resp.is_streamed
|
||||
assert resp.is_sequence
|
||||
|
||||
# stream makes it a list no matter how the conversion is set
|
||||
for val in True, False:
|
||||
resp.implicit_sequence_conversion = val
|
||||
resp.response = ("foo", "bar")
|
||||
assert resp.is_sequence
|
||||
resp.stream.write('baz')
|
||||
self.assert_equal(resp.response, ['foo', 'bar', 'baz'])
|
||||
|
||||
def test_form_data_ordering(self):
|
||||
class MyRequest(wrappers.Request):
|
||||
parameter_storage_class = ImmutableOrderedMultiDict
|
||||
|
||||
req = MyRequest.from_values('/?foo=1&bar=0&foo=3')
|
||||
self.assert_equal(list(req.args), ['foo', 'bar'])
|
||||
self.assert_equal(list(req.args.items(multi=True)), [
|
||||
('foo', '1'),
|
||||
('bar', '0'),
|
||||
('foo', '3')
|
||||
])
|
||||
self.assert_is_instance(req.args, ImmutableOrderedMultiDict)
|
||||
self.assert_is_instance(req.values, CombinedMultiDict)
|
||||
self.assert_equal(req.values['foo'], '1')
|
||||
self.assert_equal(req.values.getlist('foo'), ['1', '3'])
|
||||
|
||||
def test_storage_classes(self):
|
||||
class MyRequest(wrappers.Request):
|
||||
dict_storage_class = dict
|
||||
list_storage_class = list
|
||||
parameter_storage_class = dict
|
||||
req = MyRequest.from_values('/?foo=baz', headers={
|
||||
'Cookie': 'foo=bar'
|
||||
})
|
||||
assert type(req.cookies) is dict
|
||||
self.assert_equal(req.cookies, {'foo': 'bar'})
|
||||
assert type(req.access_route) is list
|
||||
|
||||
assert type(req.args) is dict
|
||||
assert type(req.values) is CombinedMultiDict
|
||||
self.assert_equal(req.values['foo'], u'baz')
|
||||
|
||||
req = wrappers.Request.from_values(headers={
|
||||
'Cookie': 'foo=bar'
|
||||
})
|
||||
assert type(req.cookies) is ImmutableTypeConversionDict
|
||||
self.assert_equal(req.cookies, {'foo': 'bar'})
|
||||
assert type(req.access_route) is ImmutableList
|
||||
|
||||
MyRequest.list_storage_class = tuple
|
||||
req = MyRequest.from_values()
|
||||
assert type(req.access_route) is tuple
|
||||
|
||||
def test_response_headers_passthrough(self):
|
||||
headers = wrappers.Headers()
|
||||
resp = wrappers.Response(headers=headers)
|
||||
assert resp.headers is headers
|
||||
|
||||
def test_response_304_no_content_length(self):
|
||||
resp = wrappers.Response('Test', status=304)
|
||||
env = create_environ()
|
||||
assert 'content-length' not in resp.get_wsgi_headers(env)
|
||||
|
||||
def test_ranges(self):
|
||||
# basic range stuff
|
||||
req = wrappers.Request.from_values()
|
||||
assert req.range is None
|
||||
req = wrappers.Request.from_values(headers={'Range': 'bytes=0-499'})
|
||||
self.assert_equal(req.range.ranges, [(0, 500)])
|
||||
|
||||
resp = wrappers.Response()
|
||||
resp.content_range = req.range.make_content_range(1000)
|
||||
self.assert_equal(resp.content_range.units, 'bytes')
|
||||
self.assert_equal(resp.content_range.start, 0)
|
||||
self.assert_equal(resp.content_range.stop, 500)
|
||||
self.assert_equal(resp.content_range.length, 1000)
|
||||
self.assert_equal(resp.headers['Content-Range'], 'bytes 0-499/1000')
|
||||
|
||||
resp.content_range.unset()
|
||||
assert 'Content-Range' not in resp.headers
|
||||
|
||||
resp.headers['Content-Range'] = 'bytes 0-499/1000'
|
||||
self.assert_equal(resp.content_range.units, 'bytes')
|
||||
self.assert_equal(resp.content_range.start, 0)
|
||||
self.assert_equal(resp.content_range.stop, 500)
|
||||
self.assert_equal(resp.content_range.length, 1000)
|
||||
|
||||
def test_auto_content_length(self):
|
||||
resp = wrappers.Response('Hello World!')
|
||||
self.assert_equal(resp.content_length, 12)
|
||||
|
||||
resp = wrappers.Response(['Hello World!'])
|
||||
assert resp.content_length is None
|
||||
self.assert_equal(resp.get_wsgi_headers({})['Content-Length'], '12')
|
||||
|
||||
def test_disabled_auto_content_length(self):
|
||||
class MyResponse(wrappers.Response):
|
||||
automatically_set_content_length = False
|
||||
resp = MyResponse('Hello World!')
|
||||
self.assert_is_none(resp.content_length)
|
||||
|
||||
resp = MyResponse(['Hello World!'])
|
||||
self.assert_is_none(resp.content_length)
|
||||
self.assert_not_in('Content-Length', resp.get_wsgi_headers({}))
|
||||
|
||||
def test_location_header_autocorrect(self):
|
||||
env = create_environ()
|
||||
class MyResponse(wrappers.Response):
|
||||
autocorrect_location_header = False
|
||||
resp = MyResponse('Hello World!')
|
||||
resp.headers['Location'] = '/test'
|
||||
self.assert_equal(resp.get_wsgi_headers(env)['Location'], '/test')
|
||||
|
||||
resp = wrappers.Response('Hello World!')
|
||||
resp.headers['Location'] = '/test'
|
||||
self.assert_equal(resp.get_wsgi_headers(env)['Location'], 'http://localhost/test')
|
||||
|
||||
def test_modified_url_encoding(self):
|
||||
class ModifiedRequest(wrappers.Request):
|
||||
url_charset = 'euc-kr'
|
||||
|
||||
req = ModifiedRequest.from_values(u'/?foo=정상처리'.encode('euc-kr'))
|
||||
self.assert_strict_equal(req.args['foo'], u'정상처리')
|
||||
|
||||
def suite():
|
||||
suite = unittest.TestSuite()
|
||||
suite.addTest(unittest.makeSuite(WrappersTestCase))
|
||||
return suite
|
||||
|
|
@@ -0,0 +1,352 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.wsgi
    ~~~~~~~~~~~~~~~~~~~~~~~

    Tests the WSGI utilities.

    :copyright: (c) 2013 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
from os import path
from contextlib import closing

from werkzeug.testsuite import WerkzeugTestCase, get_temporary_directory

from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
from werkzeug._compat import StringIO, BytesIO, NativeStringIO, to_native


class WSGIUtilsTestCase(WerkzeugTestCase):

    def test_shareddatamiddleware_get_file_loader(self):
        app = wsgi.SharedDataMiddleware(None, {})
        assert callable(app.get_file_loader('foo'))

    def test_shared_data_middleware(self):
        def null_application(environ, start_response):
            start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
            yield b'NOT FOUND'

        test_dir = get_temporary_directory()
        with open(path.join(test_dir, to_native(u'äöü', 'utf-8')), 'w') as test_file:
            test_file.write(u'FOUND')

        app = wsgi.SharedDataMiddleware(null_application, {
            '/': path.join(path.dirname(__file__), 'res'),
            '/sources': path.join(path.dirname(__file__), 'res'),
            '/pkg': ('werkzeug.debug', 'shared'),
            '/foo': test_dir
        })

        for p in '/test.txt', '/sources/test.txt', '/foo/äöü':
            app_iter, status, headers = run_wsgi_app(app, create_environ(p))
            self.assert_equal(status, '200 OK')
            with closing(app_iter) as app_iter:
                data = b''.join(app_iter).strip()
            self.assert_equal(data, b'FOUND')

        app_iter, status, headers = run_wsgi_app(
            app, create_environ('/pkg/debugger.js'))
        with closing(app_iter) as app_iter:
            contents = b''.join(app_iter)
        self.assert_in(b'$(function() {', contents)

        app_iter, status, headers = run_wsgi_app(
            app, create_environ('/missing'))
        self.assert_equal(status, '404 NOT FOUND')
        self.assert_equal(b''.join(app_iter).strip(), b'NOT FOUND')

    def test_get_host(self):
        env = {'HTTP_X_FORWARDED_HOST': 'example.org',
               'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
        self.assert_equal(wsgi.get_host(env), 'example.org')
        self.assert_equal(
            wsgi.get_host(create_environ('/', 'http://example.org')),
            'example.org')

    def test_get_host_multiple_forwarded(self):
        env = {'HTTP_X_FORWARDED_HOST': 'example.com, example.org',
               'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
        self.assert_equal(wsgi.get_host(env), 'example.com')
        self.assert_equal(
            wsgi.get_host(create_environ('/', 'http://example.com')),
            'example.com')

    def test_get_host_validation(self):
        env = {'HTTP_X_FORWARDED_HOST': 'example.org',
               'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
        self.assert_equal(wsgi.get_host(env, trusted_hosts=['.example.org']),
                          'example.org')
        self.assert_raises(BadRequest, wsgi.get_host, env,
                           trusted_hosts=['example.com'])

    def test_responder(self):
        def foo(environ, start_response):
            return BaseResponse(b'Test')
        client = Client(wsgi.responder(foo), BaseResponse)
        response = client.get('/')
        self.assert_equal(response.status_code, 200)
        self.assert_equal(response.data, b'Test')

    def test_pop_path_info(self):
        original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}

        # regular path info popping
        def assert_tuple(script_name, path_info):
            self.assert_equal(env.get('SCRIPT_NAME'), script_name)
            self.assert_equal(env.get('PATH_INFO'), path_info)
        env = original_env.copy()
        pop = lambda: wsgi.pop_path_info(env)

        assert_tuple('/foo', '/a/b///c')
        self.assert_equal(pop(), 'a')
        assert_tuple('/foo/a', '/b///c')
        self.assert_equal(pop(), 'b')
        assert_tuple('/foo/a/b', '///c')
        self.assert_equal(pop(), 'c')
        assert_tuple('/foo/a/b///c', '')
        self.assert_is_none(pop())

    def test_peek_path_info(self):
        env = {
            'SCRIPT_NAME': '/foo',
            'PATH_INFO': '/aaa/b///c'
        }

        self.assert_equal(wsgi.peek_path_info(env), 'aaa')
        self.assert_equal(wsgi.peek_path_info(env), 'aaa')
        self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')
        self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')

    def test_path_info_and_script_name_fetching(self):
        env = create_environ(u'/\N{SNOWMAN}', u'http://example.com/\N{COMET}/')
        self.assert_equal(wsgi.get_path_info(env), u'/\N{SNOWMAN}')
        self.assert_equal(wsgi.get_path_info(env, charset=None), u'/\N{SNOWMAN}'.encode('utf-8'))
        self.assert_equal(wsgi.get_script_name(env), u'/\N{COMET}')
        self.assert_equal(wsgi.get_script_name(env, charset=None), u'/\N{COMET}'.encode('utf-8'))

    def test_query_string_fetching(self):
        env = create_environ(u'/?\N{SNOWMAN}=\N{COMET}')
        qs = wsgi.get_query_string(env)
        self.assert_strict_equal(qs, '%E2%98%83=%E2%98%84')

    def test_limited_stream(self):
        class RaisingLimitedStream(wsgi.LimitedStream):
            def on_exhausted(self):
                raise BadRequest('input stream exhausted')

        io = BytesIO(b'123456')
        stream = RaisingLimitedStream(io, 3)
        self.assert_strict_equal(stream.read(), b'123')
        self.assert_raises(BadRequest, stream.read)

        io = BytesIO(b'123456')
        stream = RaisingLimitedStream(io, 3)
        self.assert_strict_equal(stream.tell(), 0)
        self.assert_strict_equal(stream.read(1), b'1')
        self.assert_strict_equal(stream.tell(), 1)
        self.assert_strict_equal(stream.read(1), b'2')
        self.assert_strict_equal(stream.tell(), 2)
        self.assert_strict_equal(stream.read(1), b'3')
        self.assert_strict_equal(stream.tell(), 3)
        self.assert_raises(BadRequest, stream.read)

        io = BytesIO(b'123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        self.assert_strict_equal(stream.readline(), b'123456\n')
        self.assert_strict_equal(stream.readline(), b'ab')

        io = BytesIO(b'123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        self.assert_strict_equal(stream.readlines(), [b'123456\n', b'ab'])

        io = BytesIO(b'123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        self.assert_strict_equal(stream.readlines(2), [b'12'])
        self.assert_strict_equal(stream.readlines(2), [b'34'])
        self.assert_strict_equal(stream.readlines(), [b'56\n', b'ab'])

        io = BytesIO(b'123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        self.assert_strict_equal(stream.readline(100), b'123456\n')

        io = BytesIO(b'123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        self.assert_strict_equal(stream.readlines(100), [b'123456\n', b'ab'])

        io = BytesIO(b'123456')
        stream = wsgi.LimitedStream(io, 3)
        self.assert_strict_equal(stream.read(1), b'1')
        self.assert_strict_equal(stream.read(1), b'2')
        self.assert_strict_equal(stream.read(), b'3')
        self.assert_strict_equal(stream.read(), b'')

        io = BytesIO(b'123456')
        stream = wsgi.LimitedStream(io, 3)
        self.assert_strict_equal(stream.read(-1), b'123')

        io = BytesIO(b'123456')
        stream = wsgi.LimitedStream(io, 0)
        self.assert_strict_equal(stream.read(-1), b'')

        io = StringIO(u'123456')
        stream = wsgi.LimitedStream(io, 0)
        self.assert_strict_equal(stream.read(-1), u'')

        io = StringIO(u'123\n456\n')
        stream = wsgi.LimitedStream(io, 8)
        self.assert_strict_equal(list(stream), [u'123\n', u'456\n'])

    def test_limited_stream_disconnection(self):
        io = BytesIO(b'A bit of content')

        # disconnect detection on out of bytes
        stream = wsgi.LimitedStream(io, 255)
        with self.assert_raises(ClientDisconnected):
            stream.read()

        # disconnect detection because file close
        io = BytesIO(b'x' * 255)
        io.close()
        stream = wsgi.LimitedStream(io, 255)
        with self.assert_raises(ClientDisconnected):
            stream.read()

    def test_path_info_extraction(self):
        x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
        self.assert_equal(x, u'/hello')
        x = wsgi.extract_path_info('http://example.com/app',
                                   'https://example.com/app/hello')
        self.assert_equal(x, u'/hello')
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app/hello')
        self.assert_equal(x, u'/hello')
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app')
        self.assert_equal(x, u'/')
        x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
        self.assert_equal(x, u'/fööbär')
        x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
        self.assert_equal(x, u'/fööbär')

        env = create_environ(u'/fööbär', u'http://☃.net/x/')
        x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
        self.assert_equal(x, u'/fööbär')

        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/a/hello')
        self.assert_is_none(x)
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app/hello',
                                   collapse_http_schemes=False)
        self.assert_is_none(x)

    def test_get_host_fallback(self):
        self.assert_equal(wsgi.get_host({
            'SERVER_NAME': 'foobar.example.com',
            'wsgi.url_scheme': 'http',
            'SERVER_PORT': '80'
        }), 'foobar.example.com')
        self.assert_equal(wsgi.get_host({
            'SERVER_NAME': 'foobar.example.com',
            'wsgi.url_scheme': 'http',
            'SERVER_PORT': '81'
        }), 'foobar.example.com:81')

    def test_get_current_url_unicode(self):
        env = create_environ()
        env['QUERY_STRING'] = 'foo=bar&baz=blah&meh=\xcf'
        rv = wsgi.get_current_url(env)
        self.assert_strict_equal(rv,
                                 u'http://localhost/?foo=bar&baz=blah&meh=\ufffd')

    def test_multi_part_line_breaks(self):
        data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
        test_stream = NativeStringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=16))
        self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
                                  'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])

        data = 'abc\r\nThis line is broken by the buffer length.' \
               '\r\nFoo bar baz'
        test_stream = NativeStringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=24))
        self.assert_equal(lines, ['abc\r\n', 'This line is broken by the '
                                  'buffer length.\r\n', 'Foo bar baz'])

    def test_multi_part_line_breaks_bytes(self):
        data = b'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
        test_stream = BytesIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=16))
        self.assert_equal(lines, [b'abcdef\r\n', b'ghijkl\r\n',
                                  b'mnopqrstuvwxyz\r\n', b'ABCDEFGHIJK'])

        data = b'abc\r\nThis line is broken by the buffer length.' \
               b'\r\nFoo bar baz'
        test_stream = BytesIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=24))
        self.assert_equal(lines, [b'abc\r\n', b'This line is broken by the '
                                  b'buffer length.\r\n', b'Foo bar baz'])

    def test_multi_part_line_breaks_problematic(self):
        data = 'abc\rdef\r\nghi'
        for x in range(1, 10):
            test_stream = NativeStringIO(data)
            lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                             buffer_size=4))
            self.assert_equal(lines, ['abc\r', 'def\r\n', 'ghi'])

    def test_iter_functions_support_iterators(self):
        data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
        lines = list(wsgi.make_line_iter(data))
        self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
                                  'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])

    def test_make_chunk_iter(self):
        data = [u'abcdefXghi', u'jklXmnopqrstuvwxyzX', u'ABCDEFGHIJK']
        rv = list(wsgi.make_chunk_iter(data, 'X'))
        self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
                               u'ABCDEFGHIJK'])

        data = u'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
        test_stream = StringIO(data)
        rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
                                       buffer_size=4))
        self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
                               u'ABCDEFGHIJK'])

    def test_make_chunk_iter_bytes(self):
        data = [b'abcdefXghi', b'jklXmnopqrstuvwxyzX', b'ABCDEFGHIJK']
        rv = list(wsgi.make_chunk_iter(data, 'X'))
        self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
                               b'ABCDEFGHIJK'])

        data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
        test_stream = BytesIO(data)
        rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
                                       buffer_size=4))
        self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
                               b'ABCDEFGHIJK'])

    def test_lines_longer_buffer_size(self):
        data = '1234567890\n1234567890\n'
        for bufsize in range(1, 15):
            # exercise every buffer size the loop names, not a fixed one
            lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
                                             buffer_size=bufsize))
            self.assert_equal(lines, ['1234567890\n', '1234567890\n'])


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
    return suite
883
Linux_x86_64/lib/python2.7/site-packages/werkzeug/urls.py
Normal file
@@ -0,0 +1,883 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.urls
    ~~~~~~~~~~~~~

    This module implements various URL related functions.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import re
from werkzeug._compat import text_type, PY2, to_unicode, \
     to_native, implements_to_string, try_coerce_native, \
     normalize_string_tuple, make_literal_wrapper, \
     fix_tuple_repr
from werkzeug._internal import _encode_idna, _decode_idna
from werkzeug.datastructures import MultiDict, iter_multi_items
from collections import namedtuple


# A regular expression for what a valid scheme looks like
_scheme_re = re.compile(r'^[a-zA-Z0-9+-.]+$')

# Characters that are safe in any part of an URL.
_always_safe = (b'abcdefghijklmnopqrstuvwxyz'
                b'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.-+')

_hexdigits = '0123456789ABCDEFabcdef'
_hextobyte = dict(
    ((a + b).encode(), int(a + b, 16))
    for a in _hexdigits for b in _hexdigits
)


_URLTuple = fix_tuple_repr(namedtuple('_URLTuple',
    ['scheme', 'netloc', 'path', 'query', 'fragment']))


class _URLMixin(object):
    __slots__ = ()

    def replace(self, **kwargs):
        """Return an URL with the same values, except for those parameters
        given new values by whichever keyword arguments are specified."""
        return self._replace(**kwargs)

    @property
    def host(self):
        """The host part of the URL if available, otherwise `None`.  The
        host is either the hostname or the IP address mentioned in the
        URL.  It will not contain the port.
        """
        return self._split_host()[0]

    @property
    def ascii_host(self):
        """Works exactly like :attr:`host` but will return a result that
        is restricted to ASCII.  If it finds a netloc that is not ASCII
        it will attempt to idna decode it.  This is useful for socket
        operations when the URL might include internationalized characters.
        """
        rv = self.host
        if rv is not None and isinstance(rv, text_type):
            rv = _encode_idna(rv)
        return to_native(rv, 'ascii', 'ignore')

    @property
    def port(self):
        """The port in the URL as an integer if it was present, `None`
        otherwise.  This does not fill in default ports.
        """
        try:
            rv = int(to_native(self._split_host()[1]))
            if 0 <= rv <= 65535:
                return rv
        except (ValueError, TypeError):
            pass

    @property
    def auth(self):
        """The authentication part in the URL if available, `None`
        otherwise.
        """
        return self._split_netloc()[0]

    @property
    def username(self):
        """The username if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[0]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_username(self):
        """The username if it was part of the URL, `None` otherwise.
        Unlike :attr:`username` this one is not being decoded.
        """
        return self._split_auth()[0]

    @property
    def password(self):
        """The password if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[1]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_password(self):
        """The password if it was part of the URL, `None` otherwise.
        Unlike :attr:`password` this one is not being decoded.
        """
        return self._split_auth()[1]

    def decode_query(self, *args, **kwargs):
        """Decodes the query part of the URL.  This is a shortcut for
        calling :func:`url_decode` on the query argument.  The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)

    def join(self, *args, **kwargs):
        """Joins this URL with another one.  This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))

    def to_url(self):
        """Returns a URL string or bytes depending on the type of the
        information stored.  This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)

    def decode_netloc(self):
        """Decodes the netloc part into a string."""
        rv = _decode_idna(self.host or '')

        if ':' in rv:
            rv = '[%s]' % rv
        port = self.port
        if port is not None:
            rv = '%s:%d' % (rv, port)
        auth = ':'.join(filter(None, [
            _url_unquote_legacy(self.raw_username or '', '/:%@'),
            _url_unquote_legacy(self.raw_password or '', '/:%@'),
        ]))
        if auth:
            rv = '%s@%s' % (auth, rv)
        return rv

    def to_uri_tuple(self):
        """Returns a :class:`BytesURL` tuple that holds a URI.  This will
        encode all the information in the URL properly to ASCII using the
        rules a web browser would follow.

        It's usually more interesting to directly call :meth:`iri_to_uri` which
        will return a string.
        """
        return url_parse(iri_to_uri(self).encode('ascii'))

    def to_iri_tuple(self):
        """Returns a :class:`URL` tuple that holds an IRI.  This will try
        to decode as much information as possible in the URL without
        losing information similar to how a web browser does it for the
        URL bar.

        It's usually more interesting to directly call :meth:`uri_to_iri` which
        will return a string.
        """
        return url_parse(uri_to_iri(self))

    def _split_netloc(self):
        if self._at in self.netloc:
            return self.netloc.split(self._at, 1)
        return None, self.netloc

    def _split_auth(self):
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None
        return auth.split(self._colon, 1)

    def _split_host(self):
        rv = self._split_netloc()[1]
        if not rv:
            return None, None

        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                return rv.split(self._colon, 1)
            return rv, None

        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None

        host = rv[1:idx]
        rest = rv[idx + 1:]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None


@implements_to_string
class URL(_URLTuple, _URLMixin):
    """Represents a parsed URL.  This behaves like a regular tuple but
    also has some extra attributes that give further insight into the
    URL.
    """
    __slots__ = ()
    _at = '@'
    _colon = ':'
    _lbracket = '['
    _rbracket = ']'

    def __str__(self):
        return self.to_url()

    def encode_netloc(self):
        """Encodes the netloc part to an ASCII safe URL as bytes."""
        rv = self.ascii_host or ''
        if ':' in rv:
            rv = '[%s]' % rv
        port = self.port
        if port is not None:
            rv = '%s:%d' % (rv, port)
        auth = ':'.join(filter(None, [
            url_quote(self.raw_username or '', 'utf-8', 'strict', '/:%'),
            url_quote(self.raw_password or '', 'utf-8', 'strict', '/:%'),
        ]))
        if auth:
            rv = '%s@%s' % (auth, rv)
        return rv.encode('ascii')

    def encode(self, charset='utf-8', errors='replace'):
        """Encodes the URL to a tuple made out of bytes.  The charset is
        only being used for the path, query and fragment.
        """
        return BytesURL(
            self.scheme.encode('ascii'),
            self.encode_netloc(),
            self.path.encode(charset, errors),
            self.query.encode(charset, errors),
            self.fragment.encode(charset, errors)
        )
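
# Illustrative only, not part of the original module: once ``url_parse``
# (defined below) has produced a :class:`URL`, the mixin properties above
# give structured access to its pieces.  Expected values, assuming the
# werkzeug 0.9 semantics implemented in this file:
#
#   >>> u = url_parse(u'http://user:pw@example.com:8080/p?q=1#frag')
#   >>> (u.host, u.port, u.username, u.password)
#   (u'example.com', 8080, u'user', u'pw')
#   >>> u.replace(scheme=u'https').to_url()
#   u'https://user:pw@example.com:8080/p?q=1#frag'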


class BytesURL(_URLTuple, _URLMixin):
    """Represents a parsed URL in bytes."""
    __slots__ = ()
    _at = b'@'
    _colon = b':'
    _lbracket = b'['
    _rbracket = b']'

    def __str__(self):
        return self.to_url().decode('utf-8', 'replace')

    def encode_netloc(self):
        """Returns the netloc unchanged as bytes."""
        return self.netloc

    def decode(self, charset='utf-8', errors='replace'):
        """Decodes the URL to a tuple made out of strings.  The charset is
        only being used for the path, query and fragment.
        """
        return URL(
            self.scheme.decode('ascii'),
            self.decode_netloc(),
            self.path.decode(charset, errors),
            self.query.decode(charset, errors),
            self.fragment.decode(charset, errors)
        )


def _unquote_to_bytes(string, unsafe=''):
    if isinstance(string, text_type):
        string = string.encode('utf-8')
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode('utf-8')
    unsafe = frozenset(bytearray(unsafe))
    bits = iter(string.split(b'%'))
    result = bytearray(next(bits, b''))
    for item in bits:
        try:
            char = _hextobyte[item[:2]]
            if char in unsafe:
                raise KeyError()
            result.append(char)
            result.extend(item[2:])
        except KeyError:
            result.extend(b'%')
            result.extend(item)
    return bytes(result)
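
# A hedged sketch of the behavior above (not part of the original module):
# percent escapes are decoded to raw bytes unless the decoded byte is listed
# as unsafe, in which case the escape is preserved verbatim.
#
#   >>> _unquote_to_bytes('a%20b%2Fc')
#   b'a b/c'
#   >>> _unquote_to_bytes('a%20b%2Fc', unsafe='/')
#   b'a b%2Fc'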


def _url_encode_impl(obj, charset, encode_keys, sort, key):
    iterable = iter_multi_items(obj)
    if sort:
        iterable = sorted(iterable, key=key)
    for key, value in iterable:
        if value is None:
            continue
        if not isinstance(key, bytes):
            key = text_type(key).encode(charset)
        if not isinstance(value, bytes):
            value = text_type(value).encode(charset)
        yield url_quote(key) + '=' + url_quote_plus(value)


def _url_unquote_legacy(value, unsafe=''):
    try:
        return url_unquote(value, charset='utf-8',
                           errors='strict', unsafe=unsafe)
    except UnicodeError:
        return url_unquote(value, charset='latin1', unsafe=unsafe)


def url_parse(url, scheme=None, allow_fragments=True):
    """Parses a URL from a string into a :class:`URL` tuple.  If the URL
    is lacking a scheme it can be provided as second argument.  Otherwise,
    it is ignored.  Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.

    The inverse of this function is :func:`url_unparse`.

    :param url: the URL to parse.
    :param scheme: the default scheme to use if the URL is schemeless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    s = make_literal_wrapper(url)
    is_text_based = isinstance(url, text_type)

    if scheme is None:
        scheme = s('')
    netloc = query = fragment = s('')
    i = url.find(s(':'))
    if i > 0 and _scheme_re.match(to_native(url[:i], errors='replace')):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1:]
        if not rest or any(c not in s('0123456789') for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest

    if url[:2] == s('//'):
        delim = len(url)
        for c in s('/?#'):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        if ((s('[') in netloc and s(']') not in netloc) or
            (s(']') in netloc and s('[') not in netloc)):
            raise ValueError('Invalid IPv6 URL')

    if allow_fragments and s('#') in url:
        url, fragment = url.split(s('#'), 1)
    if s('?') in url:
        url, query = url.split(s('?'), 1)

    result_type = is_text_based and URL or BytesURL
    return result_type(scheme, netloc, url, query, fragment)
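
# Illustrative only (assumes werkzeug 0.9 behavior): text input yields a
# :class:`URL`, bytes input a :class:`BytesURL`, and a missing scheme can be
# supplied through the second argument.
#
#   >>> u = url_parse(u'http://example.com/p?q=1#f')
#   >>> (u.scheme, u.netloc, u.path, u.query, u.fragment)
#   (u'http', u'example.com', u'/p', u'q=1', u'f')
#   >>> url_parse(u'//example.com/p', scheme=u'https').scheme
#   u'https'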


def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
    """URL encode a single string with a given encoding.

    :param string: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    safe = frozenset(bytearray(safe) + _always_safe) - frozenset(bytearray(unsafe))
    rv = bytearray()
    for char in bytearray(string):
        if char in safe:
            rv.append(char)
        else:
            rv.extend(('%%%02X' % char).encode('ascii'))
    return to_native(bytes(rv))
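
# Illustrative only (assumes the 0.9 defaults above, ``safe='/:'``):
#
#   >>> url_quote(u'/pa th/')
#   '/pa%20th/'
#   >>> url_quote(u'a/b', safe='')
#   'a%2Fb'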


def url_quote_plus(string, charset='utf-8', errors='strict', safe=''):
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param string: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.
    """
    return url_quote(string, charset, errors, safe + ' ', '+').replace(' ', '+')


def url_unparse(components):
    """The reverse operation to :meth:`url_parse`.  This accepts arbitrary
    tuples as well as :class:`URL` tuples and returns a URL as a string.

    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    scheme, netloc, path, query, fragment = \
        normalize_string_tuple(components)
    s = make_literal_wrapper(scheme)
    url = s('')

    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do.  This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s('/'))):
        if path and path[:1] != s('/'):
            path = s('/') + path
        url = s('//') + (netloc or s('')) + path
    elif path:
        url += path
    if scheme:
        url = scheme + s(':') + url
    if query:
        url = url + s('?') + query
    if fragment:
        url = url + s('#') + fragment
    return url
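
# Round-trip sketch (illustrative, not part of the original module):
#
#   >>> url_unparse((u'http', u'example.com', u'/p', u'q=1', u'f'))
#   u'http://example.com/p?q=1#f'
#   >>> url_unparse(url_parse(u'file:///etc/hosts'))
#   u'file:///etc/hosts'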


def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
    """URL decode a single string with a given encoding.  If the charset
    is set to `None` no unicode decoding is performed and raw bytes
    are returned.

    :param string: the string to unquote.
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param errors: the error handling for the charset decoding.
    """
    rv = _unquote_to_bytes(string, unsafe)
    if charset is not None:
        rv = rv.decode(charset, errors)
    return rv


def url_unquote_plus(s, charset='utf-8', errors='replace'):
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default encoding errors are replaced; you can set `errors` to
    ``'ignore'`` or ``'strict'`` for a different behavior.  In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    :param s: The string to unquote.
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    if isinstance(s, text_type):
        s = s.replace(u'+', u' ')
    else:
        s = s.replace(b'+', b' ')
    return url_unquote(s, charset, errors)
|
||||
|
||||
|
||||
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get a URL from a user that isn't a real URL because
    it contains unsafe characters like ' ' and so on.  This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
                    unicode string.
    """
    scheme, netloc, path, qs, anchor = url_parse(to_unicode(s, charset, 'replace'))
    path = url_quote(path, charset, safe='/%+$!*\'(),')
    qs = url_quote_plus(qs, charset, safe=':&%=+$!*\'(),')
    return to_native(url_unparse((scheme, netloc, path, qs, anchor)))


def uri_to_iri(uri, charset='utf-8', errors='replace'):
    r"""
    Converts a URI in a given charset to an IRI.

    Examples for URI versus IRI:

    >>> uri_to_iri(b'http://xn--n3h.net/')
    u'http://\u2603.net/'
    >>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
    u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'

    Query strings are left unchanged:

    >>> uri_to_iri('/?foo=24&x=%26%2f')
    u'/?foo=24&x=%26%2f'

    .. versionadded:: 0.6

    :param uri: The URI to convert.
    :param charset: The charset of the URI.
    :param errors: The error handling on decode.
    """
    if isinstance(uri, tuple):
        uri = url_unparse(uri)
    uri = url_parse(to_unicode(uri, charset))
    path = url_unquote(uri.path, charset, errors, '%/;?')
    query = url_unquote(uri.query, charset, errors, '%;/?:@&=+,$')
    fragment = url_unquote(uri.fragment, charset, errors, '%;/?:@&=+,$')
    return url_unparse((uri.scheme, uri.decode_netloc(),
                        path, query, fragment))


def iri_to_uri(iri, charset='utf-8', errors='strict'):
    r"""
    Converts any unicode based IRI to an acceptable ASCII URI.  Werkzeug always
    uses utf-8 URLs internally because this is what browsers and HTTP do as
    well.  In some places where it accepts a URL it also accepts a unicode IRI
    and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'

    .. versionadded:: 0.6

    :param iri: The IRI to convert.
    :param charset: The charset for the URI.
    :param errors: The error handling on encode.
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)
    iri = url_parse(to_unicode(iri, charset, errors))

    netloc = iri.encode_netloc().decode('ascii')
    path = url_quote(iri.path, charset, errors, '/:~+%')
    query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
    fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')

    return to_native(url_unparse((iri.scheme, netloc,
                                  path, query, fragment)))


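# Illustrative round trip between the two converters (added for clarity, not
# part of the original module); the examples mirror the doctests above:
#
#   >>> iri_to_uri(u'http://\u2603.net/')
#   'http://xn--n3h.net/'
#   >>> uri_to_iri(b'http://xn--n3h.net/')
#   u'http://\u2603.net/'

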
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
               errors='replace', separator='&', cls=None):
    """
    Parse a querystring and return it as :class:`MultiDict`.  There is a
    difference in key decoding on different Python versions.  On Python 3
    keys will always be fully decoded whereas on Python 2, keys will
    remain bytestrings if they fit into ASCII.  On 2.x keys can be forced
    to be unicode by setting `decode_keys` to `True`.

    If the charset is set to `None` no unicode decoding will happen and
    raw bytes will be returned.

    Per default a missing value for a key will default to an empty string.
    If you don't want that behavior you can set `include_empty` to `False`.

    Per default decoding errors are replaced with the unicode replacement
    character.  If you want a different behavior you can set `errors` to
    ``'ignore'`` or ``'strict'``.  In strict mode a
    :exc:`UnicodeDecodeError` is raised.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported.  If you want to
       use ";" instead a different `separator` can be provided.

       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param decode_keys: Used on Python 2.x to control whether keys should
                        be forced to be unicode objects.  If set to `True`
                        then keys will be unicode in all cases. Otherwise,
                        they remain `str` if they fit into ASCII.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        cls = MultiDict
    if isinstance(s, text_type) and not isinstance(separator, text_type):
        separator = separator.decode(charset or 'ascii')
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or 'ascii')
    return cls(_url_decode_impl(s.split(separator), charset, decode_keys,
                                include_empty, errors))


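# Illustrative usage of url_decode (added for clarity, not part of the
# original module); repeated keys are collected in the MultiDict:
#
#   >>> d = url_decode('a=1&a=2&b=3')
#   >>> d.getlist('a')
#   [u'1', u'2']
#   >>> d['b']
#   u'3'

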
def url_decode_stream(stream, charset='utf-8', decode_keys=False,
                      include_empty=True, errors='replace', separator='&',
                      cls=None, limit=None, return_iterator=False):
    """Works like :func:`url_decode` but decodes a stream.  The behavior
    of stream and limit follows functions like
    :func:`~werkzeug.wsgi.make_line_iter`.  The generator of pairs is
    directly fed to the `cls` so you can consume the data while it's
    parsed.

    .. versionadded:: 0.8

    :param stream: a stream with the encoded querystring
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param decode_keys: Used on Python 2.x to control whether keys should
                        be forced to be unicode objects.  If set to `True`,
                        keys will be unicode in all cases. Otherwise, they
                        remain `str` if they fit into ASCII.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param limit: the content length of the URL data.  Not necessary if
                  a limited stream is provided.
    :param return_iterator: if set to `True` the `cls` argument is ignored
                            and an iterator over all decoded pairs is
                            returned
    """
    from werkzeug.wsgi import make_chunk_iter
    if return_iterator:
        cls = lambda x: x
    elif cls is None:
        cls = MultiDict
    pair_iter = make_chunk_iter(stream, separator, limit)
    return cls(_url_decode_impl(pair_iter, charset, decode_keys,
                                include_empty, errors))


def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
    for pair in pair_iter:
        if not pair:
            continue
        s = make_literal_wrapper(pair)
        equal = s('=')
        if equal in pair:
            key, value = pair.split(equal, 1)
        else:
            if not include_empty:
                continue
            key = pair
            value = s('')
        key = url_unquote_plus(key, charset, errors)
        if charset is not None and PY2 and not decode_keys:
            key = try_coerce_native(key)
        yield key, url_unquote_plus(value, charset, errors)


def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
               separator=b'&'):
    """URL encode a dict/`MultiDict`.  If a value is `None` it will not appear
    in the result string.  Per default only values are encoded into the target
    charset strings.  If `encode_keys` is set to ``True`` unicode keys are
    supported too.

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm.

    .. versionadded:: 0.5
       `sort`, `key`, and `separator` were added.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
                        Python 3.x)
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting.  For more details
                check out the :func:`sorted` documentation.
    """
    separator = to_native(separator, 'ascii')
    return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))


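# Illustrative usage of url_encode (added for clarity, not part of the
# original module); `sort=True` makes the output order deterministic:
#
#   >>> url_encode({'a': 1, 'b': 'two words'}, sort=True)
#   'a=1&b=two+words'

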
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False,
                      sort=False, key=None, separator=b'&'):
    """Like :meth:`url_encode` but writes the results to a stream
    object.  If the stream is `None` a generator over all encoded
    pairs is returned.

    .. versionadded:: 0.8

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into or `None` if
                   an iterator over the encoded pairs should be returned.  In
                   that case the separator argument is ignored.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
                        Python 3.x)
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting.  For more details
                check out the :func:`sorted` documentation.
    """
    separator = to_native(separator, 'ascii')
    gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
    if stream is None:
        return gen
    for idx, chunk in enumerate(gen):
        if idx:
            stream.write(separator)
        stream.write(chunk)


def url_join(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter.

    :param base: the base URL for the join operation.
    :param url: the URL to join.
    :param allow_fragments: indicates whether fragments should be allowed.
    """
    if isinstance(base, tuple):
        base = url_unparse(base)
    if isinstance(url, tuple):
        url = url_unparse(url)

    base, url = normalize_string_tuple((base, url))
    s = make_literal_wrapper(base)

    if not base:
        return url
    if not url:
        return base

    bscheme, bnetloc, bpath, bquery, bfragment = \
        url_parse(base, allow_fragments=allow_fragments)
    scheme, netloc, path, query, fragment = \
        url_parse(url, bscheme, allow_fragments)
    if scheme != bscheme:
        return url
    if netloc:
        return url_unparse((scheme, netloc, path, query, fragment))
    netloc = bnetloc

    if path[:1] == s('/'):
        segments = path.split(s('/'))
    elif not path:
        segments = bpath.split(s('/'))
        if not query:
            query = bquery
    else:
        segments = bpath.split(s('/'))[:-1] + path.split(s('/'))

    # If the rightmost part is "./" we want to keep the slash but
    # remove the dot.
    if segments[-1] == s('.'):
        segments[-1] = s('')

    # Resolve ".." and "."
    segments = [segment for segment in segments if segment != s('.')]
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if segments[i] == s('..') and \
               segments[i - 1] not in (s(''), s('..')):
                del segments[i - 1:i + 1]
                break
            i += 1
        else:
            break

    # Remove leading ".." if the URL is absolute
    unwanted_marker = [s(''), s('..')]
    while segments[:2] == unwanted_marker:
        del segments[1]

    path = s('/').join(segments)
    return url_unparse((scheme, netloc, path, query, fragment))


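# Illustrative usage of url_join (added for clarity, not part of the
# original module); a relative path replaces the last base segment, an
# absolute path replaces the whole base path:
#
#   >>> url_join('http://example.com/a/b', 'c')
#   'http://example.com/a/c'
#   >>> url_join('http://example.com/a/b', '/c')
#   'http://example.com/c'

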
class Href(object):
    """Implements a callable that constructs URLs with the given base. The
    function can be called with any number of positional and keyword
    arguments which are then used to assemble the URL.  Works with URLs
    and posix paths.

    Positional arguments are appended as individual segments to
    the path of the URL:

    >>> href = Href('/foo')
    >>> href('bar', 23)
    '/foo/bar/23'
    >>> href('foo', bar=23)
    '/foo/foo?bar=23'

    If any of the arguments (positional or keyword) evaluates to `None` it
    will be skipped.  If no keyword arguments are given the last argument
    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
    otherwise the keyword arguments are used for the query parameters, cutting
    off the first trailing underscore of the parameter name:

    >>> href(is_=42)
    '/foo?is=42'
    >>> href({'foo': 'bar'})
    '/foo?foo=bar'

    Combining both methods is not allowed:

    >>> href({'foo': 'bar'}, bar=42)
    Traceback (most recent call last):
      ...
    TypeError: keyword arguments and query-dicts can't be combined

    Accessing attributes on the href object creates a new href object with
    the attribute name as prefix:

    >>> bar_href = href.bar
    >>> bar_href("blub")
    '/foo/bar/blub'

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm:

    >>> href = Href("/", sort=True)
    >>> href(a=1, b=2, c=3)
    '/?a=1&b=2&c=3'

    .. versionadded:: 0.5
       `sort` and `key` were added.
    """

    def __init__(self, base='./', charset='utf-8', sort=False, key=None):
        if not base:
            base = './'
        self.base = base
        self.charset = charset
        self.sort = sort
        self.key = key

    def __getattr__(self, name):
        if name[:2] == '__':
            raise AttributeError(name)
        base = self.base
        if base[-1:] != '/':
            base += '/'
        return Href(url_join(base, name), self.charset, self.sort, self.key)

    def __call__(self, *path, **query):
        if path and isinstance(path[-1], dict):
            if query:
                raise TypeError('keyword arguments and query-dicts '
                                'can\'t be combined')
            query, path = path[-1], path[:-1]
        elif query:
            query = dict([(k.endswith('_') and k[:-1] or k, v)
                          for k, v in query.items()])
        path = '/'.join([to_unicode(url_quote(x, self.charset), 'ascii')
                         for x in path if x is not None]).lstrip('/')
        rv = self.base
        if path:
            if not rv.endswith('/'):
                rv += '/'
            rv = url_join(rv, './' + path)
        if query:
            rv += '?' + to_unicode(url_encode(query, self.charset, sort=self.sort,
                                              key=self.key), 'ascii')
        return to_native(rv)
190
Linux_x86_64/lib/python2.7/site-packages/werkzeug/useragents.py
Normal file
@ -0,0 +1,190 @@
# -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~

This module provides a helper to inspect user agent strings.  This module
is far from complete but should work for most of the currently available
browsers.


:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re


class UserAgentParser(object):
    """A simple user agent parser.  Used by the `UserAgent`."""

    platforms = (
        ('iphone|ios', 'iphone'),
        ('ipad', 'ipad'),
        (r'darwin|mac|os\s*x', 'macos'),
        ('win', 'windows'),
        (r'android', 'android'),
        (r'x11|lin(\b|ux)?', 'linux'),
        ('(sun|i86)os', 'solaris'),
        (r'nintendo\s+wii', 'wii'),
        ('irix', 'irix'),
        ('hp-?ux', 'hpux'),
        ('aix', 'aix'),
        ('sco|unix_sv', 'sco'),
        ('bsd', 'bsd'),
        ('amiga', 'amiga'),
        ('blackberry|playbook', 'blackberry')
    )
    browsers = (
        ('googlebot', 'google'),
        ('msnbot', 'msn'),
        ('yahoo', 'yahoo'),
        ('ask jeeves', 'ask'),
        (r'aol|america\s+online\s+browser', 'aol'),
        ('opera', 'opera'),
        ('chrome', 'chrome'),
        ('firefox|firebird|phoenix|iceweasel', 'firefox'),
        ('galeon', 'galeon'),
        ('safari', 'safari'),
        ('webkit', 'webkit'),
        ('camino', 'camino'),
        ('konqueror', 'konqueror'),
        ('k-meleon', 'kmeleon'),
        ('netscape', 'netscape'),
        (r'msie|microsoft\s+internet\s+explorer', 'msie'),
        ('lynx', 'lynx'),
        ('links', 'links'),
        ('seamonkey|mozilla', 'seamonkey')
    )

    _browser_version_re = r'(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?(?i)'
    _language_re = re.compile(
        r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|'
        r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)'
    )

    def __init__(self):
        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
        self.browsers = [(b, re.compile(self._browser_version_re % a))
                         for a, b in self.browsers]

    def __call__(self, user_agent):
        for platform, regex in self.platforms:
            match = regex.search(user_agent)
            if match is not None:
                break
        else:
            platform = None
        for browser, regex in self.browsers:
            match = regex.search(user_agent)
            if match is not None:
                version = match.group(1)
                break
        else:
            browser = version = None
        match = self._language_re.search(user_agent)
        if match is not None:
            language = match.group(1) or match.group(2)
        else:
            language = None
        return platform, browser, version, language


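# Illustrative parse (added for clarity, not part of the original module);
# the parser returns a (platform, browser, version, language) tuple, with
# `None` for anything it cannot recognize:
#
#   >>> parse = UserAgentParser()
#   >>> parse('Mozilla/5.0 (X11; Linux x86_64; rv:24.0) '
#   ...       'Gecko/20100101 Firefox/24.0')
#   ('linux', 'firefox', '24.0', None)

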
class UserAgent(object):
    """Represents a user agent.  Pass it a WSGI environment or a user agent
    string and you can inspect some of the details from the user agent
    string via the attributes.  The following attributes exist:

    .. attribute:: string

       the raw user agent string

    .. attribute:: platform

       the browser platform.  The following platforms are currently
       recognized:

       - `aix`
       - `amiga`
       - `android`
       - `bsd`
       - `hpux`
       - `iphone`
       - `ipad`
       - `irix`
       - `linux`
       - `macos`
       - `sco`
       - `solaris`
       - `wii`
       - `windows`

    .. attribute:: browser

       the name of the browser.  The following browsers are currently
       recognized:

       - `aol` *
       - `ask` *
       - `camino`
       - `chrome`
       - `firefox`
       - `galeon`
       - `google` *
       - `kmeleon`
       - `konqueror`
       - `links`
       - `lynx`
       - `msie`
       - `msn`
       - `netscape`
       - `opera`
       - `safari`
       - `seamonkey`
       - `webkit`
       - `yahoo` *

       (Browsers marked with a star (``*``) are crawlers.)

    .. attribute:: version

       the version of the browser

    .. attribute:: language

       the language of the browser
    """

    _parser = UserAgentParser()

    def __init__(self, environ_or_string):
        if isinstance(environ_or_string, dict):
            environ_or_string = environ_or_string.get('HTTP_USER_AGENT', '')
        self.string = environ_or_string
        self.platform, self.browser, self.version, self.language = \
            self._parser(environ_or_string)

    def to_header(self):
        return self.string

    def __str__(self):
        return self.string

    def __nonzero__(self):
        return bool(self.browser)

    __bool__ = __nonzero__

    def __repr__(self):
        return '<%s %r/%s>' % (
            self.__class__.__name__,
            self.browser,
            self.version
        )


# conceptually this belongs in this module but because we want to lazily
# load the user agent module (which happens in wrappers.py) we have to import
# it afterwards.  The class itself has the module set to this module so
# pickle, inspect and similar modules treat the object as if it was really
# implemented here.
from werkzeug.wrappers import UserAgentMixin
611
Linux_x86_64/lib/python2.7/site-packages/werkzeug/utils.py
Normal file
@ -0,0 +1,611 @@
# -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~

This module implements various utilities for WSGI applications.  Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.

:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import pkgutil
try:
    from html.entities import name2codepoint
except ImportError:
    from htmlentitydefs import name2codepoint

from werkzeug._compat import unichr, text_type, string_types, iteritems, \
     reraise, PY2
from werkzeug._internal import _DictAccessorProperty, \
     _parse_signature, _missing


_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
_entity_re = re.compile(r'&([^;]+);')
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
                         'LPT2', 'LPT3', 'PRN', 'NUL')


class cached_property(object):
    """A decorator that converts a function into a lazy property.  The
    function wrapped is called the first time to retrieve the result
    and then that calculated result is used the next time you access
    the value::

        class Foo(object):

            @cached_property
            def foo(self):
                # calculate something important here
                return 42

    The class has to have a `__dict__` in order for this property to
    work.
    """

    # implementation detail: this property is implemented as non-data
    # descriptor.  non-data descriptors are only invoked if there is
    # no entry with the same name in the instance's __dict__.
    # this allows us to completely get rid of the access function call
    # overhead.  If one chooses to invoke __get__ by hand the property
    # will still work as expected because the lookup logic is replicated
    # in __get__ for manual invocation.

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        value = obj.__dict__.get(self.__name__, _missing)
        if value is _missing:
            value = self.func(obj)
            obj.__dict__[self.__name__] = value
        return value


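# Illustrative behavior of cached_property (added for clarity, not part of
# the original module); the second access hits the instance __dict__ and the
# wrapped function is not called again:
#
#   >>> class Foo(object):
#   ...     @cached_property
#   ...     def bar(self):
#   ...         print('computing')
#   ...         return 42
#   >>> f = Foo()
#   >>> f.bar
#   computing
#   42
#   >>> f.bar
#   42

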
class environ_property(_DictAccessorProperty):
    """Maps request attributes to environment variables.  This works not only
    for the Werkzeug request object, but also any other class with an
    environ attribute:

    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'

    If you pass it a second value it's used as default if the key does not
    exist, the third one can be a converter that takes a value and converts
    it.  If it raises :exc:`ValueError` or :exc:`TypeError` the default value
    is used.  If no default value is provided `None` is used.

    Per default the property is read only.  You have to explicitly enable it
    by passing ``read_only=False`` to the constructor.
    """

    read_only = True

    def lookup(self, obj):
        return obj.environ


class header_property(_DictAccessorProperty):
    """Like `environ_property` but for headers."""

    def lookup(self, obj):
        return obj.headers


class HTMLBuilder(object):
    """Helper object for HTML generation.

    Per default there are two instances of that class.  The `html` one, and
    the `xhtml` one for those two dialects.  The class uses keyword parameters
    and positional parameters to generate small snippets of HTML.

    Keyword parameters are converted to XML/SGML attributes, positional
    arguments are used as children.  Because Python accepts positional
    arguments before keyword arguments it's a good idea to use a list with the
    star-syntax for some children:

    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
    ...                        html.a('bar', href='bar.html')])
    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'

    This class works around some browser limitations and can not be used for
    arbitrary SGML/XML generation.  For that purpose lxml and similar
    libraries exist.

    Calling the builder escapes the string passed:

    >>> html.p(html("<foo>"))
    u'<p>&lt;foo&gt;</p>'
    """

    _entity_re = re.compile(r'&([^;]+);')
    _entities = name2codepoint.copy()
    _entities['apos'] = 39
    _empty_elements = set([
        'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
        'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param',
        'source', 'wbr'
    ])
    _boolean_attributes = set([
        'selected', 'checked', 'compact', 'declare', 'defer', 'disabled',
        'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'
    ])
    _plaintext_elements = set(['textarea'])
    _c_like_cdata = set(['script', 'style'])

    def __init__(self, dialect):
        self._dialect = dialect

    def __call__(self, s):
        return escape(s)

    def __getattr__(self, tag):
        if tag[:2] == '__':
            raise AttributeError(tag)
        def proxy(*children, **arguments):
            buffer = '<' + tag
            for key, value in iteritems(arguments):
                if value is None:
                    continue
                if key[-1] == '_':
                    key = key[:-1]
                if key in self._boolean_attributes:
                    if not value:
                        continue
                    if self._dialect == 'xhtml':
                        value = '="' + key + '"'
                    else:
                        value = ''
                else:
                    value = '="' + escape(value) + '"'
                buffer += ' ' + key + value
            if not children and tag in self._empty_elements:
                if self._dialect == 'xhtml':
                    buffer += ' />'
                else:
                    buffer += '>'
                return buffer
            buffer += '>'

            children_as_string = ''.join([text_type(x) for x in children
                                          if x is not None])

            if children_as_string:
                if tag in self._plaintext_elements:
                    children_as_string = escape(children_as_string)
                elif tag in self._c_like_cdata and self._dialect == 'xhtml':
                    children_as_string = '/*<![CDATA[*/' + \
                                         children_as_string + '/*]]>*/'
            buffer += children_as_string + '</' + tag + '>'
            return buffer
        return proxy

    def __repr__(self):
        return '<%s for %r>' % (
            self.__class__.__name__,
            self._dialect
        )


html = HTMLBuilder('html')
xhtml = HTMLBuilder('xhtml')


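# Illustrative usage of the two builder instances (added for clarity, not
# part of the original module); empty elements differ by dialect:
#
#   >>> html.br()
#   '<br>'
#   >>> xhtml.br()
#   '<br />'
#   >>> html.a('Home', href='/index.html')
#   u'<a href="/index.html">Home</a>'

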
def get_content_type(mimetype, charset):
    """Return the full content type string with charset for a mimetype.

    If the mimetype represents text the charset will be appended as charset
    parameter, otherwise the mimetype is returned unchanged.

    :param mimetype: the mimetype to be used as content type.
    :param charset: the charset to be appended in case it was a text mimetype.
    :return: the content type.
    """
    if mimetype.startswith('text/') or \
       mimetype == 'application/xml' or \
       (mimetype.startswith('application/') and
        mimetype.endswith('+xml')):
        mimetype += '; charset=' + charset
    return mimetype


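# Illustrative usage of get_content_type (added for clarity, not part of the
# original module); only text and XML mimetypes get a charset parameter:
#
#   >>> get_content_type('text/html', 'utf-8')
#   'text/html; charset=utf-8'
#   >>> get_content_type('application/octet-stream', 'utf-8')
#   'application/octet-stream'

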
def format_string(string, context):
    """String-template format a string:

    >>> format_string('$foo and ${foo}s', dict(foo=42))
    '42 and 42s'

    This does not do any attribute lookup etc.  For more advanced string
    formattings have a look at the `werkzeug.template` module.

    :param string: the format string.
    :param context: a dict with the variables to insert.
    """
    def lookup_arg(match):
        x = context[match.group(1) or match.group(2)]
        if not isinstance(x, string_types):
            x = type(string)(x)
        return x
    return _format_re.sub(lookup_arg, string)


def secure_filename(filename):
    r"""Pass it a filename and it will return a secure version of it.  This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`.  The filename returned is an ASCII only string
    for maximum portability.

    On Windows systems the function also makes sure that the file is not
    named after one of the special device files.

    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'

    The function might return an empty filename.  It's your responsibility
    to ensure that the filename is unique and that you generate a random
    filename if the function returned an empty one.

    .. versionadded:: 0.5

    :param filename: the filename to secure
    """
    if isinstance(filename, text_type):
        from unicodedata import normalize
        filename = normalize('NFKD', filename).encode('ascii', 'ignore')
        if not PY2:
            filename = filename.decode('ascii')
    for sep in os.path.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, ' ')
    filename = str(_filename_ascii_strip_re.sub('', '_'.join(
                   filename.split()))).strip('._')

    # on nt a couple of special files are present in each folder.  We
    # have to ensure that the target file is not such a filename.  In
    # this case we prepend an underline
    if os.name == 'nt' and filename and \
       filename.split('.')[0].upper() in _windows_device_files:
        filename = '_' + filename

    return filename


def escape(s, quote=None):
    """Replace special characters "&", "<", ">" and (") to HTML-safe sequences.

    There is a special handling for `None` which escapes to an empty string.

    .. versionchanged:: 0.9
       `quote` is now implicitly on.

    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ''
    elif hasattr(s, '__html__'):
        return text_type(s.__html__())
    elif not isinstance(s, string_types):
        s = text_type(s)
    if quote is not None:
        from warnings import warn
        warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2)
    s = s.replace('&', '&amp;').replace('<', '&lt;') \
        .replace('>', '&gt;').replace('"', '&quot;')
    return s


def unescape(s):
    """The reverse function of `escape`.  This unescapes all the HTML
    entities, not only the XML entities inserted by `escape`.

    :param s: the string to unescape.
    """
    def handle_match(m):
        name = m.group(1)
        if name in HTMLBuilder._entities:
            return unichr(HTMLBuilder._entities[name])
        try:
            if name[:2] in ('#x', '#X'):
                return unichr(int(name[2:], 16))
            elif name.startswith('#'):
                return unichr(int(name[1:]))
        except ValueError:
            pass
        return u''
    return _entity_re.sub(handle_match, s)


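# Illustrative round trip through escape/unescape (added for clarity, not
# part of the original module):
#
#   >>> escape(u'<a href="x">&</a>')
#   u'&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'
#   >>> unescape(u'&lt;foo&gt;')
#   u'<foo>'

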
def redirect(location, code=302):
    """Return a response object (a WSGI application) that, if called,
    redirects the client to the target location.  Supported codes are 301,
    302, 303, 305, and 307.  300 is not supported because it's not a real
    redirect and 304 because it's the answer for a request with defined
    If-Modified-Since headers.

    .. versionadded:: 0.6
       The location can now be a unicode string that is encoded using
       the :func:`iri_to_uri` function.

    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    """
    from werkzeug.wrappers import Response
    display_location = escape(location)
    if isinstance(location, text_type):
        from werkzeug.urls import iri_to_uri
        location = iri_to_uri(location)
    response = Response(
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        '<title>Redirecting...</title>\n'
        '<h1>Redirecting...</h1>\n'
        '<p>You should be redirected automatically to target URL: '
        '<a href="%s">%s</a>.  If not click the link.' %
        (escape(location), display_location), code, mimetype='text/html')
    response.headers['Location'] = location
    return response


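# Illustrative usage of redirect (added for clarity, not part of the
# original module); the returned Response carries the status code and a
# Location header:
#
#   >>> resp = redirect(u'http://example.com/new', code=301)
#   >>> resp.status_code
#   301
#   >>> 'Location' in resp.headers
#   True

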
def append_slash_redirect(environ, code=301):
    """Redirect to the same URL but with a slash appended.  The behavior
    of this function is undefined if the path ends with a slash already.

    :param environ: the WSGI environment for the request that triggers
                    the redirect.
    :param code: the status code for the redirect.
    """
    new_path = environ['PATH_INFO'].strip('/') + '/'
    query_string = environ.get('QUERY_STRING')
    if query_string:
        new_path += '?' + query_string
    return redirect(new_path, code)


def import_string(import_name, silent=False):
    """Imports an object based on a string.  This is useful if you want to
    use import paths as endpoints or something similar.  An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    #XXX: py3 review needed
    assert isinstance(import_name, string_types)
    # force the import name to be a native string
    import_name = str(import_name)
    try:
        if ':' in import_name:
            module, obj = import_name.split(':', 1)
        elif '.' in import_name:
            module, obj = import_name.rsplit('.', 1)
        else:
            return __import__(import_name)
        # __import__ is not able to handle unicode strings in the fromlist
        # if the module is a package
        if PY2 and isinstance(obj, unicode):
            obj = obj.encode('utf-8')
        try:
            return getattr(__import__(module, None, None, [obj]), obj)
        except (ImportError, AttributeError):
            # support importing modules not yet set up by the parent module
            # (or package for that matter)
            modname = module + '.' + obj
            __import__(modname)
            return sys.modules[modname]
    except ImportError as e:
        if not silent:
            reraise(
                ImportStringError,
                ImportStringError(import_name, e),
                sys.exc_info()[2])


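# Illustrative usage of import_string (added for clarity, not part of the
# original module); both spellings resolve to the same object:
#
#   >>> import os.path
#   >>> import_string('os.path:join') is os.path.join
#   True
#   >>> import_string('os.path.join') is os.path.join
#   True
#   >>> import_string('no.such.module', silent=True) is None
#   True

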
def find_modules(import_path, include_packages=False, recursive=False):
    """Find all the modules below a package.  This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.

    Packages are not returned unless `include_packages` is `True`.  This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.

    :param import_path: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    module = import_string(import_path)
    path = getattr(module, '__path__', None)
    if path is None:
        raise ValueError('%r is not a package' % import_path)
    basename = module.__name__ + '.'
    for importer, modname, ispkg in pkgutil.iter_modules(path):
        modname = basename + modname
        if ispkg:
            if include_packages:
                yield modname
            if recursive:
                for item in find_modules(modname, include_packages, True):
                    yield item
        else:
            yield modname


def validate_arguments(func, args, kwargs, drop_extra=True):
    """Check if the function accepts the arguments and keyword arguments.
    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
    the function without causing a `TypeError` because the function signature
    is incompatible.  If `drop_extra` is set to `True` (which is the default)
    any extra positional or keyword arguments are dropped automatically.

    The exception raised provides three attributes:

    `missing`
        A set of argument names that the function expected but were
        missing.

    `extra`
        A dict of keyword arguments that the function can not handle but
        were provided.

    `extra_positional`
        A list of values that were given by positional argument but the
        function cannot accept.

    This can be useful for decorators that forward user submitted data to
    a view function::

        from werkzeug.utils import ArgumentValidationError, validate_arguments

        def sanitize(f):
            def proxy(request):
                data = request.values.to_dict()
                try:
                    args, kwargs = validate_arguments(f, (request,), data)
                except ArgumentValidationError:
                    raise BadRequest('The browser failed to transmit all '
                                     'the data expected.')
                return f(*args, **kwargs)
            return proxy

    :param func: the function the validation is performed against.
    :param args: a tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :param drop_extra: set to `False` if you don't want extra arguments
                       to be silently dropped.
    :return: tuple in the form ``(args, kwargs)``.
    """
    parser = _parse_signature(func)
    args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
    if missing:
        raise ArgumentValidationError(tuple(missing))
    elif (extra or extra_positional) and not drop_extra:
        raise ArgumentValidationError(None, extra, extra_positional)
    return tuple(args), kwargs


def bind_arguments(func, args, kwargs):
    """Bind the arguments provided into a dict.  When passed a function,
    a tuple of arguments and a dict of keyword arguments `bind_arguments`
    returns a dict of names as the function would see it.  This can be useful
    to implement a cache decorator that uses the function arguments to build
    the cache key based on the values of the arguments.

    :param func: the function the arguments should be bound for.
    :param args: tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :return: a :class:`dict` of bound keyword arguments.
    """
    args, kwargs, missing, extra, extra_positional, \
        arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
    values = {}
    for (name, has_default, default), value in zip(arg_spec, args):
        values[name] = value
    if vararg_var is not None:
        values[vararg_var] = tuple(extra_positional)
    elif extra_positional:
        raise TypeError('too many positional arguments')
    if kwarg_var is not None:
        multikw = set(extra) & set([x[0] for x in arg_spec])
        if multikw:
            raise TypeError('got multiple values for keyword argument ' +
                            repr(next(iter(multikw))))
        values[kwarg_var] = extra
    elif extra:
        raise TypeError('got unexpected keyword argument ' +
                        repr(next(iter(extra))))
    return values


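# Illustrative usage of bind_arguments (added for clarity, not part of the
# original module); defaults are assumed to be filled in by
# _parse_signature, so `c` shows up even though it was not passed:
#
#   >>> def f(a, b, c=3):
#   ...     pass
#   >>> sorted(bind_arguments(f, (1, 2), {}).items())
#   [('a', 1), ('b', 2), ('c', 3)]

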
class ArgumentValidationError(ValueError):
    """Raised if :func:`validate_arguments` fails to validate"""

    def __init__(self, missing=None, extra=None, extra_positional=None):
        self.missing = set(missing or ())
        self.extra = extra or {}
        self.extra_positional = extra_positional or []
        ValueError.__init__(self, 'function arguments invalid. ('
                            '%d missing, %d additional)' % (
                                len(self.missing),
                                len(self.extra) + len(self.extra_positional)
                            ))


class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""

    #: String in dotted notation that failed to be imported.
    import_name = None
    #: Wrapped exception.
    exception = None

    def __init__(self, import_name, exception):
        self.import_name = import_name
        self.exception = exception

        msg = (
            'import_string() failed for %r. Possible reasons are:\n\n'
            '- missing __init__.py in a package;\n'
            '- package or module path not included in sys.path;\n'
            '- duplicated package or module name taking precedence in '
            'sys.path;\n'
            '- missing module, class, function or variable;\n\n'
            'Debugged import:\n\n%s\n\n'
            'Original exception:\n\n%s: %s')

        name = ''
        tracked = []
        for part in import_name.replace(':', '.').split('.'):
            name += (name and '.') + part
            imported = import_string(name, silent=True)
            if imported:
                tracked.append((name, getattr(imported, '__file__', None)))
            else:
                track = ['- %r found in %r.' % (n, i) for n, i in tracked]
                track.append('- %r not found.' % name)
                msg = msg % (import_name, '\n'.join(track),
                             exception.__class__.__name__, str(exception))
                break

        ImportError.__init__(self, msg)

    def __repr__(self):
        return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
                                 self.exception)


# circular dependencies
from werkzeug.http import quote_header_value, unquote_header_value, \
     cookie_date

# DEPRECATED
# these objects were previously in this module as well.  we import
# them here for backwards compatibility with old pickles.
from werkzeug.datastructures import MultiDict, CombinedMultiDict, \
     Headers, EnvironHeaders
from werkzeug.http import parse_cookie, dump_cookie