# -*- coding: utf-8 -*-

"""
requests.utils
~~~~~~~~~~~~~~

This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""

import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile

from .__version__ import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
    quote, urlparse, bytes, str, OrderedDict, unquote, getproxies,
    proxy_bypass, urlunparse, basestring, integer_types, is_py3,
    proxy_bypass_environment, getproxies_environment, Mapping)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
    InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)

NETRC_FILES = ('.netrc', '_netrc')

DEFAULT_CA_BUNDLE_PATH = certs.where()

DEFAULT_PORTS = {'http': 80, 'https': 443}


if sys.platform == 'win32':
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        try:
            if is_py3:
                import winreg
            else:
                import _winreg as winreg
        except ImportError:
            return False

        try:
            internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings,
                                                  'ProxyEnable')[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings,
                                                'ProxyOverride')[0]
        except OSError:
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == '<local>':
                if '.' not in host:
                    return True
            test = test.replace(".", r"\.")     # mask dots
            test = test.replace("*", r".*")     # change glob sequence
            test = test.replace("?", r".")      # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)


def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""

    if hasattr(d, 'items'):
        d = d.items()

    return d


def super_len(o):
    total_length = None
    current_position = 0

    if hasattr(o, '__len__'):
        total_length = len(o)

    elif hasattr(o, 'len'):
        total_length = o.len

    elif hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            current_position = o.tell()
        except (OSError, IOError):
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, 'seek') and total_length is None:
                # StringIO and BytesIO have seek but no usable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except (OSError, IOError):
                    total_length = 0

    if total_length is None:
        total_length = 0

    return max(0, total_length - current_position)
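

# Example: on a partially read file-like object, super_len reports the bytes
# remaining rather than the total size (BytesIO has no usable fileno, so the
# seek-to-end fallback above is what measures it):
#
#   >>> buf = io.BytesIO(b'hello world')
#   >>> _ = buf.read(6)
#   >>> super_len(buf)
#   5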


def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc."""

    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/requests/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass


def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    if (name and isinstance(name, basestring) and name[0] != '<' and
            name[-1] != '>'):
        return os.path.basename(name)


def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a zip
    archive with the location of an extracted copy of the target, or else
    just return the provided path unchanged.
    """
    if os.path.exists(path):
        # this is already a valid path, no need to do anything further
        return path

    # find the first valid part of the provided path and treat that as a zip archive
    # assume the rest of the path is the name of a member in the archive
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        member = '/'.join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    zip_file = zipfile.ZipFile(archive)
    if member not in zip_file.namelist():
        return path

    # we have a valid zip archive and a valid member of that archive
    tmp = tempfile.gettempdir()
    extracted_path = os.path.join(tmp, *member.split('/'))
    if not os.path.exists(extracted_path):
        extracted_path = zip_file.extract(member, path=tmp)

    return extracted_path
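

# Example (hypothetical paths): if '/opt/app.zip' is a zip archive containing
# the member 'certs/cacert.pem', then calling
# extract_zipped_paths('/opt/app.zip/certs/cacert.pem') extracts that member
# under tempfile.gettempdir() and returns the extracted location; any path
# that already exists on disk is returned unchanged.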


def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)


def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples

    :rtype: list
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)


# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings. A quoted-string could
    contain a comma. A non-quoted string could have quotes in the
    middle. Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    result = []
    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        result.append(item)
    return result


# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    result = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            result[item] = None
            continue
        name, value = item.split('=', 1)
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result


# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.

    :rtype: str
    """
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well. IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]

        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes. Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly. See #458.
        if not is_filename or value[:2] != '\\\\':
            return value.replace('\\\\', '\\').replace('\\"', '"')
    return value


def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.

    :rtype: dict
    """

    cookie_dict = {}

    for cookie in cj:
        cookie_dict[cookie.name] = cookie.value

    return cookie_dict


def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.

    :rtype: CookieJar
    """

    return cookiejar_from_dict(cookie_dict, cj)


def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

    return (charset_re.findall(content) +
            pragma_re.findall(content) +
            xml_re.findall(content))


def _parse_content_type_header(header):
    """Returns content type and parameters from given header

    :param header: string
    :return: tuple containing content type and dictionary of
         parameters
    """

    tokens = header.split(';')
    content_type, params = tokens[0].strip(), tokens[1:]
    params_dict = {}
    items_to_strip = "\"' "

    for param in params:
        param = param.strip()
        if param:
            key, value = param, True
            index_of_equals = param.find("=")
            if index_of_equals != -1:
                key = param[:index_of_equals].strip(items_to_strip)
                value = param[index_of_equals + 1:].strip(items_to_strip)
            params_dict[key.lower()] = value
    return content_type, params_dict
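

# Example: parameter keys are lowercased and stripped of quotes, and a bare
# token with no '=' becomes a True flag:
#
#   >>> _parse_content_type_header('text/html; Charset="UTF-8"; nosniff')
#   ('text/html', {'charset': 'UTF-8', 'nosniff': True})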


def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.

    :rtype: str
    """

    content_type = headers.get('content-type')

    if not content_type:
        return None

    content_type, params = _parse_content_type_header(content_type)

    if 'charset' in params:
        return params['charset'].strip("'\"")

    if 'text' in content_type:
        return 'ISO-8859-1'
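

# Example: an explicit charset parameter wins; text content without one falls
# back to ISO-8859-1 (the RFC 2616 default), and anything else yields None:
#
#   >>> get_encoding_from_headers({'content-type': 'text/html; charset=utf-8'})
#   'utf-8'
#   >>> get_encoding_from_headers({'content-type': 'text/plain'})
#   'ISO-8859-1'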


def stream_decode_response_unicode(iterator, r):
    """Stream decodes an iterator."""

    if r.encoding is None:
        for item in iterator:
            yield item
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        rv = decoder.decode(chunk)
        if rv:
            yield rv
    rv = decoder.decode(b'', final=True)
    if rv:
        yield rv


def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    pos = 0
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    while pos < len(string):
        yield string[pos:pos + slice_length]
        pos += slice_length
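

# Example: the final slice may be shorter, and a None or non-positive
# slice_length yields the whole string in one piece:
#
#   >>> list(iter_slices('abcdefg', 3))
#   ['abc', 'def', 'g']
#   >>> list(iter_slices('abcdefg', None))
#   ['abcdefg']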


def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content


# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    parts = uri.split('%')
    for i in range(1, len(parts)):
        h = parts[i][0:2]
        if len(h) == 2 and h.isalnum():
            try:
                c = chr(int(h, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)

            if c in UNRESERVED_SET:
                parts[i] = c + parts[i][2:]
            else:
                parts[i] = '%' + parts[i]
        else:
            parts[i] = '%' + parts[i]
    return ''.join(parts)
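

# Example: '%7E' decodes to the unreserved '~', while '%2F' ('/') is reserved
# and stays encoded:
#
#   >>> unquote_unreserved('http://example.com/%7Euser/a%2Fb')
#   'http://example.com/~user/a%2Fb'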


def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters
        # Then quote only illegal characters (do not quote reserved,
        # unreserved, or '%')
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # We couldn't unquote the given URI, so let's try quoting it, but
        # there may be unquoted '%'s in the URI. We need to make sure they're
        # properly quoted so they do not cause issues elsewhere.
        return quote(uri, safe=safe_without_percent)
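

# Example: unencoded spaces get quoted, while existing escapes survive the
# unquote/quote round trip untouched:
#
#   >>> requote_uri('http://example.com/path with spaces?q=a%20b')
#   'http://example.com/path%20with%20spaces?q=a%20b'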


def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
    netaddr, bits = net.split('/')
    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)


def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    bits = 0xffffffff ^ (1 << 32 - mask) - 1
    return socket.inet_ntoa(struct.pack('>I', bits))


def is_ipv4_address(string_ip):
    """
    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
    except socket.error:
        return False
    return True


def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    if string_network.count('/') == 1:
        try:
            mask = int(string_network.split('/')[1])
        except ValueError:
            return False

        if mask < 1 or mask > 32:
            return False

        try:
            socket.inet_aton(string_network.split('/')[0])
        except socket.error:
            return False
    else:
        return False
    return True


@contextlib.contextmanager
def set_environ(env_name, value):
    """Set the environment variable 'env_name' to 'value'

    Save previous value, yield, and then restore the previous value stored in
    the environment variable 'env_name'.

    If 'value' is None, do nothing"""
    value_changed = value is not None
    if value_changed:
        old_value = os.environ.get(env_name)
        os.environ[env_name] = value
    try:
        yield
    finally:
        if value_changed:
            if old_value is None:
                del os.environ[env_name]
            else:
                os.environ[env_name] = old_value
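

# Example: the override is visible only inside the with-block, and passing
# None turns the context manager into a no-op:
#
#   >>> with set_environ('NO_PROXY', 'localhost'):
#   ...     os.environ['NO_PROXY']
#   'localhost'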


def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :rtype: bool
    """
    # Prioritize lowercase environment variables over uppercase
    # to keep a consistent behaviour with other http projects (curl, wget).
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy('no_proxy')
    parsed = urlparse(url)

    if parsed.hostname is None:
        # URLs don't always have hostnames, e.g. file:/// urls.
        return True

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the hostname, both with and without the port.
        no_proxy = (
            host for host in no_proxy.replace(' ', '').split(',') if host
        )

        if is_ipv4_address(parsed.hostname):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(parsed.hostname, proxy_ip):
                        return True
                elif parsed.hostname == proxy_ip:
                    # The no_proxy entry was given in plain IP notation
                    # (not CIDR) and matches the URL's host IP exactly.
                    return True
        else:
            host_with_port = parsed.hostname
            if parsed.port:
                host_with_port += ':{}'.format(parsed.port)

            for host in no_proxy:
                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    with set_environ('no_proxy', no_proxy_arg):
        # parsed.hostname can be `None` in cases such as a file URI.
        try:
            bypass = proxy_bypass(parsed.hostname)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False


def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies.

    :rtype: dict
    """
    if should_bypass_proxies(url, no_proxy=no_proxy):
        return {}
    else:
        return getproxies()


def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url of the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    urlparts = urlparse(url)
    if urlparts.hostname is None:
        return proxies.get(urlparts.scheme, proxies.get('all'))

    proxy_keys = [
        urlparts.scheme + '://' + urlparts.hostname,
        urlparts.scheme,
        'all://' + urlparts.hostname,
        'all',
    ]
    proxy = None
    for proxy_key in proxy_keys:
        if proxy_key in proxies:
            proxy = proxies[proxy_key]
            break

    return proxy
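

# Example: a scheme://host key takes precedence over a bare scheme key, which
# in turn beats the 'all' fallbacks:
#
#   >>> proxies = {'http://example.com': 'http://p1:3128', 'http': 'http://p2:3128'}
#   >>> select_proxy('http://example.com/page', proxies)
#   'http://p1:3128'
#   >>> select_proxy('http://other.org/', proxies)
#   'http://p2:3128'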


def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    return '%s/%s' % (name, __version__)


def default_headers():
    """
    :rtype: requests.structures.CaseInsensitiveDict
    """
    return CaseInsensitiveDict({
        'User-Agent': default_user_agent(),
        'Accept-Encoding': ', '.join(('gzip', 'deflate')),
        'Accept': '*/*',
        'Connection': 'keep-alive',
    })


def parse_header_links(value):
    """Return a list of parsed link headers.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """

    links = []

    replace_chars = ' \'"'

    value = value.strip(replace_chars)
    if not value:
        return links

    for val in re.split(', *<', value):
        try:
            url, params = val.split(';', 1)
        except ValueError:
            url, params = val, ''

        link = {'url': url.strip('<> \'"')}

        for param in params.split(';'):
            try:
                key, value = param.split('=')
            except ValueError:
                break

            link[key.strip(replace_chars)] = value.strip(replace_chars)

        links.append(link)

    return links
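

# Example: a pagination-style Link header parses into one dict per link:
#
#   >>> parse_header_links('<http://example.com/?page=2>; rel="next"')
#   [{'url': 'http://example.com/?page=2', 'rel': 'next'}]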


# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """
    :rtype: str
    """
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls and from their location and count
    # determine the encoding. Also detect a BOM, if present.
    sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
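

# Example: plain ASCII JSON has no null bytes in its first four octets, while
# a UTF-16 encoding is betrayed by its BOM:
#
#   >>> guess_json_utf(b'{"key": "value"}')
#   'utf-8'
#   >>> guess_json_utf('{"key": "value"}'.encode('utf-16'))
#   'utf-16'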


def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)

    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((scheme, netloc, path, params, query, fragment))
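

# Example: a bare host gets the default scheme, but an explicit one is kept:
#
#   >>> prepend_scheme_if_needed('example.com/pub', 'http')
#   'http://example.com/pub'
#   >>> prepend_scheme_if_needed('https://example.com/pub', 'http')
#   'https://example.com/pub'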


def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parsed = urlparse(url)

    try:
        auth = (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        auth = ('', '')

    return auth
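

# Example: credentials are percent-decoded on the way out, so an encoded '@'
# in the password round-trips correctly:
#
#   >>> get_auth_from_url('http://user:p%40ss@example.com/')
#   ('user', 'p@ss')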


# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')


def check_header_validity(header):
    """Verifies that header value is a string which doesn't contain
    leading whitespace or return characters. This prevents unintended
    header injection.

    :param header: tuple, in the format (name, value).
    """
    name, value = header

    if isinstance(value, bytes):
        pat = _CLEAN_HEADER_REGEX_BYTE
    else:
        pat = _CLEAN_HEADER_REGEX_STR
    try:
        if not pat.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        raise InvalidHeader("Value for header {%s: %s} must be of type str or "
                            "bytes, not %s" % (name, value, type(value)))


def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    scheme, netloc, path, params, query, fragment = urlparse(url)

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    netloc = netloc.rsplit('@', 1)[-1]

    return urlunparse((scheme, netloc, path, params, query, ''))
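

# Example: both the userinfo and the fragment are dropped:
#
#   >>> urldefragauth('https://user:pass@example.com/path#section')
#   'https://example.com/path'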


def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position
    so it can be read again on redirect.
    """
    body_seek = getattr(prepared_request.body, 'seek', None)
    if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
        try:
            body_seek(prepared_request._body_position)
        except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
                                        "body for redirect.")
    else:
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")