update sqlalchemy

j committed 2016-02-22 16:43:36 +05:30
commit 3b436646a2
362 changed files with 37720 additions and 11021 deletions


@@ -6,8 +6,17 @@ import shutil
 import socket
 import base64
 import hashlib
+import itertools
 from functools import wraps

+try:
+    from urllib.parse import splituser
+except ImportError:
+    from urllib2 import splituser
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import urllib, http_client, configparser, map
+
 from pkg_resources import (
     CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
     require, Environment, find_distributions, safe_name, safe_version,
@@ -16,12 +25,6 @@ from pkg_resources import (
 from setuptools import ssl_support
 from distutils import log
 from distutils.errors import DistutilsError
-from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
-                               urlparse, urlunparse, unquote, splituser,
-                               url2pathname, name2codepoint,
-                               unichr, urljoin, urlsplit, urlunsplit,
-                               ConfigParser)
-from setuptools.compat import filterfalse
 from fnmatch import translate
 from setuptools.py26compat import strip_fragment
 from setuptools.py27compat import get_all_headers
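
The `setuptools.compat` shim is dropped in favor of the vendored `six`. As a rough sketch of what the new names resolve to on either Python, assuming a plain `pip install six` here instead of the vendored `setuptools.extern.six`:

    from six.moves import urllib, http_client, configparser

    # six.moves.urllib bundles the renamed urllib/urllib2/urlparse pieces:
    parts = urllib.parse.urlparse('https://pypi.org/simple/setuptools/')
    print(parts.netloc)                  # pypi.org

    # httplib on Python 2, http.client on Python 3:
    print(http_client.responses[404])    # Not Found

    # ConfigParser module on Python 2, configparser on Python 3:
    cp = configparser.RawConfigParser()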
@@ -68,10 +71,11 @@ def parse_bdist_wininst(name):


 def egg_info_for_url(url):
-    scheme, server, path, parameters, query, fragment = urlparse(url)
-    base = unquote(path.split('/')[-1])
+    parts = urllib.parse.urlparse(url)
+    scheme, server, path, parameters, query, fragment = parts
+    base = urllib.parse.unquote(path.split('/')[-1])
     if server=='sourceforge.net' and base=='download':    # XXX Yuck
-        base = unquote(path.split('/')[-2])
+        base = urllib.parse.unquote(path.split('/')[-2])
     if '#' in base: base, fragment = base.split('#',1)
     return base,fragment
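
For reference, the reworked parsing in `egg_info_for_url` boils down to the following (URL and fragment are made up):

    from six.moves import urllib

    url = 'https://example.org/dist/foo-1.0.tar.gz#egg=foo-dev'
    parts = urllib.parse.urlparse(url)
    scheme, server, path, parameters, query, fragment = parts
    base = urllib.parse.unquote(path.split('/')[-1])
    print(base, fragment)   # foo-1.0.tar.gz egg=foo-dev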
@@ -157,7 +161,7 @@ def unique_everseen(iterable, key=None):
     seen = set()
     seen_add = seen.add
     if key is None:
-        for element in filterfalse(seen.__contains__, iterable):
+        for element in six.moves.filterfalse(seen.__contains__, iterable):
             seen_add(element)
             yield element
     else:
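
`filterfalse` now comes via `six.moves` (`itertools.ifilterfalse` on Python 2, `itertools.filterfalse` on Python 3). A standalone sketch of the dedup idiom `unique_everseen` builds on:

    from six.moves import filterfalse

    seen = set()
    unique = []
    for element in filterfalse(seen.__contains__, ['a', 'b', 'a', 'c', 'b']):
        # filterfalse lazily skips anything already added to `seen`
        seen.add(element)
        unique.append(element)
    print(unique)   # ['a', 'b', 'c']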
@@ -189,14 +193,14 @@ def find_external_links(url, page):
         rels = set(map(str.strip, rel.lower().split(',')))
         if 'homepage' in rels or 'download' in rels:
             for match in HREF.finditer(tag):
-                yield urljoin(url, htmldecode(match.group(1)))
+                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))

     for tag in ("<th>Home Page", "<th>Download URL"):
         pos = page.find(tag)
         if pos!=-1:
             match = HREF.search(page,pos)
             if match:
-                yield urljoin(url, htmldecode(match.group(1)))
+                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))

 user_agent = "Python-urllib/%s setuptools/%s" % (
     sys.version[:3], require('setuptools')[0].version
@@ -239,7 +243,7 @@ class HashChecker(ContentChecker):
     @classmethod
     def from_url(cls, url):
         "Construct a (possibly null) ContentChecker from a URL"
-        fragment = urlparse(url)[-1]
+        fragment = urllib.parse.urlparse(url)[-1]
         if not fragment:
             return ContentChecker()
         match = cls.pattern.search(fragment)
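
The fragment `from_url` inspects is the `#<algo>=<digest>` suffix of a download link. A small sketch (made-up link; the digest is just the MD5 of the empty string):

    from six.moves import urllib

    url = 'https://example.org/foo-1.0.tar.gz#md5=d41d8cd98f00b204e9800998ecf8427e'
    fragment = urllib.parse.urlparse(url)[-1]
    print(fragment)   # md5=d41d8cd98f00b204e9800998ecf8427e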
@@ -274,7 +278,7 @@ class PackageIndex(Environment):
         self.to_scan = []
         if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
             self.opener = ssl_support.opener_for(ca_bundle)
-        else: self.opener = urllib2.urlopen
+        else: self.opener = urllib.request.urlopen

     def process_url(self, url, retrieve=False):
         """Evaluate a URL as a possible download, and maybe retrieve it"""
@@ -311,7 +315,7 @@ class PackageIndex(Environment):
             base = f.url     # handle redirects
             page = f.read()
             if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
-                if isinstance(f, HTTPError):
+                if isinstance(f, urllib.error.HTTPError):
                     # Errors have no charset, assume latin1:
                     charset = 'latin-1'
                 else:
@@ -319,7 +323,7 @@ class PackageIndex(Environment):
                 page = page.decode(charset, "ignore")
             f.close()
             for match in HREF.finditer(page):
-                link = urljoin(base, htmldecode(match.group(1)))
+                link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
                 self.process_url(link)
             if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
                 page = self.process_index(url, page)
@@ -342,7 +346,7 @@ class PackageIndex(Environment):

     def url_ok(self, url, fatal=False):
         s = URL_SCHEME(url)
-        if (s and s.group(1).lower()=='file') or self.allows(urlparse(url)[1]):
+        if (s and s.group(1).lower()=='file') or self.allows(urllib.parse.urlparse(url)[1]):
             return True
         msg = ("\nNote: Bypassing %s (disallowed host; see "
                "http://bit.ly/1dg9ijs for details).\n")
@@ -352,20 +356,30 @@ class PackageIndex(Environment):
             self.warn(msg, url)

     def scan_egg_links(self, search_path):
-        for item in search_path:
-            if os.path.isdir(item):
-                for entry in os.listdir(item):
-                    if entry.endswith('.egg-link'):
-                        self.scan_egg_link(item, entry)
+        dirs = filter(os.path.isdir, search_path)
+        egg_links = (
+            (path, entry)
+            for path in dirs
+            for entry in os.listdir(path)
+            if entry.endswith('.egg-link')
+        )
+        list(itertools.starmap(self.scan_egg_link, egg_links))

     def scan_egg_link(self, path, entry):
-        lines = [_f for _f in map(str.strip,
-                                  open(os.path.join(path, entry))) if _f]
-        if len(lines)==2:
-            for dist in find_distributions(os.path.join(path, lines[0])):
-                dist.location = os.path.join(path, *lines)
-                dist.precedence = SOURCE_DIST
-                self.add(dist)
+        with open(os.path.join(path, entry)) as raw_lines:
+            # filter non-empty lines
+            lines = list(filter(None, map(str.strip, raw_lines)))
+
+        if len(lines) != 2:
+            # format is not recognized; punt
+            return
+
+        egg_path, setup_path = lines
+
+        for dist in find_distributions(os.path.join(path, egg_path)):
+            dist.location = os.path.join(path, *lines)
+            dist.precedence = SOURCE_DIST
+            self.add(dist)

     def process_index(self,url,page):
         """Process the contents of a PyPI page"""
@@ -373,7 +387,7 @@ class PackageIndex(Environment):
             # Process a URL to see if it's for a package page
             if link.startswith(self.index_url):
                 parts = list(map(
-                    unquote, link[len(self.index_url):].split('/')
+                    urllib.parse.unquote, link[len(self.index_url):].split('/')
                 ))
                 if len(parts)==2 and '#' not in parts[1]:
                     # it's a package page, sanitize and index it
@@ -386,7 +400,7 @@ class PackageIndex(Environment):
         # process an index page into the package-page index
         for match in HREF.finditer(page):
             try:
-                scan(urljoin(url, htmldecode(match.group(1))))
+                scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
             except ValueError:
                 pass
@@ -662,7 +676,7 @@ class PackageIndex(Environment):
         try:
             checker = HashChecker.from_url(url)
             fp = self.open_url(strip_fragment(url))
-            if isinstance(fp, HTTPError):
+            if isinstance(fp, urllib.error.HTTPError):
                 raise DistutilsError(
                     "Can't download %s: %s %s" % (url, fp.code,fp.msg)
                 )
@@ -698,21 +712,21 @@ class PackageIndex(Environment):
             return local_open(url)
         try:
             return open_with_auth(url, self.opener)
-        except (ValueError, httplib.InvalidURL) as v:
+        except (ValueError, http_client.InvalidURL) as v:
             msg = ' '.join([str(arg) for arg in v.args])
             if warning:
                 self.warn(warning, msg)
             else:
                 raise DistutilsError('%s %s' % (url, msg))
-        except urllib2.HTTPError as v:
+        except urllib.error.HTTPError as v:
             return v
-        except urllib2.URLError as v:
+        except urllib.error.URLError as v:
             if warning:
                 self.warn(warning, v.reason)
             else:
                 raise DistutilsError("Download error for %s: %s"
                                      % (url, v.reason))
-        except httplib.BadStatusLine as v:
+        except http_client.BadStatusLine as v:
             if warning:
                 self.warn(warning, v.line)
             else:
@@ -721,7 +735,7 @@ class PackageIndex(Environment):
                     'down, %s' %
                     (url, v.line)
                 )
-        except httplib.HTTPException as v:
+        except http_client.HTTPException as v:
             if warning:
                 self.warn(warning, v)
             else:
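
All exception types in this method now resolve through `six.moves`. A sketch of how the renamed exceptions line up; `HTTPError` must be caught before its base class `URLError`, and the URL here is a deliberately dead local address:

    from six.moves import http_client, urllib

    # httplib.* (Py2)  -> http_client.*  -> http.client.* (Py3)
    # urllib2.* (Py2)  -> urllib.error.* -> urllib.error.* (Py3)
    try:
        urllib.request.urlopen('http://localhost:1/')
    except urllib.error.HTTPError as e:
        print('server answered with', e.code)
    except urllib.error.URLError as e:
        print('connection failed:', e.reason)
    except http_client.HTTPException as e:
        print('protocol error:', e)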
@@ -752,7 +766,7 @@ class PackageIndex(Environment):
         elif scheme.startswith('hg+'):
             return self._download_hg(url, filename)
         elif scheme=='file':
-            return url2pathname(urlparse(url)[2])
+            return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
         else:
             self.url_ok(url, True)   # raises error if not allowed
             return self._attempt_download(url, filename)
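
In the `file:` branch, `url2pathname` turns the URL's path component into a local filesystem path. A quick sketch (made-up URL; POSIX result shown):

    from six.moves import urllib

    url = 'file:///tmp/downloads/foo-1.0.tar.gz'
    path = urllib.parse.urlparse(url)[2]
    print(urllib.request.url2pathname(path))   # /tmp/downloads/foo-1.0.tar.gz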
@@ -786,7 +800,7 @@ class PackageIndex(Environment):
         url = url.split('#',1)[0]   # remove any fragment for svn's sake
         creds = ''
         if url.lower().startswith('svn:') and '@' in url:
-            scheme, netloc, path, p, q, f = urlparse(url)
+            scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
             if not netloc and path.startswith('//') and '/' in path[2:]:
                 netloc, path = path[2:].split('/',1)
                 auth, host = splituser(netloc)
@@ -797,14 +811,15 @@ class PackageIndex(Environment):
                 else:
                     creds = " --username="+auth
                 netloc = host
-                url = urlunparse((scheme, netloc, url, p, q, f))
+                parts = scheme, netloc, url, p, q, f
+                url = urllib.parse.urlunparse(parts)
         self.info("Doing subversion checkout from %s to %s", url, filename)
         os.system("svn checkout%s -q %s %s" % (creds, url, filename))
         return filename

     @staticmethod
     def _vcs_split_rev_from_url(url, pop_prefix=False):
-        scheme, netloc, path, query, frag = urlsplit(url)
+        scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)

         scheme = scheme.split('+', 1)[-1]
@@ -816,7 +831,7 @@ class PackageIndex(Environment):
             path, rev = path.rsplit('@', 1)

         # Also, discard fragment
-        url = urlunsplit((scheme, netloc, path, query, ''))
+        url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))

         return url, rev
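
`_vcs_split_rev_from_url` strips the `vcs+` scheme prefix and splits off a trailing `@revision`; the same steps on a made-up URL:

    from six.moves import urllib

    url = 'git+https://example.org/repo.git@v1.2#egg=repo'
    scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
    scheme = scheme.split('+', 1)[-1]     # 'git+https' -> 'https'
    path, rev = path.rsplit('@', 1)       # '/repo.git', 'v1.2'
    print(urllib.parse.urlunsplit((scheme, netloc, path, query, '')), rev)
    # https://example.org/repo.git v1.2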
@@ -868,7 +883,7 @@ entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub

 def uchr(c):
     if not isinstance(c, int):
         return c
-    if c>255: return unichr(c)
+    if c>255: return six.unichr(c)
     return chr(c)

 def decode_entity(match):
@@ -878,7 +893,7 @@ def decode_entity(match):
     elif what.startswith('#'):
         what = int(what[1:])
     else:
-        what = name2codepoint.get(what, match.group(0))
+        what = six.moves.html_entities.name2codepoint.get(what, match.group(0))
     return uchr(what)

 def htmldecode(text):
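
`unichr` and `name2codepoint` likewise move to `six`; a minimal sketch of the entity decoding they support:

    import six
    from six.moves.html_entities import name2codepoint

    # '&gt;' names codepoint 62; six.unichr is unichr on Py2, chr on Py3
    print(six.unichr(name2codepoint['gt']))   # >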
@@ -909,7 +924,7 @@ def _encode_auth(auth):
     >>> chr(10) in str(_encode_auth(long_auth))
     False
     """
-    auth_s = unquote(auth)
+    auth_s = urllib.parse.unquote(auth)
     # convert to bytes
     auth_bytes = auth_s.encode()
     # use the legacy interface for Python 2.3 support
@@ -934,14 +949,14 @@ class Credential(object):
     def __str__(self):
         return '%(username)s:%(password)s' % vars(self)

-class PyPIConfig(ConfigParser.ConfigParser):
+class PyPIConfig(configparser.RawConfigParser):

     def __init__(self):
         """
         Load from ~/.pypirc
         """
         defaults = dict.fromkeys(['username', 'password', 'repository'], '')
-        ConfigParser.ConfigParser.__init__(self, defaults)
+        configparser.RawConfigParser.__init__(self, defaults)

         rc = os.path.join(os.path.expanduser('~'), '.pypirc')
         if os.path.exists(rc):
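
Note that the base class also changes from `ConfigParser` to `RawConfigParser`, so `%` signs in `.pypirc` values are no longer subject to interpolation. A sketch with made-up file contents (`read_string` is the Python 3 spelling; Python 2 would use `readfp`):

    from six.moves import configparser

    cp = configparser.RawConfigParser()
    cp.read_string(u"[pypi]\nusername = alice\npassword = s3cr%t\n")
    # RawConfigParser performs no '%' interpolation, so the value survives:
    print(cp.get('pypi', 'password'))   # s3cr%t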
@@ -973,15 +988,15 @@ class PyPIConfig(ConfigParser.ConfigParser):
         return cred


-def open_with_auth(url, opener=urllib2.urlopen):
+def open_with_auth(url, opener=urllib.request.urlopen):
     """Open a urllib2 request, handling HTTP authentication"""

-    scheme, netloc, path, params, query, frag = urlparse(url)
+    scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)

     # Double scheme does not raise on Mac OS X as revealed by a
     # failing test. We would expect "nonnumeric port". Refs #20.
     if netloc.endswith(':'):
-        raise httplib.InvalidURL("nonnumeric port: ''")
+        raise http_client.InvalidURL("nonnumeric port: ''")

     if scheme in ('http', 'https'):
         auth, host = splituser(netloc)
@@ -997,11 +1012,12 @@ def open_with_auth(url, opener=urllib2.urlopen):
     if auth:
         auth = "Basic " + _encode_auth(auth)
-        new_url = urlunparse((scheme,host,path,params,query,frag))
-        request = urllib2.Request(new_url)
+        parts = scheme, host, path, params, query, frag
+        new_url = urllib.parse.urlunparse(parts)
+        request = urllib.request.Request(new_url)
         request.add_header("Authorization", auth)
     else:
-        request = urllib2.Request(url)
+        request = urllib.request.Request(url)

     request.add_header('User-Agent', user_agent)
     fp = opener(request)
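
A compressed sketch of the Basic-auth path above, with made-up credentials; `base64.b64encode` stands in for `_encode_auth`, which additionally strips newlines for long values:

    import base64
    from six.moves import urllib

    url = 'https://user:secret@example.org/simple/'
    scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)
    auth, host = netloc.rsplit('@', 1)      # what splituser() returns
    token = base64.b64encode(auth.encode()).decode()
    request = urllib.request.Request(
        urllib.parse.urlunparse((scheme, host, path, params, query, frag)))
    request.add_header('Authorization', 'Basic ' + token)
    print(request.get_full_url())           # https://example.org/simple/
    print(request.get_header('Authorization'))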
@@ -1009,9 +1025,10 @@ def open_with_auth(url, opener=urllib2.urlopen):
     if auth:
         # Put authentication info back into request URL if same host,
         # so that links found on the page will work
-        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
+        s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
         if s2==scheme and h2==host:
-            fp.url = urlunparse((s2,netloc,path2,param2,query2,frag2))
+            parts = s2, netloc, path2, param2, query2, frag2
+            fp.url = urllib.parse.urlunparse(parts)

     return fp
@@ -1024,26 +1041,29 @@ def fix_sf_url(url):

 def local_open(url):
     """Read a local path, with special support for directories"""
-    scheme, server, path, param, query, frag = urlparse(url)
-    filename = url2pathname(path)
+    scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
+    filename = urllib.request.url2pathname(path)
     if os.path.isfile(filename):
-        return urllib2.urlopen(url)
+        return urllib.request.urlopen(url)
     elif path.endswith('/') and os.path.isdir(filename):
         files = []
         for f in os.listdir(filename):
-            if f=='index.html':
-                with open(os.path.join(filename,f),'r') as fp:
+            filepath = os.path.join(filename, f)
+            if f == 'index.html':
+                with open(filepath, 'r') as fp:
                     body = fp.read()
                 break
-            elif os.path.isdir(os.path.join(filename,f)):
-                f+='/'
-            files.append("<a href=%r>%s</a>" % (f,f))
+            elif os.path.isdir(filepath):
+                f += '/'
+            files.append('<a href="{name}">{name}</a>'.format(name=f))
         else:
-            body = ("<html><head><title>%s</title>" % url) + \
-                "</head><body>%s</body></html>" % '\n'.join(files)
+            tmpl = ("<html><head><title>{url}</title>"
+                    "</head><body>{files}</body></html>")
+            body = tmpl.format(url=url, files='\n'.join(files))
         status, message = 200, "OK"
     else:
         status, message, body = 404, "Path not found", "Not found"

     headers = {'content-type': 'text/html'}
-    return HTTPError(url, status, message, headers, StringIO(body))
+    body_stream = six.StringIO(body)
+    return urllib.error.HTTPError(url, status, message, headers, body_stream)
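
`HTTPError` doubles as a file-like response object (status, headers, and a body stream), which is why `local_open` can return one instead of raising it. Constructing one by hand, with a made-up URL:

    import six
    from six.moves import urllib

    body = six.StringIO(u'<html><body>Not found</body></html>')
    err = urllib.error.HTTPError(
        'file:///no/such/dir/', 404, 'Path not found',
        {'content-type': 'text/html'}, body)
    print(err.code, err.msg)   # 404 Path not found
    print(err.read())          # reads the HTML body like a normal response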